diff --git a/agent/conf/log4j-cloud.xml.in b/agent/conf/log4j-cloud.xml.in index 9ed43e083e03..44ebd1358af6 100644 --- a/agent/conf/log4j-cloud.xml.in +++ b/agent/conf/log4j-cloud.xml.in @@ -17,91 +17,60 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 9e0ee746c034..e2ec7d9a17df 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -56,8 +56,8 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.MDC; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; @@ -89,6 +89,7 @@ import com.cloud.utils.nio.Task; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; +import org.apache.logging.log4j.ThreadContext; /** * @config @@ -104,7 +105,7 @@ * **/ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater { - protected static Logger s_logger = Logger.getLogger(Agent.class); + protected Logger logger = LogManager.getLogger(getClass()); public enum ExitStatus { Normal(0), // Normal status = 0. @@ -181,7 +182,7 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour final String value = _shell.getPersistentProperty(getResourceName(), "id"); _id = value != null ? 
Long.parseLong(value) : null; - s_logger.info("id is " + (_id != null ? _id : "")); + logger.info("id is " + (_id != null ? _id : "")); final Map params = new HashMap<>(); @@ -199,7 +200,7 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp()); - s_logger.debug("Adding shutdown hook"); + logger.debug("Adding shutdown hook"); Runtime.getRuntime().addShutdownHook(_shutdownThread); _ugentTaskPool = @@ -210,7 +211,7 @@ public Agent(final IAgentShell shell, final int localAgentId, final ServerResour new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( "agentRequest-Handler")); - s_logger.info("Agent [id = " + (_id != null ? _id : "new") + " : type = " + getResourceName() + " : zone = " + _shell.getZone() + " : pod = " + _shell.getPod() + + logger.info("Agent [id = " + (_id != null ? _id : "new") + " : type = " + getResourceName() + " : zone = " + _shell.getZone() + " : pod = " + _shell.getPod() + " : workers = " + _shell.getWorkers() + " : host = " + host + " : port = " + _shell.getPort()); } @@ -268,7 +269,7 @@ public void run() { public void start() { if (!_resource.start()) { - s_logger.error("Unable to start the resource: " + _resource.getName()); + logger.error("Unable to start the resource: " + _resource.getName()); throw new CloudRuntimeException("Unable to start the resource: " + _resource.getName()); } @@ -285,14 +286,14 @@ public void start() { try { _connection.start(); } catch (final NioConnectionException e) { - s_logger.warn("NIO Connection Exception " + e); - s_logger.info("Attempted to connect to the server, but received an unexpected exception, trying again..."); + logger.warn("NIO Connection Exception " + e); + logger.info("Attempted to connect to the server, but received an unexpected exception, trying again..."); } while (!_connection.isStartup()) { final String host = 
_shell.getNextHost(); _shell.getBackoffAlgorithm().waitBeforeRetry(); _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - s_logger.info("Connecting to host:" + host); + logger.info("Connecting to host:" + host); try { _connection.start(); } catch (final NioConnectionException e) { @@ -300,9 +301,9 @@ public void start() { try { _connection.cleanUp(); } catch (final IOException ex) { - s_logger.warn("Fail to clean up old connection. " + ex); + logger.warn("Fail to clean up old connection. " + ex); } - s_logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); + logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); } } _shell.updateConnectedHost(); @@ -311,7 +312,7 @@ public void start() { } public void stop(final String reason, final String detail) { - s_logger.info("Stopping the agent: Reason = " + reason + (detail != null ? ": Detail = " + detail : "")); + logger.info("Stopping the agent: Reason = " + reason + (detail != null ? 
": Detail = " + detail : "")); _reconnectAllowed = false; if (_connection != null) { final ShutdownCommand cmd = new ShutdownCommand(reason, detail); @@ -321,15 +322,15 @@ public void stop(final String reason, final String detail) { _link.send(req.toBytes()); } } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send: " + cmd.toString()); + logger.warn("Unable to send: " + cmd.toString()); } catch (final Exception e) { - s_logger.warn("Unable to send: " + cmd.toString() + " due to exception: ", e); + logger.warn("Unable to send: " + cmd.toString() + " due to exception: ", e); } - s_logger.debug("Sending shutdown to management server"); + logger.debug("Sending shutdown to management server"); try { Thread.sleep(1000); } catch (final InterruptedException e) { - s_logger.debug("Who the heck interrupted me here?"); + logger.debug("Who the heck interrupted me here?"); } _connection.stop(); _connection = null; @@ -376,7 +377,7 @@ public Long getId() { } public void setId(final Long id) { - s_logger.info("Set agent id " + id); + logger.info("Set agent id " + id); _id = id; _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } @@ -395,7 +396,7 @@ private synchronized void scheduleHostLBCheckerTask(final long checkInterval) { hostLBTimer.cancel(); } if (checkInterval > 0L) { - s_logger.info("Scheduling preferred host timer task with host.lb.interval=" + checkInterval + "ms"); + logger.info("Scheduling preferred host timer task with host.lb.interval=" + checkInterval + "ms"); hostLBTimer = new Timer("Host LB Timer"); hostLBTimer.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval); } @@ -403,8 +404,8 @@ private synchronized void scheduleHostLBCheckerTask(final long checkInterval) { public void scheduleWatch(final Link link, final Request request, final long delay, final long period) { synchronized (_watchList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding a watch list"); + if 
(logger.isDebugEnabled()) { + logger.debug("Adding a watch list"); } final WatchTask task = new WatchTask(link, request, this); _timer.schedule(task, 0, period); @@ -415,14 +416,14 @@ public void scheduleWatch(final Link link, final Request request, final long del public void triggerUpdate() { PingCommand command = _resource.getCurrentStatus(getId()); command.setOutOfBand(true); - s_logger.debug("Sending out of band ping"); + logger.debug("Sending out of band ping"); final Request request = new Request(_id, -1, command, false); request.setSequence(getNextSequence()); try { _link.send(request.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send ping update: " + request.toString()); + logger.warn("Unable to send ping update: " + request.toString()); } } @@ -431,8 +432,8 @@ protected void cancelTasks() { for (final WatchTask task : _watchList) { task.cancel(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Clearing watch list: " + _watchList.size()); + if (logger.isDebugEnabled()) { + logger.debug("Clearing watch list: " + _watchList.size()); } _watchList.clear(); } @@ -469,14 +470,14 @@ public void sendStartup(final Link link) { final Request request = new Request(_id != null ? _id : -1, -1, commands, false, false); request.setSequence(getNextSequence()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Startup: " + request.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Sending Startup: " + request.toString()); } lockStartupTask(link); try { link.send(request.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send request: " + request.toString()); + logger.warn("Unable to send request: " + request.toString()); } if (_resource instanceof ResourceStatusUpdater) { @@ -490,11 +491,11 @@ protected void setupStartupCommand(final StartupCommand startup) { try { addr = InetAddress.getLocalHost(); } catch (final UnknownHostException e) { - s_logger.warn("unknown host? 
", e); + logger.warn("unknown host? ", e); throw new CloudRuntimeException("Cannot get local IP address"); } - final Script command = new Script("hostname", 500, s_logger); + final Script command = new Script("hostname", 500, logger); final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); final String result = command.execute(parser); final String hostname = result == null ? parser.getLine() : addr.toString(); @@ -536,14 +537,14 @@ protected void reconnect(final Link link) { _resource.disconnected(); - s_logger.info("Lost connection to host: " + _shell.getConnectedHost() + ". Attempting reconnection while we still have " + _inProgress.get() + " commands in progress."); + logger.info("Lost connection to host: " + _shell.getConnectedHost() + ". Attempting reconnection while we still have " + _inProgress.get() + " commands in progress."); _connection.stop(); try { _connection.cleanUp(); } catch (final IOException e) { - s_logger.warn("Fail to clean up old connection. " + e); + logger.warn("Fail to clean up old connection. " + e); } while (_connection.isStartup()) { @@ -553,22 +554,22 @@ protected void reconnect(final Link link) { do { final String host = _shell.getNextHost(); _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - s_logger.info("Reconnecting to host:" + host); + logger.info("Reconnecting to host:" + host); try { _connection.start(); } catch (final NioConnectionException e) { - s_logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); + logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); _connection.stop(); try { _connection.cleanUp(); } catch (final IOException ex) { - s_logger.warn("Fail to clean up old connection. " + ex); + logger.warn("Fail to clean up old connection. 
" + ex); } } _shell.getBackoffAlgorithm().waitBeforeRetry(); } while (!_connection.isStartup()); _shell.updateConnectedHost(); - s_logger.info("Connected to the host: " + _shell.getConnectedHost()); + logger.info("Connected to the host: " + _shell.getConnectedHost()); } public void processStartupAnswer(final Answer answer, final Response response, final Link link) { @@ -583,15 +584,15 @@ public void processStartupAnswer(final Answer answer, final Response response, f } final StartupAnswer startup = (StartupAnswer)answer; if (!startup.getResult()) { - s_logger.error("Not allowed to connect to the server: " + answer.getDetails()); + logger.error("Not allowed to connect to the server: " + answer.getDetails()); System.exit(1); } if (cancelled) { - s_logger.warn("Threw away a startup answer because we're reconnecting."); + logger.warn("Threw away a startup answer because we're reconnecting."); return; } - s_logger.info("Process agent startup answer, agent id = " + startup.getHostId()); + logger.info("Process agent startup answer, agent id = " + startup.getHostId()); setId(startup.getHostId()); _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. 
@@ -601,7 +602,7 @@ public void processStartupAnswer(final Answer answer, final Response response, f _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); - s_logger.info("Startup Response Received: agent id = " + getId()); + logger.info("Startup Response Received: agent id = " + getId()); } protected void processRequest(final Request request, final Link link) { @@ -616,18 +617,18 @@ protected void processRequest(final Request request, final Link link) { Answer answer; try { if (cmd.getContextParam("logid") != null) { - MDC.put("logcontextid", cmd.getContextParam("logid")); + ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (!requestLogged) // ensures request is logged only once per method call { final String requestMsg = request.toString(); if (requestMsg != null) { - s_logger.debug("Request:" + requestMsg); + logger.debug("Request:" + requestMsg); } requestLogged = true; } - s_logger.debug("Processing command: " + cmd.toString()); + logger.debug("Processing command: " + cmd.toString()); } if (cmd instanceof CronCommand) { @@ -636,7 +637,7 @@ protected void processRequest(final Request request, final Link link) { answer = new Answer(cmd, true, null); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; - s_logger.debug("Received shutdownCommand, due to: " + shutdown.getReason()); + logger.debug("Received shutdownCommand, due to: " + shutdown.getReason()); cancelTasks(); if (shutdown.isRemoveHost()) { cleanupAgentZoneProperties(); @@ -644,11 +645,11 @@ protected void processRequest(final Request request, final Link link) { _reconnectAllowed = false; answer = new Answer(cmd, true, null); } else if (cmd instanceof ReadyCommand && ((ReadyCommand)cmd).getDetails() != null) { - s_logger.debug("Not ready to connect to mgt server: " + ((ReadyCommand)cmd).getDetails()); + logger.debug("Not ready to connect to mgt 
server: " + ((ReadyCommand)cmd).getDetails()); System.exit(1); return; } else if (cmd instanceof MaintainCommand) { - s_logger.debug("Received maintainCommand, do not cancel current tasks"); + logger.debug("Received maintainCommand, do not cancel current tasks"); answer = new MaintainAnswer((MaintainCommand)cmd); } else if (cmd instanceof AgentControlCommand) { answer = null; @@ -662,7 +663,7 @@ protected void processRequest(final Request request, final Link link) { } if (answer == null) { - s_logger.warn("No handler found to process cmd: " + cmd.toString()); + logger.warn("No handler found to process cmd: " + cmd.toString()); answer = new AgentControlAnswer(cmd); } } else if (cmd instanceof SetupKeyStoreCommand && ((SetupKeyStoreCommand) cmd).isHandleByAgent()) { @@ -685,12 +686,12 @@ protected void processRequest(final Request request, final Link link) { _inProgress.decrementAndGet(); } if (answer == null) { - s_logger.debug("Response: unsupported command" + cmd.toString()); + logger.debug("Response: unsupported command" + cmd.toString()); answer = Answer.createUnsupportedCommandAnswer(cmd); } } } catch (final Throwable th) { - s_logger.warn("Caught: ", th); + logger.warn("Caught: ", th); final StringWriter writer = new StringWriter(); th.printStackTrace(new PrintWriter(writer)); answer = new Answer(cmd, false, writer.toString()); @@ -706,10 +707,10 @@ protected void processRequest(final Request request, final Link link) { } response = new Response(request, answers); } finally { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final String responseMsg = response.toString(); if (responseMsg != null) { - s_logger.debug(response.toString()); + logger.debug(response.toString()); } } @@ -717,7 +718,7 @@ protected void processRequest(final Request request, final Link link) { try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send response: " + response.toString()); + logger.warn("Unable to send 
response: " + response.toString()); } } } @@ -727,7 +728,7 @@ public Answer setupAgentKeystore(final SetupKeyStoreCommand cmd) { final String keyStorePassword = cmd.getKeystorePassword(); final long validityDays = cmd.getValidityDays(); - s_logger.debug("Setting up agent keystore file and generating CSR"); + logger.debug("Setting up agent keystore file and generating CSR"); final File agentFile = PropertiesUtil.findConfigFile("agent.properties"); if (agentFile == null) { @@ -742,7 +743,7 @@ public Answer setupAgentKeystore(final SetupKeyStoreCommand cmd) { _shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); } - Script script = new Script(_keystoreSetupPath, 300000, s_logger); + Script script = new Script(_keystoreSetupPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(keyStoreFile); script.add(storedPassword); @@ -767,7 +768,7 @@ private Answer setupAgentCertificate(final SetupCertificateCommand cmd) { final String privateKey = cmd.getPrivateKey(); final String caCertificates = cmd.getCaCertificates(); - s_logger.debug("Importing received certificate to agent's keystore"); + logger.debug("Importing received certificate to agent's keystore"); final File agentFile = PropertiesUtil.findConfigFile("agent.properties"); if (agentFile == null) { @@ -781,13 +782,13 @@ private Answer setupAgentCertificate(final SetupCertificateCommand cmd) { try { FileUtils.writeStringToFile(new File(certFile), certificate, Charset.defaultCharset()); FileUtils.writeStringToFile(new File(caCertFile), caCertificates, Charset.defaultCharset()); - s_logger.debug("Saved received client certificate to: " + certFile); + logger.debug("Saved received client certificate to: " + certFile); } catch (IOException e) { throw new CloudRuntimeException("Unable to save received agent client and ca certificates", e); } String ksPassphrase = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); - Script script = new 
Script(_keystoreCertImportPath, 300000, s_logger); + Script script = new Script(_keystoreCertImportPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(ksPassphrase); script.add(keyStoreFile); @@ -812,7 +813,7 @@ private void processManagementServerList(final List msList, final String _shell.setPersistentProperty(null, "host", newMSHosts); _shell.setHosts(newMSHosts); _shell.resetHostCounter(); - s_logger.info("Processed new management server list: " + newMSHosts); + logger.info("Processed new management server list: " + newMSHosts); } catch (final Exception e) { throw new CloudRuntimeException("Could not persist received management servers list", e); } @@ -831,8 +832,8 @@ private Answer setupManagementServerList(final SetupMSListCommand cmd) { public void processResponse(final Response response, final Link link) { final Answer answer = response.getAnswer(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received response: " + response.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Received response: " + response.toString()); } if (answer instanceof StartupAnswer) { processStartupAnswer(answer, response, link); @@ -844,7 +845,7 @@ public void processResponse(final Response response, final Link link) { } } } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && _reconnectAllowed) { - s_logger.info("Management server requested startup command to reinitialize the agent"); + logger.info("Management server requested startup command to reinitialize the agent"); sendStartup(link); } else { setLastPingResponseTime(); @@ -859,29 +860,29 @@ public void processReadyCommand(final Command cmd) { NumbersUtil.enableHumanReadableSizes = humanReadable; } - s_logger.info("Processing agent ready command, agent id = " + ready.getHostId()); + logger.info("Processing agent ready command, agent id = " + ready.getHostId()); if (ready.getHostId() != null) { setId(ready.getHostId()); } 
processManagementServerList(ready.getMsHostList(), ready.getLbAlgorithm(), ready.getLbCheckInterval()); - s_logger.info("Ready command is processed for agent id = " + getId()); + logger.info("Ready command is processed for agent id = " + getId()); } public void processOtherTask(final Task task) { final Object obj = task.get(); if (obj instanceof Response) { if (System.currentTimeMillis() - _lastPingResponseTime > _pingInterval * _shell.getPingRetries()) { - s_logger.error("Ping Interval has gone past " + _pingInterval * _shell.getPingRetries() + ". Won't reconnect to mgt server, as connection is still alive"); + logger.error("Ping Interval has gone past " + _pingInterval * _shell.getPingRetries() + ". Won't reconnect to mgt server, as connection is still alive"); return; } final PingCommand ping = _resource.getCurrentStatus(getId()); final Request request = new Request(_id, -1, ping, false); request.setSequence(getNextSequence()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending ping: " + request.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Sending ping: " + request.toString()); } try { @@ -889,14 +890,14 @@ public void processOtherTask(final Task task) { //if i can send pingcommand out, means the link is ok setLastPingResponseTime(); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send request: " + request.toString()); + logger.warn("Unable to send request: " + request.toString()); } } else if (obj instanceof Request) { final Request req = (Request)obj; final Command command = req.getCommand(); if (command.getContextParam("logid") != null) { - MDC.put("logcontextid", command.getContextParam("logid")); + ThreadContext.put("logcontextid", command.getContextParam("logid")); } Answer answer = null; _inProgress.incrementAndGet(); @@ -908,17 +909,17 @@ public void processOtherTask(final Task task) { if (answer != null) { final Response response = new Response(req, answer); - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Watch Sent: " + response.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Watch Sent: " + response.toString()); } try { task.getLink().send(response.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send response: " + response.toString()); + logger.warn("Unable to send response: " + response.toString()); } } } else { - s_logger.warn("Ignoring an unknown task"); + logger.warn("Ignoring an unknown task"); } } @@ -958,7 +959,7 @@ public AgentControlAnswer sendRequest(final AgentControlCommand cmd, final int t try { listener.wait(timeoutInMilliseconds); } catch (final InterruptedException e) { - s_logger.warn("sendRequest is interrupted, exit waiting"); + logger.warn("sendRequest is interrupted, exit waiting"); } } @@ -980,7 +981,7 @@ private void postRequest(final Request request) throws AgentControlChannelExcept try { _link.send(request.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to post agent control request: " + request.toString()); + logger.warn("Unable to post agent control request: " + request.toString()); throw new AgentControlChannelException("Unable to post agent control request due to " + e.getMessage()); } } else { @@ -1044,8 +1045,8 @@ public WatchTask(final Link link, final Request request, final Agent agent) { @Override protected void runInContext() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Scheduling " + (_request instanceof Response ? "Ping" : "Watch Task")); + if (logger.isTraceEnabled()) { + logger.trace("Scheduling " + (_request instanceof Response ? 
"Ping" : "Watch Task")); } try { if (_request instanceof Response) { @@ -1054,7 +1055,7 @@ protected void runInContext() { _link.schedule(new ServerHandler(Task.Type.OTHER, _link, _request)); } } catch (final ClosedChannelException e) { - s_logger.warn("Unable to schedule task because channel is closed"); + logger.warn("Unable to schedule task because channel is closed"); } } } @@ -1064,7 +1065,7 @@ public class StartupTask extends ManagedContextTimerTask { protected volatile boolean cancelled = false; public StartupTask(final Link link) { - s_logger.debug("Startup task created"); + logger.debug("Startup task created"); _link = link; } @@ -1074,7 +1075,7 @@ public synchronized boolean cancel() { if (!cancelled) { cancelled = true; _startupWait = _startupWaitDefault; - s_logger.debug("Startup task cancelled"); + logger.debug("Startup task cancelled"); return super.cancel(); } return true; @@ -1083,8 +1084,8 @@ public synchronized boolean cancel() { @Override protected synchronized void runInContext() { if (!cancelled) { - if (s_logger.isInfoEnabled()) { - s_logger.info("The startup command is now cancelled"); + if (logger.isInfoEnabled()) { + logger.info("The startup command is now cancelled"); } cancelled = true; _startup = null; @@ -1136,9 +1137,9 @@ public void doTask(final Task task) throws TaskExecutionException { _executor.submit(new AgentRequestHandler(getType(), getLink(), request)); } } catch (final ClassNotFoundException e) { - s_logger.error("Unable to find this request "); + logger.error("Unable to find this request "); } catch (final Exception e) { - s_logger.error("Error parsing task", e); + logger.error("Error parsing task", e); } } else if (task.getType() == Task.Type.DISCONNECT) { reconnect(task.getLink()); @@ -1166,7 +1167,7 @@ protected void runInContext() { while (true) { try { if (_inProgress.get() == 0) { - s_logger.debug("Running post certificate renewal task to restart services."); + logger.debug("Running post certificate renewal task to 
restart services."); // Let the resource perform any post certificate renewal cleanups _resource.executeRequest(new PostCertificateRenewalCommand()); @@ -1191,12 +1192,12 @@ protected void runInContext() { shell.launchNewAgent(resource); return; } - if (s_logger.isTraceEnabled()) { - s_logger.debug("Other tasks are in progress, will retry post certificate renewal command after few seconds"); + if (logger.isTraceEnabled()) { + logger.debug("Other tasks are in progress, will retry post certificate renewal command after few seconds"); } Thread.sleep(5000); } catch (final Exception e) { - s_logger.warn("Failed to execute post certificate renewal command:", e); + logger.warn("Failed to execute post certificate renewal command:", e); break; } } @@ -1214,8 +1215,8 @@ protected void runInContext() { } final String preferredHost = msList[0]; final String connectedHost = _shell.getConnectedHost(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Running preferred host checker task, connected host=" + connectedHost + ", preferred host=" + preferredHost); + if (logger.isTraceEnabled()) { + logger.trace("Running preferred host checker task, connected host=" + connectedHost + ", preferred host=" + preferredHost); } if (preferredHost != null && !preferredHost.equals(connectedHost) && _link != null) { boolean isHostUp = true; @@ -1223,20 +1224,20 @@ protected void runInContext() { socket.connect(new InetSocketAddress(preferredHost, _shell.getPort()), 5000); } catch (final IOException e) { isHostUp = false; - if (s_logger.isTraceEnabled()) { - s_logger.trace("Host: " + preferredHost + " is not reachable"); + if (logger.isTraceEnabled()) { + logger.trace("Host: " + preferredHost + " is not reachable"); } } if (isHostUp && _link != null && _inProgress.get() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preferred host " + preferredHost + " is found to be reachable, trying to reconnect"); + if (logger.isDebugEnabled()) { + logger.debug("Preferred host " + 
preferredHost + " is found to be reachable, trying to reconnect"); } _shell.resetHostCounter(); reconnect(_link); } } } catch (Throwable t) { - s_logger.error("Error caught while attempting to connect to preferred host", t); + logger.error("Error caught while attempting to connect to preferred host", t); } } diff --git a/agent/src/main/java/com/cloud/agent/AgentShell.java b/agent/src/main/java/com/cloud/agent/AgentShell.java index ef042496a372..4b2bd9a524f6 100644 --- a/agent/src/main/java/com/cloud/agent/AgentShell.java +++ b/agent/src/main/java/com/cloud/agent/AgentShell.java @@ -34,8 +34,9 @@ import org.apache.commons.lang.math.NumberUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.xml.DOMConfigurator; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.config.Configurator; import javax.naming.ConfigurationException; import java.io.File; @@ -53,7 +54,7 @@ import java.util.UUID; public class AgentShell implements IAgentShell, Daemon { - private static final Logger s_logger = Logger.getLogger(AgentShell.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(AgentShell.class); private final Properties _properties = new Properties(); private final Map _cmdLineProperties = new HashMap(); @@ -221,7 +222,7 @@ void loadProperties() throws ConfigurationException { throw new ConfigurationException("Unable to find agent.properties."); } - s_logger.info("agent.properties found at " + file.getAbsolutePath()); + LOGGER.info("agent.properties found at " + file.getAbsolutePath()); try { PropertiesUtil.loadFromFile(_properties, file); @@ -349,7 +350,7 @@ protected int getPortOrWorkers(String portOrWorkers, AgentProperties.Property c = this.getClass(); @@ -381,19 +382,19 @@ public void init(String[] args) throws ConfigurationException { if (_version == null) { throw new 
CloudRuntimeException("Unable to find the implementation version of this agent"); } - s_logger.info("Implementation Version is " + _version); + LOGGER.info("Implementation Version is " + _version); loadProperties(); parseCommand(args); - if (s_logger.isDebugEnabled()) { + if (LOGGER.isDebugEnabled()) { List properties = Collections.list((Enumeration)_properties.propertyNames()); for (String property : properties) { - s_logger.debug("Found property: " + property); + LOGGER.debug("Found property: " + property); } } - s_logger.info("Defaulting to using properties file for storage"); + LOGGER.info("Defaulting to using properties file for storage"); _storage = new PropertiesStorage(); _storage.configure("Storage", new HashMap()); @@ -403,14 +404,14 @@ public void init(String[] args) throws ConfigurationException { _properties.put(cmdLineProp.getKey(), cmdLineProp.getValue()); } - s_logger.info("Defaulting to the constant time backoff algorithm"); + LOGGER.info("Defaulting to the constant time backoff algorithm"); _backoff = new ConstantTimeBackoff(); _backoff.configure("ConstantTimeBackoff", new HashMap()); } private void launchAgent() throws ConfigurationException { String resourceClassNames = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.RESOURCE); - s_logger.trace("resource=" + resourceClassNames); + LOGGER.trace("resource=" + resourceClassNames); if (resourceClassNames != null) { launchAgentFromClassInfo(resourceClassNames); return; @@ -440,10 +441,10 @@ private void launchAgentFromClassInfo(String resourceClassNames) throws Configur private void launchAgentFromTypeInfo() throws ConfigurationException { String typeInfo = getProperty(null, "type"); if (typeInfo == null) { - s_logger.error("Unable to retrieve the type"); + LOGGER.error("Unable to retrieve the type"); throw new ConfigurationException("Unable to retrieve the type of this agent."); } - s_logger.trace("Launching agent based on type=" + typeInfo); + LOGGER.trace("Launching agent based on 
type=" + typeInfo); } public void launchNewAgent(ServerResource resource) throws ConfigurationException { @@ -477,17 +478,17 @@ public void start() { } if (ipv6disabled) { - s_logger.info("Preferring IPv4 address family for agent connection"); + LOGGER.info("Preferring IPv4 address family for agent connection"); System.setProperty("java.net.preferIPv4Stack", "true"); if (ipv6prefer) { - s_logger.info("ipv6prefer is set to true, but ipv6disabled is false. Not preferring IPv6 for agent connection"); + LOGGER.info("ipv6prefer is set to true, but ipv6disabled is false. Not preferring IPv6 for agent connection"); } } else { if (ipv6prefer) { - s_logger.info("Preferring IPv6 address family for agent connection"); + LOGGER.info("Preferring IPv6 address family for agent connection"); System.setProperty("java.net.preferIPv6Addresses", "true"); } else { - s_logger.info("Using default Java settings for IPv6 preference for agent connection"); + LOGGER.info("Using default Java settings for IPv6 preference for agent connection"); } } @@ -505,7 +506,7 @@ public void start() { String pidDir = getProperty(null, "piddir"); final String run = "agent." 
+ instance + "pid"; - s_logger.debug("Checking to see if " + run + " exists."); + LOGGER.debug("Checking to see if " + run + " exists."); ProcessUtil.pidCheck(pidDir, run); launchAgent(); @@ -514,11 +515,11 @@ public void start() { while (!_exit) Thread.sleep(1000); } catch (InterruptedException e) { - s_logger.debug("[ignored] AgentShell was interrupted."); + LOGGER.debug("[ignored] AgentShell was interrupted."); } } catch (final Exception e) { - s_logger.error("Unable to start agent: ", e); + LOGGER.error("Unable to start agent: ", e); System.exit(ExitStatus.Error.value()); } } @@ -535,7 +536,7 @@ public void destroy() { public static void main(String[] args) { try { - s_logger.debug("Initializing AgentShell from main"); + LOGGER.debug("Initializing AgentShell from main"); AgentShell shell = new AgentShell(); shell.init(args); shell.start(); diff --git a/agent/src/main/java/com/cloud/agent/dao/impl/PropertiesStorage.java b/agent/src/main/java/com/cloud/agent/dao/impl/PropertiesStorage.java index 87610c29f345..b4b22fa8d168 100644 --- a/agent/src/main/java/com/cloud/agent/dao/impl/PropertiesStorage.java +++ b/agent/src/main/java/com/cloud/agent/dao/impl/PropertiesStorage.java @@ -24,7 +24,8 @@ import java.util.Properties; import org.apache.commons.io.IOUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.dao.StorageComponent; import com.cloud.utils.PropertiesUtil; @@ -36,7 +37,7 @@ * path to the properties _file | String | db/db.properties || * } **/ public class PropertiesStorage implements StorageComponent { - private static final Logger s_logger = Logger.getLogger(PropertiesStorage.class); + protected Logger logger = LogManager.getLogger(getClass()); Properties _properties = new Properties(); File _file; String _name; @@ -49,7 +50,7 @@ public synchronized String get(String key) { @Override public synchronized void persist(String key, String value) { if 
(!loadFromFile(_file)) { - s_logger.error("Failed to load changes and then write to them"); + logger.error("Failed to load changes and then write to them"); } _properties.setProperty(key, value); FileOutputStream output = null; @@ -59,7 +60,7 @@ public synchronized void persist(String key, String value) { output.flush(); output.close(); } catch (IOException e) { - s_logger.error("Uh-oh: ", e); + logger.error("Uh-oh: ", e); } finally { IOUtils.closeQuietly(output); } @@ -70,10 +71,10 @@ private synchronized boolean loadFromFile(final File file) { PropertiesUtil.loadFromFile(_properties, file); _file = file; } catch (FileNotFoundException e) { - s_logger.error("How did we get here? ", e); + logger.error("How did we get here? ", e); return false; } catch (IOException e) { - s_logger.error("IOException: ", e); + logger.error("IOException: ", e); return false; } return true; @@ -92,13 +93,13 @@ public synchronized boolean configure(String name, Map params) { file = new File(path); try { if (!file.createNewFile()) { - s_logger.error(String.format("Unable to create _file: %s", file.getAbsolutePath())); + logger.error(String.format("Unable to create _file: %s", file.getAbsolutePath())); return false; } } catch (IOException e) { - s_logger.error(String.format("Unable to create file: %s", file.getAbsolutePath())); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("IOException while trying to create file: %s", file.getAbsolutePath()), e); + logger.error(String.format("Unable to create file: %s", file.getAbsolutePath())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("IOException while trying to create file: %s", file.getAbsolutePath()), e); } return false; } diff --git a/agent/src/main/java/com/cloud/agent/dhcp/DhcpProtocolParserServer.java b/agent/src/main/java/com/cloud/agent/dhcp/DhcpProtocolParserServer.java index 0ee9fd6860db..00488f94382b 100644 --- a/agent/src/main/java/com/cloud/agent/dhcp/DhcpProtocolParserServer.java +++ 
b/agent/src/main/java/com/cloud/agent/dhcp/DhcpProtocolParserServer.java @@ -25,12 +25,13 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.log4j.Logger; import com.cloud.utils.concurrency.NamedThreadFactory; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class DhcpProtocolParserServer extends Thread { - private static final Logger s_logger = Logger.getLogger(DhcpProtocolParserServer.class);; + protected Logger logger = LogManager.getLogger(getClass()); protected ExecutorService _executor; private int dhcpServerPort = 67; private int bufferSize = 300; @@ -54,7 +55,7 @@ public void run() { dhcpSocket.receive(dgp); } } catch (IOException e) { - s_logger.debug(e.getMessage()); + logger.debug(e.getMessage()); } } } diff --git a/agent/src/main/java/com/cloud/agent/mockvm/MockVmMgr.java b/agent/src/main/java/com/cloud/agent/mockvm/MockVmMgr.java index b155cb725b13..1e6fefa48180 100644 --- a/agent/src/main/java/com/cloud/agent/mockvm/MockVmMgr.java +++ b/agent/src/main/java/com/cloud/agent/mockvm/MockVmMgr.java @@ -22,14 +22,15 @@ import java.util.Random; import java.util.Set; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; public class MockVmMgr implements VmMgr { - private static final Logger s_logger = Logger.getLogger(MockVmMgr.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int DEFAULT_DOM0_MEM_MB = 128; private static final Random randSeed = new Random(); @@ -56,14 +57,14 @@ public Set getCurrentVMs() { public String startVM(String vmName, String vnetId, String gateway, String dns, String privateIP, String privateMac, String privateMask, String publicIP, String publicMac, String
publicMask, int cpuCount, int cpuUtilization, long ramSize, String localPath, String vncPassword) { - if (s_logger.isInfoEnabled()) { + if (logger.isInfoEnabled()) { StringBuffer sb = new StringBuffer(); sb.append("Start VM. name: " + vmName + ", vnet: " + vnetId + ", dns: " + dns); sb.append(", privateIP: " + privateIP + ", privateMac: " + privateMac + ", privateMask: " + privateMask); sb.append(", publicIP: " + publicIP + ", publicMac: " + publicMac + ", publicMask: " + publicMask); sb.append(", cpu count: " + cpuCount + ", cpuUtilization: " + cpuUtilization + ", ram : " + ramSize); sb.append(", localPath: " + localPath); - s_logger.info(sb.toString()); + logger.info(sb.toString()); } synchronized (this) { @@ -86,8 +87,8 @@ public String startVM(String vmName, String vnetId, String gateway, String dns, @Override public String stopVM(String vmName, boolean force) { - if (s_logger.isInfoEnabled()) - s_logger.info("Stop VM. name: " + vmName); + if (logger.isInfoEnabled()) + logger.info("Stop VM. name: " + vmName); synchronized (this) { MockVm vm = vms.get(vmName); @@ -102,8 +103,8 @@ public String stopVM(String vmName, boolean force) { @Override public String rebootVM(String vmName) { - if (s_logger.isInfoEnabled()) - s_logger.info("Reboot VM. name: " + vmName); + if (logger.isInfoEnabled()) + logger.info("Reboot VM. name: " + vmName); synchronized (this) { MockVm vm = vms.get(vmName); @@ -115,8 +116,8 @@ public String rebootVM(String vmName) { @Override public boolean migrate(String vmName, String params) { - if (s_logger.isInfoEnabled()) - s_logger.info("Migrate VM. name: " + vmName); + if (logger.isInfoEnabled()) + logger.info("Migrate VM. 
name: " + vmName); synchronized (this) { MockVm vm = vms.get(vmName); @@ -258,13 +259,13 @@ public MockVm createVmFromSpec(VirtualMachineTO vmSpec) { vm = vms.get(vmName); if (vm == null) { if (ramSize > getHostFreeMemory()) { - s_logger.debug("host is out of memory"); + logger.debug("host is out of memory"); throw new CloudRuntimeException("Host is out of Memory"); } int vncPort = allocVncPort(); if (vncPort < 0) { - s_logger.debug("Unable to allocate VNC port"); + logger.debug("Unable to allocate VNC port"); throw new CloudRuntimeException("Unable to allocate vnc port"); } diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java b/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java index 602aa1efcb0d..614848fb96e4 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentPropertiesFileHandler.java @@ -22,7 +22,8 @@ import org.apache.commons.beanutils.converters.IntegerConverter; import org.apache.commons.beanutils.converters.LongConverter; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; /** * This class provides a facility to read the agent's properties file and get @@ -31,7 +32,7 @@ */ public class AgentPropertiesFileHandler { - private static final Logger logger = Logger.getLogger(AgentPropertiesFileHandler.class); + protected static Logger LOGGER = LogManager.getLogger(AgentPropertiesFileHandler.class); /** * This method reads the property in the agent.properties file. @@ -47,7 +48,7 @@ public static T getPropertyValue(AgentProperties.Property property) { File agentPropertiesFile = PropertiesUtil.findConfigFile(KeyStoreUtils.AGENT_PROPSFILE); if (agentPropertiesFile == null) { - logger.debug(String.format("File [%s] was not found, we will use default defined values. 
Property [%s]: [%s].", KeyStoreUtils.AGENT_PROPSFILE, name, defaultValue)); + LOGGER.debug(String.format("File [%s] was not found, we will use default defined values. Property [%s]: [%s].", KeyStoreUtils.AGENT_PROPSFILE, name, defaultValue)); return defaultValue; } @@ -55,7 +56,7 @@ public static T getPropertyValue(AgentProperties.Property property) { try { String configValue = PropertiesUtil.loadFromFile(agentPropertiesFile).getProperty(name); if (StringUtils.isBlank(configValue)) { - logger.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue)); + LOGGER.debug(String.format("Property [%s] has empty or null value. Using default value [%s].", name, defaultValue)); return defaultValue; } @@ -67,11 +68,11 @@ public static T getPropertyValue(AgentProperties.Property property) { ConvertUtils.register(new LongConverter(defaultValue), Long.class); } - logger.debug(String.format("Property [%s] was altered. Now using the value [%s].", name, configValue)); + LOGGER.debug(String.format("Property [%s] was altered. Now using the value [%s].", name, configValue)); return (T)ConvertUtils.convert(configValue, property.getTypeClass()); } catch (IOException ex) { - logger.debug(String.format("Failed to get property [%s]. Using default value [%s].", name, defaultValue), ex); + LOGGER.debug(String.format("Failed to get property [%s]. 
Using default value [%s].", name, defaultValue), ex); } return defaultValue; diff --git a/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java b/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java index 5412c3470127..f0407a129882 100644 --- a/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java +++ b/agent/src/main/java/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java @@ -34,7 +34,6 @@ import com.cloud.agent.api.proxy.AllowConsoleAccessCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; import com.cloud.agent.Agent.ExitStatus; import com.cloud.agent.api.AgentControlAnswer; @@ -81,7 +80,6 @@ * */ public class ConsoleProxyResource extends ServerResourceBase implements ServerResource { - static final Logger s_logger = Logger.getLogger(ConsoleProxyResource.class); private final Properties properties = new Properties(); private Thread consoleProxyMain = null; @@ -101,7 +99,7 @@ public Answer executeRequest(final Command cmd) { } else if (cmd instanceof WatchConsoleProxyLoadCommand) { return execute((WatchConsoleProxyLoadCommand)cmd); } else if (cmd instanceof ReadyCommand) { - s_logger.info("Receive ReadyCommand, response with ReadyAnswer"); + logger.info("Receive ReadyCommand, response with ReadyAnswer"); return new ReadyAnswer((ReadyCommand)cmd); } else if (cmd instanceof CheckHealthCommand) { return new CheckHealthAnswer((CheckHealthCommand)cmd, true); @@ -123,13 +121,13 @@ private Answer execute(AllowConsoleAccessCommand cmd) { return new Answer(cmd); } catch (SecurityException | NoSuchMethodException | ClassNotFoundException | InvocationTargetException | IllegalAccessException e) { String errorMsg = "Unable to add allowed session due to: " + e.getMessage(); - s_logger.error(errorMsg, e); + logger.error(errorMsg, e); return new Answer(cmd, false, errorMsg); } } private Answer 
execute(StartConsoleProxyAgentHttpHandlerCommand cmd) { - s_logger.info("Invoke launchConsoleProxy() in responding to StartConsoleProxyAgentHttpHandlerCommand"); + logger.info("Invoke launchConsoleProxy() in responding to StartConsoleProxyAgentHttpHandlerCommand"); launchConsoleProxy(cmd.getKeystoreBits(), cmd.getKeystorePassword(), cmd.getEncryptorPassword(), cmd.isSourceIpCheckEnabled()); return new Answer(cmd); } @@ -140,7 +138,7 @@ private void disableRpFilter() { { out.write("0"); } catch (IOException e) { - s_logger.warn("Unable to disable rp_filter"); + logger.warn("Unable to disable rp_filter"); } } @@ -177,12 +175,12 @@ private Answer executeProxyLoadScan(final Command cmd, final long proxyVmId, fin try { is.close(); } catch (final IOException e) { - s_logger.warn("Exception when closing , console proxy address : " + proxyManagementIp); + logger.warn("Exception when closing , console proxy address : " + proxyManagementIp); success = false; } } } catch (final IOException e) { - s_logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp); + logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp); success = false; } @@ -227,14 +225,14 @@ public boolean configure(String name, Map params) throws Configu if (eth1Ip != null) { params.put("private.network.device", "eth1"); } else { - s_logger.info("eth1ip parameter has not been configured, assuming that we are not inside a system vm"); + logger.info("eth1ip parameter has not been configured, assuming that we are not inside a system vm"); } String eth2ip = (String)params.get("eth2ip"); if (eth2ip != null) { params.put("public.network.device", "eth2"); } else { - s_logger.info("eth2ip parameter is not found, assuming that we are not inside a system vm"); + logger.info("eth2ip parameter is not found, assuming that we are not inside a system vm"); } super.configure(name, params); @@ -262,7 +260,7 @@ public boolean 
configure(String name, Map params) throws Configu } String internalDns1 = (String) params.get("internaldns1"); if (internalDns1 == null) { - s_logger.warn("No DNS entry found during configuration of ConsoleProxy"); + logger.warn("No DNS entry found during configuration of ConsoleProxy"); } else { addRouteToInternalIpOrCidr(localGateway, eth1Ip, eth1Mask, internalDns1); } @@ -280,20 +278,20 @@ public boolean configure(String name, Map params) throws Configu disableRpFilter(); } - if (s_logger.isInfoEnabled()) - s_logger.info("Receive proxyVmId in ConsoleProxyResource configuration as " + proxyVmId); + if (logger.isInfoEnabled()) + logger.info("Receive proxyVmId in ConsoleProxyResource configuration as " + proxyVmId); return true; } private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String eth1mask, String destIpOrCidr) { - s_logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr); + logger.debug("addRouteToInternalIp: localgw=" + localgw + ", eth1ip=" + eth1ip + ", eth1mask=" + eth1mask + ",destIp=" + destIpOrCidr); if (destIpOrCidr == null) { - s_logger.debug("addRouteToInternalIp: destIp is null"); + logger.debug("addRouteToInternalIp: destIp is null"); return; } if (!NetUtils.isValidIp4(destIpOrCidr) && !NetUtils.isValidIp4Cidr(destIpOrCidr)) { - s_logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr); + logger.warn(" destIp is not a valid ip address or cidr destIp=" + destIpOrCidr); return; } boolean inSameSubnet = false; @@ -301,27 +299,27 @@ private void addRouteToInternalIpOrCidr(String localgw, String eth1ip, String et if (eth1ip != null && eth1mask != null) { inSameSubnet = NetUtils.sameSubnet(eth1ip, destIpOrCidr, eth1mask); } else { - s_logger.warn("addRouteToInternalIp: unable to determine same subnet: eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", eth1mask=" + eth1mask); + logger.warn("addRouteToInternalIp: unable to 
determine same subnet: eth1ip=" + eth1ip + ", dest ip=" + destIpOrCidr + ", eth1mask=" + eth1mask); } } else { inSameSubnet = NetUtils.isNetworkAWithinNetworkB(destIpOrCidr, NetUtils.ipAndNetMaskToCidr(eth1ip, eth1mask)); } if (inSameSubnet) { - s_logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip); + logger.debug("addRouteToInternalIp: dest ip " + destIpOrCidr + " is in the same subnet as eth1 ip " + eth1ip); return; } - Script command = new Script("/bin/bash", s_logger); + Script command = new Script("/bin/bash", logger); command.add("-c"); command.add("ip route delete " + destIpOrCidr); command.execute(); - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); command.add("-c"); command.add("ip route add " + destIpOrCidr + " via " + localgw); String result = command.execute(); if (result != null) { - s_logger.warn("Error in configuring route to internal ip err=" + result); + logger.warn("Error in configuring route to internal ip err=" + result); } else { - s_logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw); + logger.debug("addRouteToInternalIp: added route to internal ip=" + destIpOrCidr + " via " + localgw); } } @@ -332,36 +330,36 @@ public String getName() { private void launchConsoleProxy(final byte[] ksBits, final String ksPassword, final String encryptorPassword, final Boolean isSourceIpCheckEnabled) { final Object resource = this; - s_logger.info("Building class loader for com.cloud.consoleproxy.ConsoleProxy"); + logger.info("Building class loader for com.cloud.consoleproxy.ConsoleProxy"); if (consoleProxyMain == null) { - s_logger.info("Running com.cloud.consoleproxy.ConsoleProxy with encryptor password=" + encryptorPassword); + logger.info("Running com.cloud.consoleproxy.ConsoleProxy with encryptor password=" + encryptorPassword); consoleProxyMain = new Thread(new ManagedContextRunnable() { @Override protected 
void runInContext() { try { Class consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy"); try { - s_logger.info("Invoke startWithContext()"); + logger.info("Invoke startWithContext()"); Method method = consoleProxyClazz.getMethod("startWithContext", Properties.class, Object.class, byte[].class, String.class, String.class, Boolean.class); method.invoke(null, properties, resource, ksBits, ksPassword, encryptorPassword, isSourceIpCheckEnabled); } catch (SecurityException e) { - s_logger.error("Unable to launch console proxy due to SecurityException", e); + logger.error("Unable to launch console proxy due to SecurityException", e); System.exit(ExitStatus.Error.value()); } catch (NoSuchMethodException e) { - s_logger.error("Unable to launch console proxy due to NoSuchMethodException", e); + logger.error("Unable to launch console proxy due to NoSuchMethodException", e); System.exit(ExitStatus.Error.value()); } catch (IllegalArgumentException e) { - s_logger.error("Unable to launch console proxy due to IllegalArgumentException", e); + logger.error("Unable to launch console proxy due to IllegalArgumentException", e); System.exit(ExitStatus.Error.value()); } catch (IllegalAccessException e) { - s_logger.error("Unable to launch console proxy due to IllegalAccessException", e); + logger.error("Unable to launch console proxy due to IllegalAccessException", e); System.exit(ExitStatus.Error.value()); } catch (InvocationTargetException e) { - s_logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e); + logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e); System.exit(ExitStatus.Error.value()); } } catch (final ClassNotFoundException e) { - s_logger.error("Unable to launch console proxy due to ClassNotFoundException"); + logger.error("Unable to launch console proxy due to ClassNotFoundException"); 
System.exit(ExitStatus.Error.value()); } } @@ -369,7 +367,7 @@ protected void runInContext() { consoleProxyMain.setDaemon(true); consoleProxyMain.start(); } else { - s_logger.info("com.cloud.consoleproxy.ConsoleProxy is already running"); + logger.info("com.cloud.consoleproxy.ConsoleProxy is already running"); try { Class consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy"); @@ -378,22 +376,22 @@ protected void runInContext() { methodSetup = consoleProxyClazz.getMethod("setIsSourceIpCheckEnabled", Boolean.class); methodSetup.invoke(null, isSourceIpCheckEnabled); } catch (SecurityException e) { - s_logger.error("Unable to launch console proxy due to SecurityException", e); + logger.error("Unable to launch console proxy due to SecurityException", e); System.exit(ExitStatus.Error.value()); } catch (NoSuchMethodException e) { - s_logger.error("Unable to launch console proxy due to NoSuchMethodException", e); + logger.error("Unable to launch console proxy due to NoSuchMethodException", e); System.exit(ExitStatus.Error.value()); } catch (IllegalArgumentException e) { - s_logger.error("Unable to launch console proxy due to IllegalArgumentException", e); + logger.error("Unable to launch console proxy due to IllegalArgumentException", e); System.exit(ExitStatus.Error.value()); } catch (IllegalAccessException e) { - s_logger.error("Unable to launch console proxy due to IllegalAccessException", e); + logger.error("Unable to launch console proxy due to IllegalAccessException", e); System.exit(ExitStatus.Error.value()); } catch (InvocationTargetException e) { - s_logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e); + logger.error("Unable to launch console proxy due to InvocationTargetException " + e.getTargetException().toString(), e); System.exit(ExitStatus.Error.value()); } catch (final ClassNotFoundException e) { - s_logger.error("Unable to launch console proxy due to 
ClassNotFoundException", e); + logger.error("Unable to launch console proxy due to ClassNotFoundException", e); System.exit(ExitStatus.Error.value()); } } @@ -420,10 +418,10 @@ public String authenticateConsoleAccess(String host, String port, String vmId, S result.setTunnelUrl(authAnswer.getTunnelUrl()); result.setTunnelSession(authAnswer.getTunnelSession()); } else { - s_logger.error("Authentication failed for vm: " + vmId + " with sid: " + sid); + logger.error("Authentication failed for vm: " + vmId + " with sid: " + sid); } } catch (AgentControlChannelException e) { - s_logger.error("Unable to send out console access authentication request due to " + e.getMessage(), e); + logger.error("Unable to send out console access authentication request due to " + e.getMessage(), e); } return new Gson().toJson(result); @@ -434,17 +432,17 @@ public void reportLoadInfo(String gsonLoadInfo) { try { getAgentControl().postRequest(cmd); - if (s_logger.isDebugEnabled()) - s_logger.debug("Report proxy load info, proxy : " + proxyVmId + ", load: " + gsonLoadInfo); + if (logger.isDebugEnabled()) + logger.debug("Report proxy load info, proxy : " + proxyVmId + ", load: " + gsonLoadInfo); } catch (AgentControlChannelException e) { - s_logger.error("Unable to send out load info due to " + e.getMessage(), e); + logger.error("Unable to send out load info due to " + e.getMessage(), e); } } public void ensureRoute(String address) { if (localGateway != null) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Ensure route for " + address + " via " + localGateway); + if (logger.isDebugEnabled()) + logger.debug("Ensure route for " + address + " via " + localGateway); // this method won't be called in high frequency, serialize access // to script execution @@ -452,7 +450,7 @@ public void ensureRoute(String address) { try { addRouteToInternalIpOrCidr(localGateway, eth1Ip, eth1Mask, address); } catch (Throwable e) { - s_logger.warn("Unexpected exception while adding internal route to " + address, 
e); + logger.warn("Unexpected exception while adding internal route to " + address, e); } } } diff --git a/api/src/main/java/com/cloud/agent/api/Command.java b/api/src/main/java/com/cloud/agent/api/Command.java index c873139099cc..eb979c0060b9 100644 --- a/api/src/main/java/com/cloud/agent/api/Command.java +++ b/api/src/main/java/com/cloud/agent/api/Command.java @@ -20,6 +20,8 @@ import java.util.Map; import com.cloud.agent.api.LogLevel.Log4jLevel; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; /** * implemented by classes that extends the Command class. Command specifies @@ -27,6 +29,8 @@ */ public abstract class Command { + protected transient Logger logger = LogManager.getLogger(getClass()); + public static enum OnError { Continue, Stop } diff --git a/api/src/main/java/com/cloud/agent/api/LogLevel.java b/api/src/main/java/com/cloud/agent/api/LogLevel.java index a8da272f39f4..136cb6d7228c 100644 --- a/api/src/main/java/com/cloud/agent/api/LogLevel.java +++ b/api/src/main/java/com/cloud/agent/api/LogLevel.java @@ -23,8 +23,8 @@ import java.lang.annotation.Retention; import java.lang.annotation.Target; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; /** */ @@ -41,7 +41,7 @@ private Log4jLevel(Level level) { } public boolean enabled(Logger logger) { - return _level != Level.OFF && logger.isEnabledFor(_level); + return _level != Level.OFF && logger.isEnabled(_level); } } diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java b/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java index d4daf0e4270a..6396e3deb723 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java @@ -39,7 +39,8 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.math.NumberUtils; import 
org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -63,7 +64,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class OVFHelper { - private static final Logger s_logger = Logger.getLogger(OVFHelper.class); + protected Logger logger = LogManager.getLogger(getClass()); private final OVFParser ovfParser; @@ -118,7 +119,7 @@ protected OVFPropertyTO createOVFPropertyFromNode(Node node, int index, String c boolean password = StringUtils.isNotBlank(passStr) && passStr.equalsIgnoreCase("true"); String label = ovfParser.getChildNodeValue(node, "Label"); String description = ovfParser.getChildNodeValue(node, "Description"); - s_logger.debug("Creating OVF property index " + index + (category == null ? "" : " for category " + category) + logger.debug("Creating OVF property index " + index + (category == null ? 
"" : " for category " + category) + " with key = " + key); return new OVFPropertyTO(key, type, value, qualifiers, userConfigurable, label, description, password, index, category); @@ -151,7 +152,7 @@ public List getConfigurableOVFPropertiesFromDocument(Document doc if (child.getNodeName().equalsIgnoreCase("Category") || child.getNodeName().endsWith(":Category")) { lastCategoryFound = child.getTextContent(); - s_logger.info("Category found " + lastCategoryFound); + logger.info("Category found " + lastCategoryFound); } else if (child.getNodeName().equalsIgnoreCase("Property") || child.getNodeName().endsWith(":Property")) { OVFPropertyTO prop = createOVFPropertyFromNode(child, propertyIndex, lastCategoryFound); @@ -249,13 +250,13 @@ private List matchHardwareItemsToDiskAndFilesInformation(List extractDisksFromOvfDocumentTree(Document doc) { od._controller = getControllerType(items, od._diskId); vd.add(od); } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("found %d disk definitions",vd.size())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("found %d disk definitions",vd.size())); } return vd; } @@ -365,8 +366,8 @@ protected List extractFilesFromOvfDocumentTree(File ovfFile, Document d vf.add(of); } } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("found %d file definitions in %s",vf.size(), ovfFile.getPath())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("found %d file definitions in %s",vf.size(), ovfFile.getPath())); } return vf; } @@ -461,7 +462,7 @@ public void rewriteOVFFileForSingleDisk(final String origOvfFilePath, final Stri Element disk = (Element)disks.item(i); String fileRef = ovfParser.getNodeAttribute(disk, "fileRef"); if (keepfile == null) { - s_logger.info("FATAL: OVA format error"); + logger.info("FATAL: OVA format error"); } else if (keepfile.equals(fileRef)) { keepdisk = ovfParser.getNodeAttribute(disk, "diskId"); } else { @@ -505,7 +506,7 @@ private void 
writeDocumentToFile(String newOvfFilePath, Document doc) { outfile.write(writer.toString()); outfile.close(); } catch (IOException | TransformerException e) { - s_logger.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e); + logger.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e); throw new CloudRuntimeException(e); } } @@ -521,8 +522,8 @@ OVFFile getFileDefinitionFromDiskDefinition(String fileRef, List files) public List getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException { if (doc == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("no document to parse; returning no prerequisite networks"); + if (logger.isTraceEnabled()) { + logger.trace("no document to parse; returning no prerequisite networks"); } return Collections.emptyList(); } @@ -539,8 +540,8 @@ public List getNetPrerequisitesFromDocument(Document doc) throws I private void matchNicsToNets(Map nets, Node systemElement) { final DocumentTraversal traversal = (DocumentTraversal) systemElement; final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("starting out with %d network-prerequisites, parsing hardware",nets.size())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("starting out with %d network-prerequisites, parsing hardware",nets.size())); } int nicCount = 0; for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) { @@ -549,8 +550,8 @@ private void matchNicsToNets(Map nets, Node systemElement) nicCount++; String name = e.getTextContent(); // should be in our nets if(nets.get(name) == null) { - if(s_logger.isInfoEnabled()) { - s_logger.info(String.format("found a nic definition without a network definition byname %s, adding it to the list.", name)); + if(logger.isInfoEnabled()) { + logger.info(String.format("found a nic definition without a network definition byname 
%s, adding it to the list.", name)); } nets.put(name, new OVFNetworkTO()); } @@ -560,8 +561,8 @@ private void matchNicsToNets(Map nets, Node systemElement) } } } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("ending up with %d network-prerequisites, parsed %d nics", nets.size(), nicCount)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("ending up with %d network-prerequisites, parsed %d nics", nets.size(), nicCount)); } } @@ -584,7 +585,7 @@ private void fillNicPrerequisites(OVFNetworkTO nic, Node parentNode) { int addressOnParent = Integer.parseInt(addressOnParentStr); nic.setAddressOnParent(addressOnParent); } catch (NumberFormatException e) { - s_logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: " + addressOnParentStr); + logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: " + addressOnParentStr); } boolean automaticAllocation = StringUtils.isNotBlank(automaticAllocationStr) && Boolean.parseBoolean(automaticAllocationStr); @@ -596,7 +597,7 @@ private void fillNicPrerequisites(OVFNetworkTO nic, Node parentNode) { int instanceId = Integer.parseInt(instanceIdStr); nic.setInstanceID(instanceId); } catch (NumberFormatException e) { - s_logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: " + instanceIdStr); + logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: " + instanceIdStr); } nic.setResourceSubType(resourceSubType); @@ -608,7 +609,7 @@ private void checkForOnlyOneSystemNode(Document doc) throws InternalErrorExcepti NodeList systemElements = ovfParser.getElementsFromOVFDocument(doc, "VirtualSystem"); if (systemElements.getLength() != 1) { String msg = "found " + systemElements.getLength() + " system definitions in OVA, can only handle exactly one."; - s_logger.warn(msg); + logger.warn(msg); throw new 
InternalErrorException(msg); } } @@ -629,8 +630,8 @@ private Map getNetworksFromDocumentTree(Document doc) { nets.put(networkName,network); } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("found %d networks in template", nets.size())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("found %d networks in template", nets.size())); } return nets; } @@ -770,7 +771,7 @@ private Long getLongValueFromString(String value) { try { return Long.parseLong(value); } catch (NumberFormatException e) { - s_logger.debug("Could not parse the value: " + value + ", ignoring it"); + logger.debug("Could not parse the value: " + value + ", ignoring it"); } } return null; @@ -781,7 +782,7 @@ private Integer getIntValueFromString(String value) { try { return Integer.parseInt(value); } catch (NumberFormatException e) { - s_logger.debug("Could not parse the value: " + value + ", ignoring it"); + logger.debug("Could not parse the value: " + value + ", ignoring it"); } } return null; @@ -819,7 +820,7 @@ public List getEulaSectionsFromDocument(Document doc) { try { compressedLicense = compressOVFEula(eulaLicense); } catch (IOException e) { - s_logger.error("Could not compress the license for info " + eulaInfo); + logger.error("Could not compress the license for info " + eulaInfo); continue; } OVFEulaSectionTO eula = new OVFEulaSectionTO(eulaInfo, compressedLicense, eulaIndex); diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java b/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java index b66fbe418d73..38f478d63cf8 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java @@ -27,7 +27,8 @@ import org.apache.cloudstack.utils.security.ParserUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import 
org.w3c.dom.Element; import org.w3c.dom.Node; @@ -36,7 +37,7 @@ import org.xml.sax.SAXException; public class OVFParser { - private static final Logger s_logger = Logger.getLogger(OVFParser.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String DEFAULT_OVF_SCHEMA = "http://schemas.dmtf.org/ovf/envelope/1"; private static final String VMW_SCHEMA = "http://www.vmware.com/schema/ovf"; @@ -53,7 +54,7 @@ public OVFParser() { documentBuilderFactory.setNamespaceAware(true); documentBuilder = documentBuilderFactory.newDocumentBuilder(); } catch (ParserConfigurationException e) { - s_logger.error("Cannot start the OVF parser: " + e.getMessage(), e); + logger.error("Cannot start the OVF parser: " + e.getMessage(), e); } } @@ -69,7 +70,7 @@ public Document parseOVFFile(String ovfFilePath) { try { return documentBuilder.parse(new File(ovfFilePath)); } catch (SAXException | IOException e) { - s_logger.error("Error parsing " + ovfFilePath + " " + e.getMessage(), e); + logger.error("Error parsing " + ovfFilePath + " " + e.getMessage(), e); return null; } } diff --git a/api/src/main/java/com/cloud/network/NetworkProfile.java b/api/src/main/java/com/cloud/network/NetworkProfile.java index 448530695750..1a5c80ea8718 100644 --- a/api/src/main/java/com/cloud/network/NetworkProfile.java +++ b/api/src/main/java/com/cloud/network/NetworkProfile.java @@ -22,10 +22,8 @@ import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.Mode; import com.cloud.network.Networks.TrafficType; -import org.apache.log4j.Logger; public class NetworkProfile implements Network { - static final Logger s_logger = Logger.getLogger(NetworkProfile.class); private final long id; private final String uuid; private final long dataCenterId; diff --git a/api/src/main/java/com/cloud/storage/DataStoreRole.java b/api/src/main/java/com/cloud/storage/DataStoreRole.java index 185e370159ca..d9af495ab00d 100644 --- 
a/api/src/main/java/com/cloud/storage/DataStoreRole.java +++ b/api/src/main/java/com/cloud/storage/DataStoreRole.java @@ -20,6 +20,7 @@ import com.cloud.utils.exception.CloudRuntimeException; + public enum DataStoreRole { Primary("primary"), Image("image"), ImageCache("imagecache"), Backup("backup"), Object("object"); diff --git a/api/src/main/java/org/apache/cloudstack/acl/RoleType.java b/api/src/main/java/org/apache/cloudstack/acl/RoleType.java index ec82cd6605bb..005d47c85bc2 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleType.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleType.java @@ -20,7 +20,8 @@ import com.cloud.user.Account; import com.google.common.base.Enums; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.util.HashMap; import java.util.Map; @@ -37,7 +38,7 @@ public enum RoleType { private Account.Type accountType; private int mask; - private static Logger logger = Logger.getLogger(RoleType.class.getName()); + private static Logger LOGGER = LogManager.getLogger(RoleType.class.getName()); private static Map ACCOUNT_TYPE_MAP = new HashMap<>(); static { @@ -104,10 +105,10 @@ public static Long getRoleByAccountType(final Long roleId, final Account.Type ac * */ public static Account.Type getAccountTypeByRole(final Role role, final Account.Type defautAccountType) { if (role != null) { - logger.debug(String.format("Role [%s] is not null; therefore, we use its account type [%s].", role, defautAccountType)); + LOGGER.debug(String.format("Role [%s] is not null; therefore, we use its account type [%s].", role, defautAccountType)); return role.getRoleType().getAccountType(); } - logger.debug(String.format("Role is null; therefore, we use the default account type [%s] value.", defautAccountType)); + LOGGER.debug(String.format("Role is null; therefore, we use the default account type [%s] value.", defautAccountType)); return defautAccountType; } } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/AbstractGetUploadParamsCmd.java b/api/src/main/java/org/apache/cloudstack/api/AbstractGetUploadParamsCmd.java index ed3381ae97c0..083a1be00f56 100644 --- a/api/src/main/java/org/apache/cloudstack/api/AbstractGetUploadParamsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/AbstractGetUploadParamsCmd.java @@ -25,11 +25,9 @@ import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; public abstract class AbstractGetUploadParamsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AbstractGetUploadParamsCmd.class.getName()); @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "the name of the volume/template/iso") private String name; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java index 865ec7456409..6859b0a7f406 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseAsyncCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api; -import org.apache.log4j.Logger; /** * queryAsyncJobResult API command. 
@@ -30,7 +29,6 @@ public abstract class BaseAsyncCmd extends BaseCmd { public static final String migrationSyncObject = "migration"; public static final String snapshotHostSyncObject = "snapshothost"; public static final String gslbSyncObject = "globalserverloadbalancer"; - private static final Logger s_logger = Logger.getLogger(BaseAsyncCmd.class.getName()); private Object job; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java index f32922819b01..323fd4e6f64d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java @@ -47,7 +47,8 @@ import org.apache.cloudstack.storage.template.VnfTemplateManager; import org.apache.cloudstack.usage.UsageService; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.configuration.ConfigurationService; import com.cloud.exception.ConcurrentOperationException; @@ -95,7 +96,7 @@ import com.cloud.vm.snapshot.VMSnapshotService; public abstract class BaseCmd { - private static final Logger s_logger = Logger.getLogger(BaseCmd.class.getName()); + protected transient Logger logger = LogManager.getLogger(getClass()); public static final String RESPONSE_SUFFIX = "response"; public static final String RESPONSE_TYPE_XML = HttpUtils.RESPONSE_TYPE_XML; public static final String RESPONSE_TYPE_JSON = HttpUtils.RESPONSE_TYPE_JSON; @@ -374,7 +375,7 @@ public List getParamFields() { if (roleIsAllowed) { validFields.add(field); } else { - s_logger.debug("Ignoring parameter " + parameterAnnotation.name() + " as the caller is not authorized to pass it in"); + logger.debug("Ignoring parameter " + parameterAnnotation.name() + " as the caller is not authorized to pass it in"); } } @@ -419,7 +420,7 @@ public boolean isDisplay(){ if(!isDisplay) break; } catch (Exception e){ - 
s_logger.trace("Caught exception while checking first class entities for display property, continuing on", e); + logger.trace("Caught exception while checking first class entities for display property, continuing on", e); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseListTemplateOrIsoPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseListTemplateOrIsoPermissionsCmd.java index 052d7d1e8b72..be95547a8a73 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseListTemplateOrIsoPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseListTemplateOrIsoPermissionsCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.command.ResponseViewProvider; @@ -28,7 +27,6 @@ import com.cloud.user.Account; public abstract class BaseListTemplateOrIsoPermissionsCmd extends BaseCmd implements ResponseViewProvider { - public Logger logger = getLogger(); protected static final String s_name = "listtemplatepermissionsresponse"; ///////////////////////////////////////////////////// @@ -59,9 +57,6 @@ public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked } - protected Logger getLogger() { - return Logger.getLogger(BaseListTemplateOrIsoPermissionsCmd.class); - } @Override public String getCommandName() { diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java index 08f390f19724..e3aead6881ba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.response.GuestOSResponse; import org.apache.cloudstack.api.response.TemplateResponse; @@ -24,7 +23,6 @@ import java.util.Map; public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(BaseUpdateTemplateOrIsoCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoPermissionsCmd.java index 410ffefb00dd..e6ee0897db02 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoPermissionsCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.SuccessResponse; @@ -27,16 +26,12 @@ import com.cloud.exception.InvalidParameterValueException; public abstract class BaseUpdateTemplateOrIsoPermissionsCmd extends BaseCmd { - public Logger _logger = getLogger(); protected String _name = getResponseName(); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// // /////////////////////////////////////////////////// - protected Logger getLogger() { - return Logger.getLogger(BaseUpdateTemplateOrIsoPermissionsCmd.class); - } protected String getResponseName() { return "updatetemplateorisopermissionsresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java index 945bb956c3eb..6dbc6acc59a9 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @APICommand(name = "createAccount", description = "Creates an account", responseObject = AccountResponse.class, entityType = {Account.class}, requestHasSensitiveInfo = true, responseHasSensitiveInfo = true) public class CreateAccountCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateAccountCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java index 9a0ea4e9ec0e..36e22acff91e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.region.RegionService; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.user.Account; @@ -39,7 +38,6 @@ @APICommand(name = "deleteAccount", description = "Deletes a account, and all users associated with this account", responseObject = SuccessResponse.class, entityType = {Account.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAccountCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAccountCmd.class.getName()); 
///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java index 91b0673e186e..55293eca619e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -43,7 +42,6 @@ @APICommand(name = "disableAccount", description = "Disables an account", responseObject = AccountResponse.class, entityType = {Account.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DisableAccountCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DisableAccountCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java index cc37dc237134..da96383f1345 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "enableAccount", description = "Enables an account", responseObject = AccountResponse.class, entityType = 
{Account.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class EnableAccountCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(EnableAccountCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/LockAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/LockAccountCmd.java index a430914a25a1..d7847373e927 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/LockAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/LockAccountCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.account; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class LockAccountCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LockAccountCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java index 36d299bcfc85..91cbb90e4da4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.response.RoleResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -43,7 +42,6 @@ @APICommand(name = "updateAccount", description = "Updates 
account information for the authenticated user", responseObject = AccountResponse.class, entityType = {Account.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class UpdateAccountCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateAccountCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AcquirePodIpCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AcquirePodIpCmdByAdmin.java index a965624a2a94..7397697bd2cc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AcquirePodIpCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AcquirePodIpCmdByAdmin.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.address; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "acquirePodIpAddress", description = "Allocates IP addresses in respective Pod of a Zone", responseObject = AcquirePodIpCmdResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AcquirePodIpCmdByAdmin extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AcquirePodIpCmdByAdmin.class.getName()); private static final String s_name = "acquirepodipaddress"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AssociateIPAddrCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AssociateIPAddrCmdByAdmin.java index 56f41b5f58ed..672691ffbd8f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AssociateIPAddrCmdByAdmin.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/address/AssociateIPAddrCmdByAdmin.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.address; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -27,5 +26,4 @@ @APICommand(name = "associateIpAddress", description = "Acquires and associates a public IP to an account.", responseObject = IPAddressResponse.class, responseView = ResponseView.Full, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AssociateIPAddrCmdByAdmin extends AssociateIPAddrCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(AssociateIPAddrCmdByAdmin.class.getName()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/address/ReleasePodIpCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/address/ReleasePodIpCmdByAdmin.java index b6bfbcaa1e22..7d4cab6a0ac4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/address/ReleasePodIpCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/address/ReleasePodIpCmdByAdmin.java @@ -16,7 +16,6 @@ //under the License. 
package org.apache.cloudstack.api.command.admin.address; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -31,7 +30,6 @@ @APICommand(name = "releasePodIpAddress", description = "Releases a Pod IP back to the Pod", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleasePodIpCmdByAdmin extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ReleasePodIpCmdByAdmin.class.getName()); private static final String s_name = "releasepodipresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/affinitygroup/UpdateVMAffinityGroupCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/affinitygroup/UpdateVMAffinityGroupCmdByAdmin.java index 7bf9b64481b3..43e70838e18f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/affinitygroup/UpdateVMAffinityGroupCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/affinitygroup/UpdateVMAffinityGroupCmdByAdmin.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.affinitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -33,5 +32,4 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class UpdateVMAffinityGroupCmdByAdmin extends UpdateVMAffinityGroupCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVMAffinityGroupCmdByAdmin.class.getName()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/alert/GenerateAlertCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/alert/GenerateAlertCmd.java index 9446272df6e6..30f3bbbec1b2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/alert/GenerateAlertCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/alert/GenerateAlertCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; @@ -35,7 +34,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GenerateAlertCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(GenerateAlertCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java index 617d1104eda5..7fa66ffff1f4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/CreateCounterCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.autoscale; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "createCounter", description = "Adds metric counter for VM auto scaling", responseObject = CounterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateCounterCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateCounterCmd.class.getName()); private static final String s_name = "counterresponse"; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java index e1f7859a56b5..b7b2ce5cb70d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ @APICommand(name = "deleteCounter", description = "Deletes a counter for VM auto scaling", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteCounterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteCounterCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// @@ -55,7 +53,7 @@ public void execute() { try { result = _autoScaleService.deleteCounter(getId()); } catch (ResourceInUseException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex.getMessage()); } @@ -63,7 +61,7 @@ 
public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); } else { - s_logger.warn("Failed to delete counter with Id: " + getId()); + logger.warn("Failed to delete counter with Id: " + getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete counter."); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java index 1cd6f4a83280..9de06715ee74 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; @@ -39,7 +38,6 @@ @APICommand(name = "updateBackupOffering", description = "Updates a backup offering.", responseObject = BackupOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.16.0") public class UpdateBackupOfferingCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(UpdateBackupOfferingCmd.class.getName()); @Inject private BackupManager backupManager; @@ -100,7 +98,7 @@ public void execute() { this.setResponseObject(response); } catch (CloudRuntimeException e) { ApiErrorCode paramError = e instanceof InvalidParameterValueException ? 
ApiErrorCode.PARAM_ERROR : ApiErrorCode.INTERNAL_ERROR; - LOGGER.error(String.format("Failed to update Backup Offering [id: %s] due to: [%s].", id, e.getMessage()), e); + logger.error(String.format("Failed to update Backup Offering [id: %s] due to: [%s].", id, e.getMessage()), e); throw new ServerApiException(paramError, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/ca/IssueCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/ca/IssueCertificateCmd.java index 4c543fdb6a89..463af000f58b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/ca/IssueCertificateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/ca/IssueCertificateCmd.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.utils.security.CertUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; @@ -49,7 +48,6 @@ since = "4.11.0", authorized = {RoleType.Admin}) public class IssueCertificateCmd extends BaseAsyncCmd { - private static final Logger LOG = Logger.getLogger(IssueCertificateCmd.class); @Inject @@ -132,7 +130,7 @@ public void execute() { certificateResponse.setCaCertificate(CertUtils.x509CertificatesToPem(certificate.getCaCertificates())); } } catch (final IOException e) { - LOG.error("Failed to generate and convert client certificate(s) to PEM due to error: ", e); + logger.error("Failed to generate and convert client certificate(s) to PEM due to error: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to process and return client certificate"); } certificateResponse.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index df48b2511b60..184a443d9db9 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -21,7 +21,6 @@ import java.util.List; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "addCluster", description = "Adds a new cluster", responseObject = ClusterResponse.class, requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class AddClusterCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddClusterCmd.class.getName()); @Parameter(name = ApiConstants.CLUSTER_NAME, type = CommandType.STRING, required = true, description = "the cluster name") @@ -226,10 +224,10 @@ public void execute() { this.setResponseObject(response); } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ResourceInUseException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); ServerApiException e = new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); for (String proxyObj : ex.getIdProxyList()) { e.addProxyObject(proxyObj); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/DeleteClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/DeleteClusterCmd.java index 497cef4c3eca..2b1cfe8bcb58 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/DeleteClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/DeleteClusterCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.cluster; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "deleteCluster", description = "Deletes a cluster.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteClusterCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteClusterCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java index d83330c664a9..67d0678410cf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/ListClustersCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "listClusters", description = "Lists clusters.", responseObject = ClusterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListClustersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListClustersCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java index dd527fb409af..77bb97fd39d7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.cluster; import 
org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "updateCluster", description = "Updates an existing cluster", responseObject = ClusterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateClusterCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddClusterCmd.class.getName()); @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ClusterResponse.class, required = true, description = "the ID of the Cluster") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgGroupsByCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgGroupsByCmd.java index 46ab10cb2bcd..d735218169d6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgGroupsByCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgGroupsByCmd.java @@ -27,14 +27,12 @@ import org.apache.cloudstack.api.response.ConfigurationGroupResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.config.ConfigurationGroup; -import org.apache.log4j.Logger; import com.cloud.utils.Pair; @APICommand(name = ListCfgGroupsByCmd.APINAME, description = "Lists all configuration groups (primarily used for UI).", responseObject = ConfigurationGroupResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.18.0") public class ListCfgGroupsByCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListCfgGroupsByCmd.class.getName()); public static final String APINAME = "listConfigurationGroups"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgsByCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgsByCmd.java index 80abe5d3e8ff..e365d8bc2dc7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgsByCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListCfgsByCmd.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -47,7 +46,6 @@ public class ListCfgsByCmd extends BaseListCmd { public static final String APINAME = "listConfigurations"; - public static final Logger s_logger = Logger.getLogger(ListCfgsByCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java index 4f5186a6df69..78fa31beeb48 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseListCmd; @@ -29,7 +28,6 @@ @APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDeploymentPlannersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName()); ///////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListHypervisorCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListHypervisorCapabilitiesCmd.java index 64f1c19b70cd..e7cc9e0234e2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListHypervisorCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ListHypervisorCapabilitiesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListHypervisorCapabilitiesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListHypervisorCapabilitiesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ResetCfgCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ResetCfgCmd.java index ada389e00930..f114b263b634 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ResetCfgCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/ResetCfgCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.ClusterResponse; @@ -41,7 +40,6 @@ @APICommand(name = "resetConfiguration", description = "Resets a configuration. 
The configuration will be set to default value for global setting, and removed from account_details or domain_details for Account/Domain settings", responseObject = ConfigurationResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.16.0") public class ResetCfgCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ResetCfgCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java index 63dc51452f0f..dbf478df7012 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java @@ -19,7 +19,6 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import org.apache.cloudstack.acl.RoleService; import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiArgValidator; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ @APICommand(name = "updateConfiguration", description = "Updates a configuration.", responseObject = ConfigurationResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateCfgCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateCfgCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java index 02cdf1a0717c..50984188bf56 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateHypervisorCapabilitiesCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.config; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateHypervisorCapabilitiesCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateHypervisorCapabilitiesCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/diagnostics/RunDiagnosticsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/diagnostics/RunDiagnosticsCmd.java index 3d3c7410dc58..4537eb6f215c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/diagnostics/RunDiagnosticsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/diagnostics/RunDiagnosticsCmd.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.diagnostics.DiagnosticsService; import org.apache.cloudstack.diagnostics.DiagnosticsType; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InsufficientCapacityException; @@ -53,7 +52,6 @@ authorized = {RoleType.Admin}, since = "4.12.0.0") public class RunDiagnosticsCmd extends BaseAsyncCmd { - private static final Logger LOGGER = Logger.getLogger(RunDiagnosticsCmd.class); @Inject private DiagnosticsService diagnosticsService; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/ListTemplateDirectDownloadCertificatesCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/ListTemplateDirectDownloadCertificatesCmd.java index 53b29a37f8f1..145ff6ba7823 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/ListTemplateDirectDownloadCertificatesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/ListTemplateDirectDownloadCertificatesCmd.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.direct.download.DirectDownloadCertificate; import org.apache.cloudstack.direct.download.DirectDownloadCertificateHostMap; import org.apache.cloudstack.direct.download.DirectDownloadManager; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -63,7 +62,6 @@ public class ListTemplateDirectDownloadCertificatesCmd extends BaseListCmd { description = "if set to true: include the hosts where the certificate is uploaded to") private Boolean listHosts; - private static final Logger LOG = Logger.getLogger(ListTemplateDirectDownloadCertificatesCmd.class); public boolean isListHosts() { return listHosts != null && listHosts; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/RevokeTemplateDirectDownloadCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/RevokeTemplateDirectDownloadCertificateCmd.java index e44ebd312742..eb9031cbc587 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/RevokeTemplateDirectDownloadCertificateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/RevokeTemplateDirectDownloadCertificateCmd.java @@ -41,7 +41,6 @@ import org.apache.cloudstack.direct.download.DirectDownloadManager.HostCertificateStatus; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -57,7 +56,6 @@ public class 
RevokeTemplateDirectDownloadCertificateCmd extends BaseCmd { @Inject DirectDownloadManager directDownloadManager; - private static final Logger LOG = Logger.getLogger(RevokeTemplateDirectDownloadCertificateCmd.class); @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = DirectDownloadCertificateResponse.class, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java index 0fa1797c7fe8..c5c102be56d6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.direct.download.DirectDownloadCertificate; import org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.direct.download.DirectDownloadManager.HostCertificateStatus; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -49,7 +48,6 @@ public class UploadTemplateDirectDownloadCertificateCmd extends BaseCmd { @Inject DirectDownloadManager directDownloadManager; - private static final Logger LOG = Logger.getLogger(UploadTemplateDirectDownloadCertificateCmd.class); @Parameter(name = ApiConstants.CERTIFICATE, type = BaseCmd.CommandType.STRING, required = true, length = 65535, description = "SSL certificate") @@ -97,7 +95,7 @@ public void execute() { } try { - LOG.debug("Uploading certificate " + name + " to agents for Direct Download"); + logger.debug("Uploading certificate " + name + " to agents for Direct Download"); Pair> uploadStatus = directDownloadManager.uploadCertificateToHosts(certificate, name, hypervisor, zoneId, hostId); DirectDownloadCertificate certificate = 
uploadStatus.first(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java index f6e09695f88b..c7f06920bb8d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.domain; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = { RoleType.Admin, RoleType.DomainAdmin }) public class CreateDomainCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateDomainCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java index e0783b63098f..db3bae25e399 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.region.RegionService; -import org.apache.log4j.Logger; import com.cloud.domain.Domain; import com.cloud.event.EventTypes; @@ -40,7 +39,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = { RoleType.Admin, RoleType.DomainAdmin }) public class DeleteDomainCmd extends BaseAsyncCmd { - public static final Logger s_logger = 
Logger.getLogger(DeleteDomainCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainChildrenCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainChildrenCmd.java index 72614627f31f..8514bb6dda56 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainChildrenCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainChildrenCmd.java @@ -22,7 +22,6 @@ import com.cloud.server.ResourceIcon; import com.cloud.server.ResourceTag; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "listDomainChildren", description = "Lists all children domains belonging to a specified domain", responseObject = DomainResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDomainChildrenCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDomainChildrenCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java index 8b6661f27ff2..2098389a1690 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java @@ -23,7 +23,6 @@ import com.cloud.server.ResourceIcon; import com.cloud.server.ResourceTag; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ @APICommand(name = "listDomains", description = "Lists domains and provides detailed information for listed domains", responseObject = DomainResponse.class, responseView = ResponseView.Restricted, entityType = {Domain.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDomainsCmd extends BaseListCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListDomainsCmd.class.getName()); private static final String s_name = "listdomainsresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java index db4030da7726..353cb852bfdf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "updateDomain", description = "Updates a domain with a new name", responseObject = DomainResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateDomainCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateDomainCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsCmd.java index 0ad500712878..b854e8389c4f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.guest; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -43,7 +42,6 @@ @APICommand(name = "addGuestOs", description = "Add a new guest OS type", responseObject = GuestOSResponse.class, since = "4.4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddGuestOsCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(AddGuestOsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsMappingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsMappingCmd.java index 0ddd21994e3e..3fdfebb54bf5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsMappingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/AddGuestOsMappingCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.guest; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ @APICommand(name = "addGuestOsMapping", description = "Adds a guest OS name to hypervisor OS name mapping", responseObject = GuestOsMappingResponse.class, since = "4.4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddGuestOsMappingCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(AddGuestOsMappingCmd.class.getName()); ///////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/GetHypervisorGuestOsNamesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/GetHypervisorGuestOsNamesCmd.java index 795177082900..da920a2ec2d0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/GetHypervisorGuestOsNamesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/GetHypervisorGuestOsNamesCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.HypervisorGuestOsNamesResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.user.Account; @@ -35,8 +34,6 @@ @APICommand(name = GetHypervisorGuestOsNamesCmd.APINAME, description = "Gets the guest OS names in the hypervisor", responseObject = HypervisorGuestOsNamesResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin}) public class GetHypervisorGuestOsNamesCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(GetHypervisorGuestOsNamesCmd.class.getName()); - public static final String APINAME = "getHypervisorGuestOsNames"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/ListGuestOsMappingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/ListGuestOsMappingCmd.java index 29ae0b4f1884..23e62cdc7810 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/ListGuestOsMappingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/ListGuestOsMappingCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = 
"listGuestOsMapping", description = "Lists all available OS mappings for given hypervisor", responseObject = GuestOsMappingResponse.class, since = "4.4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListGuestOsMappingCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListGuestOsMappingCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsCmd.java index 14beb83e073f..d38682ce5bb4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.guest; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveGuestOsCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveGuestOsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsMappingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsMappingCmd.java index 0a72b7e04515..a472ab672c55 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsMappingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/RemoveGuestOsMappingCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.guest; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveGuestOsMappingCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveGuestOsMappingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsCmd.java index 25f022b3b515..c98cd149ef30 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.guest; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -42,7 +41,6 @@ since = "4.4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateGuestOsCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateGuestOsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsMappingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsMappingCmd.java index c83be131b4fd..fc67ef0a7e76 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsMappingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/guest/UpdateGuestOsMappingCmd.java @@ -17,7 +17,6 @@ package 
org.apache.cloudstack.api.command.admin.guest; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "updateGuestOsMapping", description = "Updates the information about Guest OS to Hypervisor specific name mapping", responseObject = GuestOsMappingResponse.class, since = "4.4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateGuestOsMappingCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateGuestOsMappingCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java index 15955b9f0a85..ca27837aa881 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddHostCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "addHost", description = "Adds a new host.", responseObject = HostResponse.class, requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class AddHostCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddHostCmd.class.getName()); ///////////////////////////////////////////////////// @@ -150,7 +148,7 @@ public void execute() { this.setResponseObject(response); } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java index 225eb1dfecdc..c965a39450bd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.host; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "addSecondaryStorage", description = "Adds secondary storage.", responseObject = ImageStoreResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddSecondaryStorageCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddSecondaryStorageCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -81,7 +79,7 @@ public void execute(){ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); } } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java index cca449f570b2..a514a61b8a41 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/CancelMaintenanceCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.host; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "cancelHostMaintenance", description = "Cancels host maintenance.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CancelMaintenanceCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CancelMaintenanceCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java index 934965cd24cc..38325c2f072d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/DeleteHostCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.host; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteHost", description = "Deletes a host.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteHostCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteHostCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java index 2b6ccb68eea7..db30e4f4c02f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "findHostsForMigration", description = "Find hosts suitable for migrating a virtual machine.", responseObject = HostForMigrationResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class FindHostsForMigrationCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(FindHostsForMigrationCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostTagsCmd.java index ed4f9a09bc5f..9ea2b2a7fbb5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostTagsCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostTagsCmd.java @@ -18,7 +18,6 @@ */ package org.apache.cloudstack.api.command.admin.host; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.BaseListCmd; @@ -27,7 +26,6 @@ @APICommand(name = "listHostTags", description = "Lists host tags", responseObject = HostTagResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListHostTagsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListHostTagsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java index b8668f61ca46..af87bbf33bb0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -45,7 +44,6 @@ @APICommand(name = "listHosts", description = "Lists hosts.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListHostsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListHostsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java index a89965e822ee..2641c54364ee 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/PrepareForMaintenanceCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.host.Host; @@ -35,7 +34,6 @@ @APICommand(name = "prepareHostForMaintenance", description = "Prepares a host for maintenance.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class PrepareForMaintenanceCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(PrepareForMaintenanceCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReconnectHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReconnectHostCmd.java index 7439d9da514b..3550d61fdb97 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReconnectHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReconnectHostCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.AgentUnavailableException; @@ -36,7 +35,6 @@ @APICommand(name = "reconnectHost", description = "Reconnects a host.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReconnectHostCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReconnectHostCmd.class.getName()); 
///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java index 90c388bc7dc7..7fee0684c781 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.host; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "releaseHostReservation", description = "Releases host reservation.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleaseHostReservationCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseHostReservationCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java index 9cf47a9c4b96..88eeadb9b139 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java @@ -27,14 +27,12 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.GuestOSCategoryResponse; import org.apache.cloudstack.api.response.HostResponse; -import org.apache.log4j.Logger; import java.util.List; @APICommand(name = "updateHost", description = "Updates a host.", responseObject = HostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class 
UpdateHostCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateHostCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -127,7 +125,7 @@ public void execute() { hostResponse.setResponseName(getCommandName()); this.setResponseObject(hostResponse); } catch (Exception e) { - s_logger.debug("Failed to update host:" + getId(), e); + logger.debug("Failed to update host:" + getId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update host:" + getId() + "," + e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostPasswordCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostPasswordCmd.java index 2e05ad144756..c94fe2c58656 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostPasswordCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostPasswordCmd.java @@ -24,14 +24,12 @@ import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.user.Account; @APICommand(name = "updateHostPassword", description = "Update password of a host/pool on management server.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class UpdateHostPasswordCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateHostPasswordCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java index 1c7e4a0ce214..18dfc87397ae 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ConfigureInternalLoadBalancerElementCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -47,7 +46,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigureInternalLoadBalancerElementCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ConfigureInternalLoadBalancerElementCmd.class.getName()); @Inject private List _service; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/CreateInternalLoadBalancerElementCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/CreateInternalLoadBalancerElementCmd.java index f11f081031a0..971f097fca52 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/CreateInternalLoadBalancerElementCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/CreateInternalLoadBalancerElementCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -45,7 +44,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateInternalLoadBalancerElementCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateInternalLoadBalancerElementCmd.class.getName()); @Inject private List _service; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLBVMsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLBVMsCmd.java index f57569097062..0eb00234382d 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLBVMsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLBVMsCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.internallb; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -39,7 +38,6 @@ @APICommand(name = "listInternalLoadBalancerVMs", description = "List internal LB VMs.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListInternalLBVMsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListInternalLBVMsCmd.class.getName()); private static final String s_name = "listinternallbvmsresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLoadBalancerElementsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLoadBalancerElementsCmd.java index 82d373e31192..6c2fadee7370 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLoadBalancerElementsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/ListInternalLoadBalancerElementsCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListInternalLoadBalancerElementsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListInternalLoadBalancerElementsCmd.class.getName()); @Inject private InternalLoadBalancerElementService _service; diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StartInternalLBVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StartInternalLBVMCmd.java index fdec79478538..3dd7d2adf378 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StartInternalLBVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StartInternalLBVMCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.internallb; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -42,7 +41,6 @@ @APICommand(name = "startInternalLoadBalancerVM", responseObject = DomainRouterResponse.class, description = "Starts an existing internal lb vm.", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StartInternalLBVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StartInternalLBVMCmd.class.getName()); private static final String s_name = "startinternallbvmresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StopInternalLBVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StopInternalLBVMCmd.java index 76ad4d438d28..a746e5d906d6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StopInternalLBVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/internallb/StopInternalLBVMCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.internallb; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 +40,6 @@ @APICommand(name = "stopInternalLoadBalancerVM", description = "Stops an Internal LB vm.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StopInternalLBVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StopInternalLBVMCmd.class.getName()); private static final String s_name = "stopinternallbvmresponse"; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/management/ListMgmtsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/management/ListMgmtsCmd.java index 3af772d332f1..a68ed62857ac 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/management/ListMgmtsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/management/ListMgmtsCmd.java @@ -23,12 +23,10 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ManagementServerResponse; -import org.apache.log4j.Logger; @APICommand(name = "listManagementServers", description = "Lists management servers.", responseObject = ManagementServerResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListMgmtsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListMgmtsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java index be0cd9f2fa74..334772970431 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkDeviceCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -44,7 +43,6 @@ responseObject = NetworkDeviceResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddNetworkDeviceCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddNetworkDeviceCmd.class); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java index 176375ce1812..40a822393452 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/AddNetworkServiceProviderCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -43,7 +42,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddNetworkServiceProviderCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(AddNetworkServiceProviderCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateGuestNetworkIpv6PrefixCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateGuestNetworkIpv6PrefixCmd.java index d7c7bec99f9e..f6b035c57837 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateGuestNetworkIpv6PrefixCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateGuestNetworkIpv6PrefixCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DataCenterGuestIpv6PrefixResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenterGuestIpv6Prefix; import com.cloud.event.EventTypes; @@ -45,7 +44,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class CreateGuestNetworkIpv6PrefixCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateGuestNetworkIpv6PrefixCmd.class); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java index 2bab4f053523..85cfddfb714f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.ApiArgValidator; @@ -44,7 +43,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class CreateManagementNetworkIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateManagementNetworkIpRangeCmd.class); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java index 53b02718ea35..cd9770877ed7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkCmdByAdmin.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.network; import org.apache.cloudstack.api.ApiArgValidator; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "createNetwork", description = "Creates a network", responseObject = NetworkResponse.class, responseView = ResponseView.Full, entityType = {Network.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateNetworkCmdByAdmin extends CreateNetworkCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(CreateNetworkCmdByAdmin.class.getName()); @Parameter(name=ApiConstants.VLAN, type=CommandType.STRING, description="the ID or VID of the network") private String vlan; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java index 2112be34543e..f2b1a18831a0 
100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -50,7 +49,6 @@ @APICommand(name = "createNetworkOffering", description = "Creates a network offering.", responseObject = NetworkOfferingResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateNetworkOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateNetworkOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java index 294ee047fe68..7eb52b92456c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreatePhysicalNetworkCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -40,7 +39,6 @@ @APICommand(name = "createPhysicalNetwork", description = "Creates a physical network", responseObject = PhysicalNetworkResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreatePhysicalNetworkCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = 
Logger.getLogger(CreatePhysicalNetworkCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateStorageNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateStorageNetworkIpRangeCmd.java index d2dc3d8688fa..42262cc2bf15 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateStorageNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateStorageNetworkIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateStorageNetworkIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateStorageNetworkIpRangeCmd.class); ///////////////////////////////////////////////////// @@ -119,7 +117,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setResponseName(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - s_logger.warn("Create storage network IP range failed", e); + logger.warn("Create storage network IP range failed", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java index e6a289d096b7..355f738679e0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java @@ -18,7 +18,6 @@ */ 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ @APICommand(name = "dedicateGuestVlanRange", description = "Dedicates a guest vlan range to an account", responseObject = GuestVlanRangeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DedicateGuestVlanRangeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DedicateGuestVlanRangeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteGuestNetworkIpv6PrefixCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteGuestNetworkIpv6PrefixCmd.java index 67d309456d6a..e2ada4191a82 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteGuestNetworkIpv6PrefixCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteGuestNetworkIpv6PrefixCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DataCenterGuestIpv6PrefixResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -45,7 +44,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class DeleteGuestNetworkIpv6PrefixCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteGuestNetworkIpv6PrefixCmd.class); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java index abb72eb27244..41cf5e518b34 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.ApiArgValidator; @@ -42,7 +41,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class DeleteManagementNetworkIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteManagementNetworkIpRangeCmd.class); ///////////////////////////////////////////////////// @@ -112,13 +110,13 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (Exception e) { - s_logger.warn("Failed to delete management ip range from " + getStartIp() + " to " + getEndIp() + " of Pod: " + getPodId(), e); + logger.warn("Failed to delete management ip range from " + getStartIp() + " to " + getEndIp() + " of Pod: " + getPodId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java 
index d7e8744d6b72..89a36d0b94f5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkDeviceCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "deleteNetworkDevice", description = "Deletes network device.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetworkDeviceCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetworkDeviceCmd.class); @Inject ExternalNetworkDeviceManager nwDeviceMgr; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkOfferingCmd.java index 80ce48cc7d98..e0598b71ea17 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteNetworkOffering", description = "Deletes a network offering.", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetworkOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetworkOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkServiceProviderCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkServiceProviderCmd.java index 1ccfff5d7ba2..4b56612fddaa 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkServiceProviderCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteNetworkServiceProviderCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ @APICommand(name = "deleteNetworkServiceProvider", description = "Deletes a Network Service Provider.", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetworkServiceProviderCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetworkServiceProviderCmd.class.getName()); ///////////////////////////////////////////////////// @@ -78,10 +76,10 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete network service provider"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeletePhysicalNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeletePhysicalNetworkCmd.java index 79f0685c6c65..3233130211c4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeletePhysicalNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeletePhysicalNetworkCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "deletePhysicalNetwork", description = "Deletes a Physical Network.", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeletePhysicalNetworkCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeletePhysicalNetworkCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java index b5de43dc5e24..454dfba92f20 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "deleteStorageNetworkIpRange", description = "Deletes a storage network IP Range.", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteStorageNetworkIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteStorageNetworkIpRangeCmd.class); ///////////////////////////////////////////////////// @@ -77,7 +75,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - s_logger.warn("Failed to delete storage network ip range " + getId(), e); + logger.warn("Failed to delete storage network ip range " + getId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListDedicatedGuestVlanRangesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListDedicatedGuestVlanRangesCmd.java index 67324d819272..0247a3069212 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListDedicatedGuestVlanRangesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListDedicatedGuestVlanRangesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ @APICommand(name = "listDedicatedGuestVlanRanges", description = "Lists dedicated guest vlan ranges", responseObject = GuestVlanRangeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = 
false) public class ListDedicatedGuestVlanRangesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDedicatedGuestVlanRangesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListGuestVlansCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListGuestVlansCmd.java index 1daeac9c53d0..4b368f5e0341 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListGuestVlansCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListGuestVlansCmd.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ since = "4.17.0", authorized = {RoleType.Admin}) public class ListGuestVlansCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListGuestVlansCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java index 405c2654bdf1..768bab641087 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkDeviceCmd.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -45,7 +44,6 @@ @APICommand(name = "listNetworkDevice", description = "List network devices", responseObject = NetworkDeviceResponse.class, 
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetworkDeviceCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworkDeviceCmd.class); private static final String s_name = "listnetworkdevice"; @Inject diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkServiceProvidersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkServiceProvidersCmd.java index 67fc8292a938..68495a62215f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkServiceProvidersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListNetworkServiceProvidersCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetworkServiceProvidersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworkServiceProvidersCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListPhysicalNetworksCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListPhysicalNetworksCmd.java index b8f30d3f7a11..51a6ddabd9f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListPhysicalNetworksCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListPhysicalNetworksCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = 
"listPhysicalNetworks", description = "Lists physical networks", responseObject = PhysicalNetworkResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPhysicalNetworksCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListPhysicalNetworksCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java index c22ec8ee19b8..556162ca360d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "listStorageNetworkIpRange", description = "List a storage network IP range.", responseObject = StorageNetworkIpRangeResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListStorageNetworkIpRangeCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListStorageNetworkIpRangeCmd.class); String _name = "liststoragenetworkiprangeresponse"; @@ -99,7 +97,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setResponseName(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - s_logger.warn("Failed to list storage network ip range for rangeId=" + getRangeId() + " podId=" + getPodId() + " zoneId=" + getZoneId()); + logger.warn("Failed to list storage network ip range for rangeId=" + getRangeId() + " podId=" + getPodId() + " zoneId=" + 
getZoneId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListSupportedNetworkServicesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListSupportedNetworkServicesCmd.java index 361da2d0db44..120c6af41ad6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListSupportedNetworkServicesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListSupportedNetworkServicesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSupportedNetworkServicesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListSupportedNetworkServicesCmd.class.getName()); @Parameter(name = ApiConstants.PROVIDER, type = CommandType.STRING, description = "network service provider name") private String providerName; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateNetworkCmd.java index b38e8f453b0e..8ef853b99da8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateNetworkCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -48,7 +47,6 @@ since = "4.11.0", authorized = {RoleType.Admin}) public class MigrateNetworkCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MigrateNetworkCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateVPCCmd.java index cca367ce7506..3e0801be40b1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/MigrateVPCCmd.java @@ -19,7 +19,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker; @@ -50,7 +49,6 @@ since = "4.11.0", authorized = {RoleType.Admin}) public class MigrateVPCCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MigrateVPCCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedGuestVlanRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedGuestVlanRangeCmd.java index 916357920a16..b3125ec36680 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedGuestVlanRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ReleaseDedicatedGuestVlanRangeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ @APICommand(name = "releaseDedicatedGuestVlanRange", description = "Releases a dedicated guest vlan range to the system", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleaseDedicatedGuestVlanRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedGuestVlanRangeCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java index e8f9e5f8cfe7..75fb45e1f115 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.NetworkOfferingResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.domain.Domain; @@ -39,7 +38,6 @@ @APICommand(name = "updateNetworkOffering", description = "Updates a network offering.", responseObject = NetworkOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateNetworkOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateNetworkOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkServiceProviderCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkServiceProviderCmd.java index 1bbf21b85ed4..b4801d9368eb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkServiceProviderCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkServiceProviderCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -40,7 +39,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateNetworkServiceProviderCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateNetworkServiceProviderCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePhysicalNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePhysicalNetworkCmd.java index 24fd93f49d1e..162116470bd5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePhysicalNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePhysicalNetworkCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -34,7 +33,6 @@ @APICommand(name = "updatePhysicalNetwork", description = "Updates a physical network", responseObject = PhysicalNetworkResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdatePhysicalNetworkCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdatePhysicalNetworkCmd.class.getName()); ///////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java index 4e880f122d85..6f90a070f0d1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -42,7 +41,6 @@ authorized = {RoleType.Admin}) public class UpdatePodManagementNetworkIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdatePodManagementNetworkIpRangeCmd.class); ///////////////////////////////////////////////////// @@ -138,10 +136,10 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (Exception e) { - s_logger.warn("Failed to update pod management IP range " + getNewStartIP() + "-" + getNewEndIP() + " of Pod: " + getPodId(), e); + logger.warn("Failed to update pod management IP range " + getNewStartIP() + "-" + getNewEndIP() + " of Pod: " + getPodId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateStorageNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateStorageNetworkIpRangeCmd.java 
index 459c89debc9a..65e2437417de 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateStorageNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateStorageNetworkIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateStorageNetworkIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateStorageNetworkIpRangeCmd.class); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -107,7 +105,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setResponseName(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - s_logger.warn("Update storage network IP range failed", e); + logger.warn("Update storage network IP range failed", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java index c2d8b3b6839f..c46e4cd6b445 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java @@ -37,7 +37,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; @@ -47,7 +46,6 @@ @APICommand(name = 
"createDiskOffering", description = "Creates a disk offering.", responseObject = DiskOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateDiskOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateDiskOfferingCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index d947f6f06599..4562aa7da19e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -37,7 +37,6 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.offering.ServiceOffering; @@ -47,7 +46,6 @@ @APICommand(name = "createServiceOffering", description = "Creates a service offering.", responseObject = ServiceOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateServiceOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateServiceOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteDiskOfferingCmd.java index 0159cd297f9c..591b09c60a5d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteDiskOfferingCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteDiskOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.offering; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteDiskOffering", description = "Updates a disk offering.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteDiskOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteDiskOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteServiceOfferingCmd.java index 9b7f9d48e30a..19203289d104 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/DeleteServiceOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.offering; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteServiceOffering", description = "Deletes a service offering.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteServiceOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteServiceOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java index 1d5898ea4a0e..1bc0f2feb7ce 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.domain.Domain; @@ -39,7 +38,6 @@ @APICommand(name = "updateDiskOffering", description = "Updates a disk offering.", responseObject = DiskOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateDiskOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateDiskOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java index d86564a60c69..5a96212af04b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.domain.Domain; @@ -39,7 +38,6 @@ @APICommand(name = "updateServiceOffering", description = "Updates a service offering.", responseObject = ServiceOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateServiceOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateServiceOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java index b15854ca875f..c1d9a6db4296 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/CreatePodCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.pod; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "createPod", description = "Creates a new Pod.", responseObject = PodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreatePodCmd extends BaseCmd { - public static 
final Logger s_logger = Logger.getLogger(CreatePodCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/DeletePodCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/DeletePodCmd.java index bdb9ef266944..c1de800d745b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/DeletePodCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/DeletePodCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.pod; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "deletePod", description = "Deletes a Pod.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeletePodCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeletePodCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java index c0e26a32eee9..5ad0b457ced7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/ListPodsByCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "listPods", description = "Lists all Pods.", responseObject = PodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPodsByCmd extends BaseListCmd { - public static final Logger s_logger = 
Logger.getLogger(ListPodsByCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/UpdatePodCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/UpdatePodCmd.java index 99ab5e1cee4a..7dae6f4c7cf0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/UpdatePodCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/pod/UpdatePodCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.pod; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "updatePod", description = "Updates a Pod.", responseObject = PodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdatePodCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdatePodCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/AddRegionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/AddRegionCmd.java index 61bf32ab822c..3a93a2750429 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/AddRegionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/AddRegionCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "addRegion", description = "Adds a Region", responseObject = RegionResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddRegionCmd extends BaseCmd { - public static final Logger s_logger = 
Logger.getLogger(AddRegionCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/CreatePortableIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/CreatePortableIpRangeCmd.java index 61deceb06913..fd103c838309 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/CreatePortableIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/CreatePortableIpRangeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.region; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -43,7 +42,6 @@ responseHasSensitiveInfo = false) public class CreatePortableIpRangeCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreatePortableIpRangeCmd.class.getName()); ///////////////////////////////////////////////////// @@ -126,7 +124,7 @@ public void create() throws ResourceAllocationException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create portable public IP range"); } } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/DeletePortableIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/DeletePortableIpRangeCmd.java index 6cc884645e9c..3ff46fcc94d5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/DeletePortableIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/DeletePortableIpRangeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.region; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "deletePortableIpRange", description = "deletes a range of portable public IP's associated with a region", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeletePortableIpRangeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeletePortableIpRangeCmd.class.getName()); private static final String s_name = "deleteportablepublicipresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/ListPortableIpRangesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/ListPortableIpRangesCmd.java index ed0ddd68aca2..e654da6df449 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/ListPortableIpRangesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/ListPortableIpRangesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPortableIpRangesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListPortableIpRangesCmd.class.getName()); private static final String s_name = "listportableipresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java index 180e34c7a8f0..3ea323eebfba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/RemoveRegionCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import 
org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "removeRegion", description = "Removes specified region", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveRegionCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RemoveRegionCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/UpdateRegionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/UpdateRegionCmd.java index c772efd5e1fe..4267f6a2c286 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/region/UpdateRegionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/region/UpdateRegionCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "updateRegion", description = "Updates a region", responseObject = RegionResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateRegionCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateRegionCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java index 003b823e106e..dc8c15cf09df 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java @@ -19,7 +19,6 @@ import java.util.Date; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ArchiveAlertsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ArchiveAlertsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/CleanVMReservationsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/CleanVMReservationsCmd.java index 9acc71ce6930..1ae8c9441233 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/CleanVMReservationsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/CleanVMReservationsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.resource; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -31,7 +30,6 @@ @APICommand(name = "cleanVMReservations", description = "Cleanups VM reservations in the database.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CleanVMReservationsCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CleanVMReservationsCmd.class.getName()); private static final String s_name = "cleanvmreservationresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java index eb3848927583..9262a120f72c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java @@ -19,7 +19,6 @@ import java.util.Date; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAlertsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAlertsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertsCmd.java index 3471ab60837a..64cf691e6a73 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListAlertsCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.Parameter; import 
org.apache.cloudstack.api.response.AlertResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; import com.cloud.alert.Alert; import com.cloud.utils.Pair; @@ -34,7 +33,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAlertsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListAlertsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java index 253677616f03..17a648f3a395 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/ListCapacityCmd.java @@ -21,7 +21,6 @@ import java.util.Comparator; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListCapacityCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListCapacityCmd.class.getName()); private static final DecimalFormat s_percentFormat = new DecimalFormat("##.##"); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/StartRollingMaintenanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/StartRollingMaintenanceCmd.java index 7ee3e50e0cd9..04fa1002611c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/StartRollingMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/StartRollingMaintenanceCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.RollingMaintenanceResponse; import 
org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -55,7 +54,6 @@ public class StartRollingMaintenanceCmd extends BaseAsyncCmd { @Inject RollingMaintenanceManager manager; - public static final Logger s_logger = Logger.getLogger(StartRollingMaintenanceCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/UploadCustomCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/UploadCustomCertificateCmd.java index 5dfada572162..c5ae6890c3e5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/UploadCustomCertificateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/UploadCustomCertificateCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.resource; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ description = "Uploads a custom certificate for the console proxy VMs to use for SSL. Can be used to upload a single certificate signed by a known CA. 
Can also be used, through multiple calls, to upload a chain of certificates from CA to the custom certificate itself.", requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class UploadCustomCertificateCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UploadCustomCertificateCmd.class.getName()); @Parameter(name = ApiConstants.CERTIFICATE, type = CommandType.STRING, required = true, description = "The certificate to be uploaded.", length = 65535) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/DeleteResourceIconCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/DeleteResourceIconCmd.java index 8fb02ea70542..e97a68bddcb6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/DeleteResourceIconCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/DeleteResourceIconCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import java.util.List; @@ -37,7 +36,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin, RoleType.User}) public class DeleteResourceIconCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteResourceIconCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/ListResourceIconCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/ListResourceIconCmd.java index 0af11ceec220..6cc3173cf155 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/ListResourceIconCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/ListResourceIconCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import java.util.List; @@ -35,7 +34,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin, RoleType.User}) public class ListResourceIconCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListResourceIconCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/UploadResourceIconCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/UploadResourceIconCmd.java index ea5d8995a2af..5a6acd961bf5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/UploadResourceIconCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/resource/icon/UploadResourceIconCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.awt.image.BufferedImage; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin, RoleType.User}) public class UploadResourceIconCmd extends BaseCmd { - public static final Logger LOGGER = Logger.getLogger(UploadResourceIconCmd.class.getName()); ///////////////////////////////////////////////////// @@ -120,7 +118,7 @@ private boolean imageValidator (String base64Image) { return false; } } catch (Exception e) { - LOGGER.warn("Data uploaded not 
a valid image"); + logger.warn("Data uploaded not a valid image"); return false; } return true; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureOvsElementCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureOvsElementCmd.java index a1b01a1c04fb..4a8c0bc3a3b9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureOvsElementCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureOvsElementCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.OvsProviderResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -42,8 +41,6 @@ @APICommand(name = "configureOvsElement", responseObject = OvsProviderResponse.class, description = "Configures an ovs element.", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigureOvsElementCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger - .getLogger(ConfigureOvsElementCmd.class.getName()); @Inject private List _service; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureVirtualRouterElementCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureVirtualRouterElementCmd.java index b7f7a050d07d..aa119f3aca75 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureVirtualRouterElementCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ConfigureVirtualRouterElementCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -43,7 +42,6 @@ @APICommand(name = "configureVirtualRouterElement", responseObject = 
VirtualRouterProviderResponse.class, description = "Configures a virtual router element.", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigureVirtualRouterElementCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ConfigureVirtualRouterElementCmd.class.getName()); @Inject private List _service; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java index f93ca35a06a7..e85531c83c4d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 @@ @APICommand(name = "createVirtualRouterElement", responseObject = VirtualRouterProviderResponse.class, description = "Create a virtual router element.", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVirtualRouterElementCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateVirtualRouterElementCmd.class.getName()); @Inject private List _service; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/DestroyRouterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/DestroyRouterCmd.java index d2dce6b9ff34..39ccee47fbeb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/DestroyRouterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/DestroyRouterCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.router; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -38,7 +37,6 @@ @APICommand(name = "destroyRouter", description = "Destroys a router.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DestroyRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DestroyRouterCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java index 93a48eba49cc..4bef26e05550 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.RouterHealthCheckResultsListResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceUnavailableException; @@ -47,7 +46,6 @@ responseHasSensitiveInfo = false, since = "4.14.0") public class GetRouterHealthCheckResultsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GetRouterHealthCheckResultsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListOvsElementsCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListOvsElementsCmd.java index 89b39f87e446..a267aa526691 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListOvsElementsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListOvsElementsCmd.java @@ -26,11 +26,9 @@ import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.command.user.network.ListNetworkOfferingsCmd; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.OvsProviderResponse; import org.apache.cloudstack.api.response.ProviderResponse; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -42,8 +40,7 @@ @APICommand(name = "listOvsElements", description = "Lists all available ovs elements.", responseObject = OvsProviderResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListOvsElementsCmd extends BaseListCmd { - public static final Logger s_logger = Logger - .getLogger(ListNetworkOfferingsCmd.class.getName()); + @Inject private List _service; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListRoutersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListRoutersCmd.java index 6e955e462121..e0cdc0dcf807 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListRoutersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListRoutersCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.router; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiCommandResourceType; @@ -40,7 +39,6 @@ @APICommand(name = "listRouters", description = "List routers.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListRoutersCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListRoutersCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListVirtualRouterElementsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListVirtualRouterElementsCmd.java index 6eb24dce0747..424b8c29d041 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListVirtualRouterElementsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/ListVirtualRouterElementsCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "listVirtualRouterElements", description = "Lists all available virtual router elements.", responseObject = VirtualRouterProviderResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVirtualRouterElementsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListVirtualRouterElementsCmd.class.getName()); // TODO, VirtualRouterElementServer is not singleton in system! 
@Inject diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/RebootRouterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/RebootRouterCmd.java index 6e334d7e4422..1d97dd803098 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/RebootRouterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/RebootRouterCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.router; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -39,7 +38,6 @@ @APICommand(name = "rebootRouter", description = "Starts a router.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RebootRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RebootRouterCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StartRouterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StartRouterCmd.java index 121b2a1bd11f..24ab78810374 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StartRouterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StartRouterCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.router; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -41,7 +40,6 @@ @APICommand(name = "startRouter", responseObject = DomainRouterResponse.class, description = "Starts a router.", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StartRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StartRouterCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StopRouterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StopRouterCmd.java index 2da38d904264..971086a57cff 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StopRouterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/StopRouterCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.router; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -40,7 +39,6 @@ @APICommand(name = "stopRouter", description = "Stops a router.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StopRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StopRouterCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java index 2d5255614c6e..3265a089d672 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.router; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "changeServiceForRouter", description = "Upgrades domain router to a new service offering", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpgradeRouterCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpgradeRouterCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterTemplateCmd.java index fa0fe5800eeb..74464cab3150 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/UpgradeRouterTemplateCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.router; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -43,7 +42,6 @@ @APICommand(name = "upgradeRouterTemplate", description = "Upgrades router to use newer template", responseObject = BaseResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpgradeRouterTemplateCmd extends org.apache.cloudstack.api.BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpgradeRouterTemplateCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java index b8ab1461a2ba..7c8f0e21afbb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreCmd.java @@ -21,7 +21,6 @@ import java.util.Iterator; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ @APICommand(name = "addImageStore", description = "Adds backup image store.", responseObject = ImageStoreResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddImageStoreCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddImageStoreCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -137,7 +135,7 @@ public void execute(){ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); } } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java index 34ff171b91fa..2fe3c7cd106a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddImageStoreS3CMD.java @@ -38,7 +38,6 @@ import java.util.Map; import com.cloud.utils.storage.S3.ClientOptions; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -59,7 +58,6 @@ @APICommand(name = 
"addImageStoreS3", description = "Adds S3 Image Store", responseObject = ImageStoreResponse.class, since = "4.7.0", requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public final class AddImageStoreS3CMD extends BaseCmd implements ClientOptions { - public static final Logger s_logger = Logger.getLogger(AddImageStoreS3CMD.class.getName()); private static final String s_name = "addImageStoreS3Response"; @@ -141,7 +139,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add S3 Image Store."); } } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java index a538962e076b..b779ba2a2b47 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ObjectStoreResponse; -import org.apache.log4j.Logger; import java.util.Collection; import java.util.HashMap; @@ -35,7 +34,6 @@ @APICommand(name = "addObjectStoragePool", description = "Adds a object storage pool", responseObject = ObjectStoreResponse.class, since = "4.19.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddObjectStoragePoolCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddObjectStoragePoolCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters 
///////////////////// @@ -125,7 +123,7 @@ public void execute(){ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add object storage"); } } catch (Exception ex) { - s_logger.error("Exception: ", ex); + logger.error("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CancelPrimaryStorageMaintenanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CancelPrimaryStorageMaintenanceCmd.java index a694aba30fbe..7e925f286d09 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CancelPrimaryStorageMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CancelPrimaryStorageMaintenanceCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ @APICommand(name = "cancelStorageMaintenance", description = "Cancels maintenance for primary storage", responseObject = StoragePoolResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CancelPrimaryStorageMaintenanceCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CancelPrimaryStorageMaintenanceCmd.class.getName()); private static final String s_name = "cancelprimarystoragemaintenanceresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateSecondaryStagingStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateSecondaryStagingStoreCmd.java index 08069446893b..5776eb6464cf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateSecondaryStagingStoreCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateSecondaryStagingStoreCmd.java @@ -21,7 +21,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "createSecondaryStagingStore", description = "create secondary staging store.", responseObject = ImageStoreResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateSecondaryStagingStoreCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateSecondaryStagingStoreCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -113,7 +111,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); } } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index 477d7570dfad..75813a7aabf5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -19,7 +19,6 @@ import java.net.UnknownHostException; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ @APICommand(name = "createStoragePool", description = "Creates a storage pool.", responseObject = StoragePoolResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateStoragePoolCmd 
extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateStoragePoolCmd.class.getName()); ///////////////////////////////////////////////////// @@ -170,13 +168,13 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add storage pool"); } } catch (ResourceUnavailableException ex1) { - s_logger.warn("Exception: ", ex1); + logger.warn("Exception: ", ex1); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex1.getMessage()); } catch (ResourceInUseException ex2) { - s_logger.warn("Exception: ", ex2); + logger.warn("Exception: ", ex2); throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex2.getMessage()); } catch (UnknownHostException ex3) { - s_logger.warn("Exception: ", ex3); + logger.warn("Exception: ", ex3); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex3.getMessage()); } catch (Exception ex4) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex4.getMessage()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteImageStoreCmd.java index 194f0baf603e..50a9d9a6f397 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteImageStoreCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteImageStore", description = "Deletes an image store or Secondary Storage.", responseObject = SuccessResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteImageStoreCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteImageStoreCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmd.java index ed305d9689d5..6cb38d4c862c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmd.java @@ -25,12 +25,10 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; @APICommand(name = "deleteObjectStoragePool", description = "Deletes an Object Storage Pool", responseObject = SuccessResponse.class, since = "4.19.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteObjectStoragePoolCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteObjectStoragePoolCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeletePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeletePoolCmd.java index 
d87768e7f393..28f71e0740f7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeletePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeletePoolCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "deleteStoragePool", description = "Deletes a storage pool.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeletePoolCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeletePoolCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteSecondaryStagingStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteSecondaryStagingStoreCmd.java index 34a26461fa82..a0c2731ccdaf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteSecondaryStagingStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/DeleteSecondaryStagingStoreCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteSecondaryStagingStore", description = "Deletes a secondary staging store .", responseObject = SuccessResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSecondaryStagingStoreCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteSecondaryStagingStoreCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java index 699f4831415b..0848f4bd7ad2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.storage.StoragePool; import com.cloud.utils.Pair; @@ -38,7 +37,6 @@ @APICommand(name = "findStoragePoolsForMigration", description = "Lists storage pools available for migration of a volume.", responseObject = StoragePoolResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class FindStoragePoolsForMigrationCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(FindStoragePoolsForMigrationCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java index a9eac3ed76d0..5270569de44e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java @@ -23,12 +23,10 @@ import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; @APICommand(name = "listImageStores", description = "Lists image stores.", responseObject = ImageStoreResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListImageStoresCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListImageStoresCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListObjectStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListObjectStoragePoolsCmd.java index 9d8d8eccc3ce..005a1a54444d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListObjectStoragePoolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListObjectStoragePoolsCmd.java @@ -23,13 +23,11 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ObjectStoreResponse; -import org.apache.log4j.Logger; @APICommand(name = "listObjectStoragePools", description = "Lists object storage pools.", responseObject = ObjectStoreResponse.class, since = "4.19.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListObjectStoragePoolsCmd extends BaseListCmd { - 
public static final Logger s_logger = Logger.getLogger(ListObjectStoragePoolsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListSecondaryStagingStoresCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListSecondaryStagingStoresCmd.java index e315c8a6d47d..0cad16a247fd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListSecondaryStagingStoresCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListSecondaryStagingStoresCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -29,7 +28,6 @@ @APICommand(name = "listSecondaryStagingStores", description = "Lists secondary staging stores.", responseObject = ImageStoreResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSecondaryStagingStoresCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListSecondaryStagingStoresCmd.class.getName()); private static final String s_name = "listsecondarystagingstoreresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java index 6923353b3bf5..293ed3103cbc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -33,7 +32,6 @@ @APICommand(name = "listStoragePools", description = "Lists storage pools.", responseObject = StoragePoolResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListStoragePoolsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListStoragePoolsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java index 347b66061f6e..efe7a23b5cb4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageProvidersCmd.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "listStorageProviders", description = "Lists storage providers.", responseObject = StorageProviderResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListStorageProvidersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListStorageProvidersCmd.class.getName()); @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "the type of storage provider: either primary or image", required = true) private String type; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageTagsCmd.java index 43981ee1a3dc..d9bb5d4cd930 100644 
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageTagsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListStorageTagsCmd.java @@ -18,7 +18,6 @@ */ package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.BaseListCmd; @@ -27,7 +26,6 @@ @APICommand(name = "listStorageTags", description = "Lists storage tags", responseObject = StorageTagResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListStorageTagsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListStorageTagsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index de9b55a9ff12..8f5a7aced3fe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; @@ -41,7 +40,6 @@ authorized = {RoleType.Admin}) public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(MigrateSecondaryStorageDataCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/PreparePrimaryStorageForMaintenanceCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/PreparePrimaryStorageForMaintenanceCmd.java index ddabefb14c86..818b3a5bbeab 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/PreparePrimaryStorageForMaintenanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/PreparePrimaryStorageForMaintenanceCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -37,7 +36,6 @@ @APICommand(name = "enableStorageMaintenance", description = "Puts storage pool into maintenance state", responseObject = StoragePoolResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class PreparePrimaryStorageForMaintenanceCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(PreparePrimaryStorageForMaintenanceCmd.class.getName()); private static final String s_name = "prepareprimarystorageformaintenanceresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/SyncStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/SyncStoragePoolCmd.java index d7a783a4ff43..9f81f2f6c86c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/SyncStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/SyncStoragePoolCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.context.CallContext; -import java.util.logging.Logger; @APICommand(name = "syncStoragePool", description = "Sync storage pool with management server (currently supported for Datastore Cluster in VMware and syncs the datastores in it)", @@ -45,7 +44,6 @@ ) public class SyncStoragePoolCmd extends 
BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(SyncStoragePoolCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateCloudToUseObjectStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateCloudToUseObjectStoreCmd.java index 3351d389c6f6..5ac34f27bada 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateCloudToUseObjectStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateCloudToUseObjectStoreCmd.java @@ -21,7 +21,6 @@ import java.util.Iterator; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "updateCloudToUseObjectStore", description = "Migrate current NFS secondary storages to use object store.", responseObject = ImageStoreResponse.class, since = "4.3.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateCloudToUseObjectStoreCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateCloudToUseObjectStoreCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -130,7 +128,7 @@ public void execute(){ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add secondary storage"); } } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index d7dca93b485a..bcc438b957bf 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -25,14 +25,12 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.storage.ImageStore; @APICommand(name = UpdateImageStoreCmd.APINAME, description = "Updates image store read-only status", responseObject = ImageStoreResponse.class, entityType = {ImageStore.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.15.0") public class UpdateImageStoreCmd extends BaseCmd { - private static final Logger LOG = Logger.getLogger(UpdateImageStoreCmd.class.getName()); public static final String APINAME = "updateImageStore"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java index 8403d3c62418..716c95b45c3e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStorageCapabilitiesCmd.java @@ -29,14 +29,12 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "updateStorageCapabilities", description = "Syncs capabilities of storage pools", responseObject = StoragePoolResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.16.0") public class UpdateStorageCapabilitiesCmd extends BaseCmd { - private static final Logger LOG = 
Logger.getLogger(UpdateStorageCapabilitiesCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java index 7a907e0f76a7..13f02ef83c28 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateStoragePoolCmd.java @@ -20,7 +20,6 @@ import java.util.Map; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "updateStoragePool", description = "Updates a storage pool.", responseObject = StoragePoolResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateStoragePoolCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateStoragePoolCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java index 7eb87006d0d6..bd72f3213de1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/AddSwiftCmd.java @@ -19,7 +19,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "addSwift", description = "Adds Swift.", responseObject = ImageStoreResponse.class, since = "3.0.0", 
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddSwiftCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddSwiftCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -102,7 +100,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to add Swift secondary storage"); } } catch (DiscoveryException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/ListSwiftsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/ListSwiftsCmd.java index 6d7bfbaf1b10..e21a23349bb5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/ListSwiftsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/swift/ListSwiftsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.swift; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -31,7 +30,6 @@ @APICommand(name = "listSwifts", description = "List Swift.", responseObject = ImageStoreResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSwiftsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListSwiftsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/DestroySystemVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/DestroySystemVmCmd.java index 7ed536f6d4cb..7e0faab2fb50 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/DestroySystemVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/DestroySystemVmCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.systemvm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "destroySystemVm", responseObject = SystemVmResponse.class, description = "Destroys a system virtual machine.", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DestroySystemVmCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DestroySystemVmCmd.class.getName()); @ACL(accessType = AccessType.OperateEntry) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ListSystemVMsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ListSystemVMsCmd.java index b6f8c92fa178..e8e5ee0ebad6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ListSystemVMsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ListSystemVMsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -39,7 +38,6 @@ @APICommand(name = "listSystemVms", description = "List system virtual machines.", responseObject = SystemVmResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSystemVMsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListSystemVMsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java index f0f7aca16c8d..ccc6093aa834 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -47,7 +46,6 @@ @APICommand(name = "migrateSystemVm", description = "Attempts Migration of a system virtual machine to the host specified.", responseObject = SystemVmResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class MigrateSystemVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MigrateSystemVMCmd.class.getName()); ///////////////////////////////////////////////////// @@ -171,16 +169,16 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to migrate the system vm"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } catch (ManagementServerException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } catch (VirtualMachineMigrationException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/PatchSystemVMCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/PatchSystemVMCmd.java index ae3c36b4948e..4f4b26316673 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/PatchSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/PatchSystemVMCmd.java @@ -30,13 +30,11 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "patchSystemVm", description = "Attempts to live patch systemVMs - CPVM, SSVM ", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = { RoleType.Admin }, since = "4.17.0") public class PatchSystemVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(PatchSystemVMCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/RebootSystemVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/RebootSystemVmCmd.java index 0ba7e0c2edf5..30bd51184ac3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/RebootSystemVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/RebootSystemVmCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.systemvm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "rebootSystemVm", description = "Reboots a system VM.", responseObject = SystemVmResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RebootSystemVmCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RebootSystemVmCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java index f694988efa05..06e57674c537 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java @@ -18,7 +18,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -46,7 +45,6 @@ + "The system vm must be in a \"Stopped\" state for " + "this command to take effect.", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ScaleSystemVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ScaleSystemVMCmd.class.getName()); private static final String s_name = "changeserviceforsystemvmresponse"; ///////////////////////////////////////////////////// @@ -111,16 +109,16 @@ public void execute() { try { result = _mgr.upgradeSystemVM(this); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } 
catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ManagementServerException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (VirtualMachineMigrationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } if (result != null) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StartSystemVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StartSystemVMCmd.java index 0cb517f40585..eac3d64ab59e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StartSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StartSystemVMCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.systemvm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "startSystemVm", responseObject = SystemVmResponse.class, description = "Starts a system virtual machine.", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StartSystemVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StartSystemVMCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StopSystemVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StopSystemVmCmd.java index 4bb533ce5b6a..1d84382f5d22 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StopSystemVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/StopSystemVmCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.systemvm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "stopSystemVm", description = "Stops a system VM.", responseObject = SystemVmResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StopSystemVmCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StopSystemVmCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java index 12f80f32b069..5abe90e3f589 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java @@ -18,7 +18,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 +40,6 @@ + "The system vm must be in a \"Stopped\" state for " + "this command to take effect.", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpgradeSystemVMCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpgradeSystemVMCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/template/PrepareTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/template/PrepareTemplateCmd.java index 9b8d402864a8..9a59efb19f2a 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/template/PrepareTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/template/PrepareTemplateCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -38,7 +37,6 @@ @APICommand(name = "prepareTemplate", responseObject = TemplateResponse.class, description = "load template into primary storage", entityType = {VirtualMachineTemplate.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class PrepareTemplateCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(PrepareTemplateCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficMonitorCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficMonitorCmd.java index 95593714feb0..5c0f1fc1cd21 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficMonitorCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficMonitorCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.usage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "addTrafficMonitor", description = "Adds Traffic Monitor Host for Direct Network Usage", responseObject = TrafficMonitorResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddTrafficMonitorCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AddTrafficMonitorCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java index 809129402a87..b1810676b744 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.usage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -37,7 +36,6 @@ @APICommand(name = "addTrafficType", description = "Adds traffic type to a physical network", responseObject = TrafficTypeResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddTrafficTypeCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(AddTrafficTypeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficMonitorCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficMonitorCmd.java index ff371d6e83d9..8fdb3af399c1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficMonitorCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficMonitorCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.usage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "deleteTrafficMonitor", description = "Deletes an traffic monitor host.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTrafficMonitorCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTrafficMonitorCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficTypeCmd.java index a728690a09d6..a1e4ebda09a3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficTypeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/DeleteTrafficTypeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.usage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -34,7 +33,6 @@ @APICommand(name = "deleteTrafficType", description = "Deletes traffic type of a physical network", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTrafficTypeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTrafficTypeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java index 65b864f2ab6e..491b0fe85bae 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/GenerateUsageRecordsCmd.java @@ -18,7 +18,6 @@ import java.util.Date; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GenerateUsageRecordsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GenerateUsageRecordsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficMonitorsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficMonitorsCmd.java index f3a65bbfffd8..ed42bc42dbd1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficMonitorsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficMonitorsCmd.java @@ -19,7 +19,6 @@ import 
java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listTrafficMonitors", description = "List traffic monitor Hosts.", responseObject = TrafficMonitorResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTrafficMonitorsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTrafficMonitorsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypeImplementorsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypeImplementorsCmd.java index 97f43155be67..1ad8872db962 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypeImplementorsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypeImplementorsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -44,7 +43,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTrafficTypeImplementorsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTrafficTypeImplementorsCmd.class); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java index 6e36ca3bd8d1..d106a736fcab 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "listTrafficTypes", description = "Lists traffic types of a given physical network.", responseObject = ProviderResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTrafficTypesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTrafficTypesCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java index 15f9dd20e29f..2772743c75a4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListUsageTypesCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; @@ -30,7 +29,6 @@ @APICommand(name = "listUsageTypes", description = "List Usage Types", responseObject = UsageTypeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListUsageTypesCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListUsageTypesCmd.class.getName()); @Override public long getEntityOwnerId() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/RemoveRawUsageRecordsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/RemoveRawUsageRecordsCmd.java index 710a11c0d7f2..3e698e614423 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/RemoveRawUsageRecordsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/RemoveRawUsageRecordsCmd.java @@ -29,11 +29,9 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; @APICommand(name = "removeRawUsageRecords", description = "Safely removes raw records from cloud_usage table", responseObject = SuccessResponse.class, since = "4.6.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveRawUsageRecordsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RemoveRawUsageRecordsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java index 103e58c4e038..c7b3c2b433b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.usage; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -34,7 +33,6 @@ @APICommand(name = "updateTrafficType", description = "Updates traffic type of a physical network", responseObject = TrafficTypeResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateTrafficTypeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateTrafficTypeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java index e8f5944e4079..e2a2baecc866 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.user.User; @@ -35,7 +34,6 @@ @APICommand(name = "createUser", description = "Creates a user for an account that already exists", responseObject = UserResponse.class, requestHasSensitiveInfo = true, responseHasSensitiveInfo = true) public class CreateUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java index a4f13d5e9c7e..560e449412c2 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "deleteUser", description = "Deletes a user for an account", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java index f7a51d0b1d3b..974c1c7bebed 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -39,7 +38,6 @@ @APICommand(name = "disableUser", description = "Disables a user account", responseObject = UserResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DisableUserCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DisableUserCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java 
index f13eac8f2a90..77d8d530daf2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "enableUser", description = "Enables a user account", responseObject = UserResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class EnableUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(EnableUserCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserCmd.java index 5fcad8068b12..3427cef33661 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.user; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -30,7 +29,6 @@ @APICommand(name = "getUser", description = "Find user account by API key", responseObject = UserResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class GetUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GetUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserKeysCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserKeysCmd.java index 253a55652ead..3a3414d95d8a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserKeysCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/GetUserKeysCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.UserResponse; import java.util.Map; -import org.apache.log4j.Logger; @APICommand(name = "getUserKeys", description = "This command allows the user to query the seceret and API keys for the account", @@ -44,7 +43,6 @@ public class GetUserKeysCmd extends BaseCmd{ @Parameter(name= ApiConstants.ID, type = CommandType.UUID, entityType = UserResponse.class, required = true, description = "ID of the user whose keys are required") private Long id; - public static final Logger s_logger = Logger.getLogger(GetUserKeysCmd.class.getName()); public Long getID(){ return id; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/ListUsersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/ListUsersCmd.java index a516a30e769b..ef9e3fa22405 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/ListUsersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/ListUsersCmd.java @@ -20,7 +20,6 @@ import 
com.cloud.server.ResourceTag; import com.cloud.user.Account; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listUsers", description = "Lists user accounts", responseObject = UserResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ListUsersCmd extends BaseListAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListUsersCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/LockUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/LockUserCmd.java index df6ef4f7b0b0..5c8bff0732ad 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/LockUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/LockUserCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.user; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "lockUser", description = "Locks a user account", responseObject = UserResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class LockUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LockUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/MoveUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/MoveUserCmd.java index b7090977d96f..e57258a45711 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/MoveUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/MoveUserCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.region.RegionService; import org.apache.commons.lang3.ObjectUtils; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.user.User; @@ -46,7 +45,6 @@ since = "4.11", authorized = {RoleType.Admin}) public class MoveUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(MoveUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/RegisterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/RegisterCmd.java index 4199015b9ca4..b3e7d2bec821 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/RegisterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/RegisterCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.user; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; 
import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ description = "This command allows a user to register for the developer API, returning a secret key and an API key. This request is made through the integration API port, so it is a privileged command and must be made on behalf of a user. It is up to the implementer just how the username and password are entered, and then how that translates to an integration API request. Both secret key and API key should be returned to the user", requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RegisterCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RegisterCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java index cb9f6e189f03..3f8d386d2669 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.region.RegionService; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.user.User; @@ -37,7 +36,6 @@ @APICommand(name = "updateUser", description = "Updates a user account", responseObject = UserResponse.class, requestHasSensitiveInfo = true, responseHasSensitiveInfo = true) public class UpdateUserCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java index 66aefd46966b..cceadea85322 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.vlan; import com.cloud.utils.net.NetUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 @@ @APICommand(name = "createVlanIpRange", description = "Creates a VLAN IP range.", responseObject = VlanIpRangeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVlanIpRangeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateVlanIpRangeCmd.class.getName()); ///////////////////////////////////////////////////// @@ -226,10 +224,10 @@ public void execute() throws ResourceUnavailableException, ResourceAllocationExc throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create vlan ip range"); } } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { - s_logger.info(ex); + logger.info(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DedicatePublicIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DedicatePublicIpRangeCmd.java index 7c122dfc22e6..cac029f3aa12 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DedicatePublicIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DedicatePublicIpRangeCmd.java @@ -16,7 +16,6 @@ // under 
the License. package org.apache.cloudstack.api.command.admin.vlan; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "dedicatePublicIpRange", description = "Dedicates a Public IP range to an account", responseObject = VlanIpRangeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DedicatePublicIpRangeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DedicatePublicIpRangeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java index 15f0bde37a10..7f583fe225af 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.vlan; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "deleteVlanIpRange", description = "Creates a VLAN IP range.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVlanIpRangeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVlanIpRangeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ListVlanIpRangesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ListVlanIpRangesCmd.java index 3b5370e3f15d..c11b505c684d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ListVlanIpRangesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ListVlanIpRangesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "listVlanIpRanges", description = "Lists all VLAN IP ranges.", responseObject = VlanIpRangeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVlanIpRangesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListVlanIpRangesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ReleasePublicIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ReleasePublicIpRangeCmd.java index 846433a60fc9..be4cea41cd89 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ReleasePublicIpRangeCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/ReleasePublicIpRangeCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.vlan; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "releasePublicIpRange", description = "Releases a Public IP range back to the system pool", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleasePublicIpRangeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ReleasePublicIpRangeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/UpdateVlanIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/UpdateVlanIpRangeCmd.java index caaf4c5ae016..df6d99f8e2af 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/UpdateVlanIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/UpdateVlanIpRangeCmd.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.VlanIpRangeResponse; -import org.apache.log4j.Logger; import com.cloud.dc.Vlan; import com.cloud.exception.ConcurrentOperationException; @@ -39,7 +38,6 @@ authorized = {RoleType.Admin}) public class UpdateVlanIpRangeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVlanIpRangeCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -149,7 +147,7 @@ public void execute() throws ResourceUnavailableException, ResourceAllocationExc throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to Update vlan ip range"); } } catch 
(ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java index 1d53bbb39ad7..ac63a5efac37 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java @@ -19,7 +19,6 @@ import java.util.List; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class AssignVMCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AssignVMCmd.class.getName()); ///////////////////////////////////////////////////// @@ -133,7 +131,7 @@ public void execute() { e.printStackTrace(); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } catch (Exception e) { - s_logger.error("Failed to move vm due to: " + e.getStackTrace()); + logger.error("Failed to move vm due to: ", e); if (e.getMessage() != null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to move vm due to " + e.getMessage()); } else if (e.getCause() != null) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java index acdc0e074278..6bb7657b86ba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/DeployVMCmdByAdmin.java @@ -20,7 +20,6 @@ import org.apache.cloudstack.api.Parameter; import 
org.apache.cloudstack.api.response.ClusterResponse; import org.apache.cloudstack.api.response.PodResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -34,7 +33,6 @@ @APICommand(name = "deployVirtualMachine", description = "Creates and automatically starts a virtual machine based on a service offering, disk offering, and template.", responseObject = UserVmResponse.class, responseView = ResponseView.Full, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DeployVMCmdByAdmin extends DeployVMCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(DeployVMCmdByAdmin.class.getName()); @Parameter(name = ApiConstants.POD_ID, type = CommandType.UUID, entityType = PodResponse.class, description = "destination Pod ID to deploy the VM to - parameter available for root admin only", since = "4.13") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java index 4cd7f541c4ef..a964e873bad1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.vm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -43,7 +42,6 @@ @APICommand(name = "expungeVirtualMachine", description = "Expunge a virtual machine. 
Once expunged, it cannot be recoverd.", responseObject = SuccessResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ExpungeVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ExpungeVMCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/GetVMUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/GetVMUserDataCmd.java index 98af820201c3..8745ef12ce44 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/GetVMUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/GetVMUserDataCmd.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VMUserDataResponse; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.uservm.UserVm; @@ -32,7 +31,6 @@ @APICommand(name = "getVirtualMachineUserData", description = "Returns user data associated with the VM", responseObject = VMUserDataResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetVMUserDataCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GetVMUserDataCmd.class); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index d632c786a16a..dd897218a4d3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@ -41,7 +41,6 @@ 
import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -65,7 +64,6 @@ authorized = {RoleType.Admin}, since = "4.14.0") public class ImportUnmanagedInstanceCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(ImportUnmanagedInstanceCmd.class); @Inject public VmImportService vmImportService; @@ -203,8 +201,8 @@ public Map getNicNetworkList() { for (Map entry : (Collection>)nicNetworkList.values()) { String nic = entry.get(VmDetailConstants.NIC); String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); } if (StringUtils.isAnyEmpty(nic, networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); @@ -221,8 +219,8 @@ public Map getNicIpAddressList() { for (Map entry : (Collection>)nicIpAddressList.values()) { String nic = entry.get(VmDetailConstants.NIC); String ipAddress = StringUtils.defaultIfEmpty(entry.get(VmDetailConstants.IP4_ADDRESS), null); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); } if (StringUtils.isEmpty(nic)) { throw new InvalidParameterValueException(String.format("NIC ID: '%s' is invalid for IP address mapping", nic)); @@ -246,8 +244,8 @@ public Map getDataDiskToDiskOfferingList() { for (Map entry : (Collection>)dataDiskToDiskOfferingList.values()) { String disk = 
entry.get(VmDetailConstants.DISK); String offeringUuid = entry.get(VmDetailConstants.DISK_OFFERING); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("disk, '%s', gets offering, '%s'", disk, offeringUuid)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("disk, '%s', gets offering, '%s'", disk, offeringUuid)); } if (StringUtils.isAnyEmpty(disk, offeringUuid) || _entityMgr.findByUuid(DiskOffering.class, offeringUuid) == null) { throw new InvalidParameterValueException(String.format("Disk offering ID: %s for disk ID: %s is invalid", offeringUuid, disk)); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java index e8b9f3addde7..1a34b7ea6cce 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportVmCmd.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.vm.VmImportService; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -52,7 +51,6 @@ authorized = {RoleType.Admin}, since = "4.19.0") public class ImportVmCmd extends ImportUnmanagedInstanceCmd { - public static final Logger LOGGER = Logger.getLogger(ImportVmCmd.class); @Inject public VmImportService vmImportService; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListUnmanagedInstancesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListUnmanagedInstancesCmd.java index 13b6748c74de..6932aa383fa2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListUnmanagedInstancesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListUnmanagedInstancesCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.vm.UnmanagedInstanceTO; import 
org.apache.cloudstack.vm.VmImportService; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -51,7 +50,6 @@ authorized = {RoleType.Admin}, since = "4.14.0") public class ListUnmanagedInstancesCmd extends BaseListCmd { - public static final Logger LOGGER = Logger.getLogger(ListUnmanagedInstancesCmd.class.getName()); @Inject public VmImportService vmImportService; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVMsCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVMsCmdByAdmin.java index 5b3e607416be..b48941e7d17a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVMsCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVMsCmdByAdmin.java @@ -27,14 +27,12 @@ import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.UserVmResponse; -import org.apache.log4j.Logger; import com.cloud.vm.VirtualMachine; @APICommand(name = "listVirtualMachines", description = "List the virtual machines owned by the account.", responseObject = UserVmResponse.class, responseView = ResponseView.Full, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ListVMsCmdByAdmin extends ListVMsCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(ListVMsCmdByAdmin.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java index 88df04d9ef56..f40f1c0cb4a9 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ListVmsForImportCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.vm.UnmanagedInstanceTO; import org.apache.cloudstack.vm.VmImportService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -50,7 +49,6 @@ authorized = {RoleType.Admin}, since = "4.19.0") public class ListVmsForImportCmd extends BaseListCmd { - public static final Logger LOGGER = Logger.getLogger(ListVmsForImportCmd.class.getName()); @Inject public VmImportService vmImportService; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java index b685b366cd11..8881a2bc354e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.vm; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -50,7 +49,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class MigrateVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MigrateVMCmd.class.getName()); ///////////////////////////////////////////////////// @@ -184,10 +182,10 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to migrate vm"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (VirtualMachineMigrationException | ConcurrentOperationException | 
ManagementServerException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java index 549d02b45791..b736e8606364 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -52,7 +51,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MigrateVirtualMachineWithVolumeCmd.class.getName()); ///////////////////////////////////////////////////// @@ -157,7 +155,7 @@ private Host getDestinationHost() { Host destinationHost = _resourceService.getHost(getHostId()); // OfflineVmwareMigration: destination host would have to not be a required parameter for stopped VMs if (destinationHost == null) { - s_logger.error(String.format("Unable to find the host with ID [%s].", getHostId())); + logger.error(String.format("Unable to find the host with ID [%s].", getHostId())); throw new InvalidParameterValueException("Unable to find the specified host to migrate the VM."); } return destinationHost; @@ -193,10 +191,10 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to migrate vm"); } } catch (ResourceUnavailableException ex) { 
- s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException | ManagementServerException | VirtualMachineMigrationException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/RecoverVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/RecoverVMCmd.java index b0698ed1e7a2..f34d555dc707 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/RecoverVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/RecoverVMCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.vm; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -38,7 +37,6 @@ @APICommand(name = "recoverVirtualMachine", description = "Recovers a virtual machine.", responseObject = UserVmResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RecoverVMCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RecoverVMCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/UnmanageVMInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/UnmanageVMInstanceCmd.java index 20c7c536c45e..bbcb8840f666 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/UnmanageVMInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/UnmanageVMInstanceCmd.java @@ -40,7 +40,6 @@ import org.apache.cloudstack.api.response.UserVmResponse; import 
org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.vm.UnmanagedVMsManager; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -53,7 +52,6 @@ since = "4.15.0") public class UnmanageVMInstanceCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(UnmanageVMInstanceCmd.class); @Inject private UnmanagedVMsManager unmanagedVMsManager; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/DestroyVolumeCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/DestroyVolumeCmdByAdmin.java index 44ce32f90c2f..0840b4ce6f99 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/DestroyVolumeCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/DestroyVolumeCmdByAdmin.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -37,7 +36,6 @@ responseHasSensitiveInfo = true) public class DestroyVolumeCmdByAdmin extends DestroyVolumeCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(DestroyVolumeCmdByAdmin.class.getName()); @Override public void execute() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/RecoverVolumeCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/RecoverVolumeCmdByAdmin.java index f51aeec97197..e276c8a00b65 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/RecoverVolumeCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/RecoverVolumeCmdByAdmin.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -36,7 +35,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RecoverVolumeCmdByAdmin extends RecoverVolumeCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(RecoverVolumeCmdByAdmin.class.getName()); @Override public void execute() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayByAdminCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayByAdminCmd.java index b5ba70cf5e27..1b2163853ec3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayByAdminCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreatePrivateGatewayByAdminCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.vpc; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ since = "4.17.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreatePrivateGatewayByAdminCmd extends CreatePrivateGatewayCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(CreatePrivateGatewayByAdminCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java index b69e7f4a828e..382c081e01e2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -48,7 +47,6 @@ @APICommand(name = "createVPCOffering", description = "Creates VPC offering", responseObject = VpcOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateVPCOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -131,8 +129,8 @@ public Map> getServiceProviders() { Iterator> iter = servicesCollection.iterator(); while (iter.hasNext()) { Map obj = iter.next(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("service provider entry specified: " + obj); + if (logger.isTraceEnabled()) { + logger.trace("service provider entry specified: " + obj); } HashMap services = (HashMap)obj; String service = services.get("service"); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeletePrivateGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeletePrivateGatewayCmd.java index d6c3cac68c72..d104edc381bc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeletePrivateGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeletePrivateGatewayCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.vpc; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -41,7 +40,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class DeletePrivateGatewayCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeletePrivateGatewayCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeleteVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeleteVPCOfferingCmd.java index aba4c857b0b9..6aa0c3f3afce 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeleteVPCOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/DeleteVPCOfferingCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.vpc; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "deleteVPCOffering", description = "Deletes VPC offering", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVPCOfferingCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVPCOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/ListPrivateGatewaysCmdByAdminCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/ListPrivateGatewaysCmdByAdminCmd.java index 
13a63e9cdd83..1211bd3311c5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/ListPrivateGatewaysCmdByAdminCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/ListPrivateGatewaysCmdByAdminCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.vpc; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ResponseObject; @@ -30,6 +29,5 @@ responseView = ResponseObject.ResponseView.Full, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPrivateGatewaysCmdByAdminCmd extends ListPrivateGatewaysCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(ListPrivateGatewaysCmdByAdminCmd.class.getName()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java index 12babad504e0..b59837281ef3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.domain.Domain; @@ -40,7 +39,6 @@ @APICommand(name = "updateVPCOffering", description = "Updates VPC offering", responseObject = VpcOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVPCOfferingCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVPCOfferingCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java index aca3e00d0957..24660e41ed9b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.zone; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "createZone", description = "Creates a Zone.", responseObject = ZoneResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateZoneCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateZoneCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/DeleteZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/DeleteZoneCmd.java index c530e999bf8d..b89636c6fe52 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/DeleteZoneCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/DeleteZoneCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.zone; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "deleteZone", description = "Deletes a Zone.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteZoneCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteZoneCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/MarkDefaultZoneForAccountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/MarkDefaultZoneForAccountCmd.java index 264aea3f449c..5d3f5dcd47fa 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/MarkDefaultZoneForAccountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/MarkDefaultZoneForAccountCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.admin.zone; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -37,7 +36,6 @@ @APICommand(name = "markDefaultZoneForAccount", description = "Marks a default zone for this account", responseObject = AccountResponse.class, since = "4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class MarkDefaultZoneForAccountCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MarkDefaultZoneForAccountCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java index 1379050cbf79..1b2793d3e158 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java @@ -19,7 +19,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "updateZone", description = "Updates a Zone.", responseObject = ZoneResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateZoneCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateZoneCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java index 57c0e4848434..2fbcb6df1ccb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/AddAccountToProjectCmd.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.response.ProjectRoleResponse; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -44,7 +43,6 @@ @APICommand(name = "addAccountToProject", description = "Adds account to a project", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddAccountToProjectCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddAccountToProjectCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteAccountFromProjectCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteAccountFromProjectCmd.java index 34935f506ec9..5e0977938a30 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteAccountFromProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteAccountFromProjectCmd.java @@ -19,7 +19,6 @@ import java.util.List; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "deleteAccountFromProject", description = "Deletes account from the project", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAccountFromProjectCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAccountFromProjectCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java index 596fb876008c..8319911c5c8d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/DeleteUserFromProjectCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -41,7 +40,6 @@ @APICommand(name = "deleteUserFromProject", description = "Deletes user from the project", responseObject = SuccessResponse.class, since = "4.15.0", requestHasSensitiveInfo = false, 
responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin, RoleType.User}) public class DeleteUserFromProjectCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(DeleteUserFromProjectCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java index 66a4d91172af..11c4d863c47e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.server.ResourceIcon; @@ -41,7 +40,6 @@ @APICommand(name = "listAccounts", description = "Lists accounts and provides detailed account information for listed accounts", responseObject = AccountResponse.class, responseView = ResponseView.Restricted, entityType = {Account.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ListAccountsCmd extends BaseListDomainResourcesCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListAccountsCmd.class.getName()); private static final String s_name = "listaccountsresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListProjectAccountsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListProjectAccountsCmd.java index 3d50fc553b80..21aedc7d7725 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListProjectAccountsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListProjectAccountsCmd.java @@ -26,14 +26,12 @@ import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.ProjectRoleResponse; import org.apache.cloudstack.api.response.UserResponse; -import org.apache.log4j.Logger; import com.cloud.user.Account; @APICommand(name = "listProjectAccounts", description = "Lists project's accounts", responseObject = ProjectResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListProjectAccountsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListProjectAccountsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java index 75b83b40f3eb..5ea144785167 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/address/AssociateIPAddrCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -65,7 +64,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AssociateIPAddrCmd extends BaseAsyncCreateCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(AssociateIPAddrCmd.class.getName()); private static final String s_name = "associateipaddressresponse"; ///////////////////////////////////////////////////// @@ -325,11 +323,11 @@ public void create() throws ResourceAllocationException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to allocate IP 
address"); } } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientAddressCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex); + logger.info(ex); + logger.trace(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java index f9bfcb253b4d..f4c06e512f07 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/address/DisassociateIPAddrCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.address; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -39,7 +38,6 @@ @APICommand(name = "disassociateIpAddress", description = "Disassociates an IP address from the account.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, entityType = { IpAddress.class }) public class DisassociateIPAddrCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DisassociateIPAddrCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java index 22eb70cc0c21..5c1c61130ba6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ListPublicIpAddressesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @APICommand(name = "listPublicIpAddresses", description = "Lists all public IP addresses", responseObject = IPAddressResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, entityType = { IpAddress.class }) public class ListPublicIpAddressesCmd extends BaseListRetrieveOnlyResourceCountCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListPublicIpAddressesCmd.class.getName()); private static final String s_name = "listpublicipaddressesresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReleaseIPAddrCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReleaseIPAddrCmd.java index eb908300283b..effe45c51ed0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReleaseIPAddrCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReleaseIPAddrCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.address; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -41,7 +40,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ReleaseIPAddrCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseIPAddrCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReserveIPAddrCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReserveIPAddrCmd.java index 5e72986fb582..e323d413b9ac 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReserveIPAddrCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/address/ReserveIPAddrCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.address; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -51,7 +50,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ReserveIPAddrCmd extends BaseCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ReserveIPAddrCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/address/UpdateIPAddrCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/address/UpdateIPAddrCmd.java index 7055e1d5cb2e..194967e2e4a4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/address/UpdateIPAddrCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/address/UpdateIPAddrCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.address; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -42,7 +41,6 @@ @APICommand(name = "updateIpAddress", description = "Updates an IP address", responseObject = IPAddressResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, entityType = { IpAddress.class }) public class UpdateIPAddrCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateIPAddrCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/CreateAffinityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/CreateAffinityGroupCmd.java index 60dbc2a6e9a0..ee0a38ef35dc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/CreateAffinityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/CreateAffinityGroupCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.affinitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.APICommand; @@ -37,7 +36,6 @@ @APICommand(name = "createAffinityGroup", responseObject = AffinityGroupResponse.class, description = "Creates an affinity/anti-affinity group", entityType = {AffinityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateAffinityGroupCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateAffinityGroupCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java index c8967b080204..2f24158cadbb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/DeleteAffinityGroupCmd.java @@ -18,7 +18,6 @@ import org.apache.cloudstack.api.response.ProjectResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -40,7 +39,6 @@ @APICommand(name = "deleteAffinityGroup", description = "Deletes affinity group", responseObject = SuccessResponse.class, entityType = {AffinityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAffinityGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAffinityGroupCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff 
--git a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupTypesCmd.java index 2d6f45cc06e5..c90294171b31 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupTypesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupTypesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupTypeResponse; import org.apache.cloudstack.api.APICommand; @@ -31,7 +30,6 @@ @APICommand(name = "listAffinityGroupTypes", description = "Lists affinity group types available", responseObject = AffinityGroupTypeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAffinityGroupTypesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListAffinityGroupTypesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java index ed6c31451982..ee23e3794ce1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/ListAffinityGroupsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.affinitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -31,7 +30,6 @@ @APICommand(name = "listAffinityGroups", description = "Lists affinity groups", responseObject = AffinityGroupResponse.class, entityType = {AffinityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAffinityGroupsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListAffinityGroupsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/UpdateVMAffinityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/UpdateVMAffinityGroupCmd.java index c70e4fbda669..6cd9bce62595 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/UpdateVMAffinityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/affinitygroup/UpdateVMAffinityGroupCmd.java @@ -20,7 +20,6 @@ import java.util.EnumSet; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -55,7 +54,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class UpdateVMAffinityGroupCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVMAffinityGroupCmd.class.getName()); private static final String s_name = "updatevirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java index 
eff93522b626..a000e265f93b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScalePolicyCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -43,7 +42,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateAutoScalePolicyCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateAutoScalePolicyCmd.class.getName()); private static final String s_name = "autoscalepolicyresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java index cdbe153f0179..7c9362d4b691 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmGroupCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -45,7 +44,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateAutoScaleVmGroupCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateAutoScaleVmGroupCmd.class.getName()); private static final String s_name = "autoscalevmgroupresponse"; @@ -233,7 +231,7 @@ public void execute() { } } catch (Exception ex) { // TODO what will happen if Resource Layer fails in a step in between - s_logger.warn("Failed to create autoscale vm group", ex); + logger.warn("Failed to create autoscale vm group", ex); } finally { if (!success || vmGroup == null) { 
_autoScaleService.deleteAutoScaleVmGroup(getEntityId(), true); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java index db6ccd9ce53c..f5b8c3da8550 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java @@ -20,7 +20,6 @@ import java.util.Map; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -51,7 +50,6 @@ responseHasSensitiveInfo = false) @SuppressWarnings("rawtypes") public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateAutoScaleVmProfileCmd.class.getName()); private static final String s_name = "autoscalevmprofileresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java index 77bc15b1e0f1..0ffb9afdac4d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -39,7 +38,6 @@ @APICommand(name = "createCondition", description = "Creates a condition for VM auto scaling", responseObject = ConditionResponse.class, entityType = {Condition.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateConditionCmd extends 
BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateConditionCmd.class.getName()); private static final String s_name = "conditionresponse"; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java index cf5ff3660968..cee9460dbe60 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -38,7 +37,6 @@ @APICommand(name = "deleteAutoScalePolicy", description = "Deletes a autoscale policy.", responseObject = SuccessResponse.class, entityType = {AutoScalePolicy.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAutoScalePolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAutoScalePolicyCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// // /////////////////////////////////////////////////// @@ -93,7 +91,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - s_logger.warn("Failed to delete autoscale policy " + getId()); + logger.warn("Failed to delete autoscale policy " + getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete AutoScale Policy"); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java index badfcc0957e8..6bf2157533e1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -38,7 +37,6 @@ @APICommand(name = "deleteAutoScaleVmGroup", description = "Deletes a autoscale vm group.", responseObject = SuccessResponse.class, entityType = {AutoScaleVmGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAutoScaleVmGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAutoScaleVmGroupCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// // /////////////////////////////////////////////////// @@ -103,7 +101,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - s_logger.warn("Failed to delete autoscale vm group " + getId()); + logger.warn("Failed to delete autoscale vm group " + getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete autoscale vm group"); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java index 06bf7a9d378f..b90f6aa8ffa5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -38,7 +37,6 @@ @APICommand(name = "deleteAutoScaleVmProfile", description = "Deletes a autoscale vm profile.", responseObject = SuccessResponse.class, entityType = {AutoScaleVmProfile.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteAutoScaleVmProfileCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteAutoScaleVmProfileCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// // /////////////////////////////////////////////////// @@ -92,7 +90,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - s_logger.warn("Failed to delete autoscale vm profile " + getId()); + logger.warn("Failed to delete autoscale vm profile " + getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete autoscale vm profile"); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java index 840484e45adc..9590012e0375 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = 
"deleteCondition", description = "Removes a condition for VM auto scaling", responseObject = SuccessResponse.class, entityType = {Condition.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteConditionCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteConditionCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// @@ -59,14 +57,14 @@ public void execute() { try { result = _autoScaleService.deleteCondition(getId()); } catch (ResourceInUseException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex.getMessage()); } if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - s_logger.warn("Failed to delete condition " + getId()); + logger.warn("Failed to delete condition " + getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete condition."); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DisableAutoScaleVmGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DisableAutoScaleVmGroupCmd.java index b0daf2ec6648..2414c0d82b62 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DisableAutoScaleVmGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DisableAutoScaleVmGroupCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "disableAutoScaleVmGroup", description = "Disables an AutoScale Vm Group", responseObject = AutoScaleVmGroupResponse.class, entityType = {AutoScaleVmGroup.class}, requestHasSensitiveInfo = false, 
responseHasSensitiveInfo = false) public class DisableAutoScaleVmGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DisableAutoScaleVmGroupCmd.class.getName()); private static final String s_name = "disableautoscalevmGroupresponse"; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/EnableAutoScaleVmGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/EnableAutoScaleVmGroupCmd.java index b6f2a82ac49f..96d329d3e0c8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/EnableAutoScaleVmGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/EnableAutoScaleVmGroupCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "enableAutoScaleVmGroup", description = "Enables an AutoScale Vm Group", responseObject = AutoScaleVmGroupResponse.class, entityType = {AutoScaleVmGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class EnableAutoScaleVmGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(EnableAutoScaleVmGroupCmd.class.getName()); private static final String s_name = "enableautoscalevmGroupresponse"; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScalePoliciesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScalePoliciesCmd.java index dc0baf4c7866..4935889c5255 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScalePoliciesCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScalePoliciesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "listAutoScalePolicies", description = "Lists autoscale policies.", responseObject = AutoScalePolicyResponse.class, entityType = {AutoScalePolicy.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAutoScalePoliciesCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListAutoScalePoliciesCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmGroupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmGroupsCmd.java index 8404fbbb49f4..6aa4abcccd8c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmGroupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmGroupsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -39,7 +38,6 @@ @APICommand(name = "listAutoScaleVmGroups", description = "Lists autoscale vm groups.", responseObject = AutoScaleVmGroupResponse.class, entityType = {AutoScaleVmGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAutoScaleVmGroupsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListAutoScaleVmGroupsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmProfilesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmProfilesCmd.java index 435471faf135..bcaea273ce84 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmProfilesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListAutoScaleVmProfilesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -37,7 +36,6 @@ @APICommand(name = "listAutoScaleVmProfiles", description = "Lists autoscale vm profiles.", responseObject = AutoScaleVmProfileResponse.class, entityType = {AutoScaleVmProfile.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAutoScaleVmProfilesCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListAutoScaleVmProfilesCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java index fc1ca7097c8e..febf937d75f6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListConditionsCmd.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "listConditions", description = "List Conditions for VM auto scaling", responseObject = ConditionResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) 
public class ListConditionsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListConditionsCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListCountersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListCountersCmd.java index 7da8bd40b61a..d03584fd63d5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListCountersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/ListCountersCmd.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "listCounters", description = "List the counters for VM auto scaling", responseObject = CounterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListCountersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListCountersCmd.class.getName()); private static final String s_name = "counterresponse"; // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScalePolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScalePolicyCmd.java index a64b5cb16801..927a9191fcc2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScalePolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScalePolicyCmd.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 
+40,6 @@ @APICommand(name = "updateAutoScalePolicy", description = "Updates an existing autoscale policy.", responseObject = AutoScalePolicyResponse.class, entityType = {AutoScalePolicy.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateAutoScalePolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateAutoScalePolicyCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmGroupCmd.java index 87cd1fd3f4ed..69ae8aa36aa1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmGroupCmd.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -42,7 +41,6 @@ @APICommand(name = "updateAutoScaleVmGroup", description = "Updates an existing autoscale vm group.", responseObject = AutoScaleVmGroupResponse.class, entityType = {AutoScaleVmGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateAutoScaleVmGroupCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateAutoScaleVmGroupCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java index 3e65d38e5201..e8ca502b5cdb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateAutoScaleVmProfileCmd.java @@ -20,7 +20,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -46,7 +45,6 @@ @APICommand(name = "updateAutoScaleVmProfile", description = "Updates an existing autoscale vm profile.", responseObject = AutoScaleVmProfileResponse.class, entityType = {AutoScaleVmProfile.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateAutoScaleVmProfileCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateAutoScaleVmProfileCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateConditionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateConditionCmd.java index e946dd370c13..4ed8244ff0c9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateConditionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/UpdateConditionCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.autoscale; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -41,7 +40,6 @@ authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.18.0") public class UpdateConditionCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(UpdateConditionCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// @@ -69,7 +67,7 @@ public void execute() { response.setResponseName(getCommandName()); 
setResponseObject(response); } catch (ResourceInUseException ex) { - LOGGER.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java index e9a140cf46eb..d2c91e578713 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java @@ -34,14 +34,12 @@ import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "createBucket", responseObject = BucketResponse.class, description = "Creates a bucket in the specified object storage pool. ", responseView = ResponseView.Restricted, entityType = {Bucket.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CreateBucketCmd extends BaseAsyncCreateCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CreateBucketCmd.class.getName()); private static final String s_name = "createbucketresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/DeleteBucketCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/DeleteBucketCmd.java index bf9552b779e6..8cd2790e4ae2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/DeleteBucketCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/DeleteBucketCmd.java @@ -30,13 +30,11 @@ import 
org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "deleteBucket", description = "Deletes an empty Bucket.", responseObject = SuccessResponse.class, entityType = {Bucket.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class DeleteBucketCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteBucketCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/ListBucketsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/ListBucketsCmd.java index 897b9fc66960..bda0c7ed381e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/ListBucketsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/ListBucketsCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.StoragePoolResponse; -import org.apache.log4j.Logger; import java.util.List; @@ -36,7 +35,6 @@ Bucket.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListBucketsCmd extends BaseListTaggedResourcesCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListBucketsCmd.class.getName()); private static final String s_name = "listbucketsresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java index b3b7e00770d9..8e281b20e915 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/UpdateBucketCmd.java @@ -32,13 +32,11 @@ import org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "updateBucket", description = "Updates Bucket properties", responseObject = SuccessResponse.class, entityType = {Bucket.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class UpdateBucketCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateBucketCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java index 65920a97c98a..cf25dfaf5b54 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java @@ -23,14 +23,12 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.response.CapabilitiesResponse; import org.apache.cloudstack.config.ApiServiceConfiguration; -import org.apache.log4j.Logger; import com.cloud.user.Account; @APICommand(name = "listCapabilities", description = "Lists capabilities", responseObject = CapabilitiesResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListCapabilitiesCmd 
extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListCapabilitiesCmd.class.getName()); @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/CreateConsoleEndpointCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/CreateConsoleEndpointCmd.java index 11e84f1bef9c..63b47e163b6b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/CreateConsoleEndpointCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/consoleproxy/CreateConsoleEndpointCmd.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.utils.consoleproxy.ConsoleAccessUtils; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.Map; @@ -46,7 +45,6 @@ authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CreateConsoleEndpointCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateConsoleEndpointCmd.class.getName()); @Inject private ConsoleAccessManager consoleManager; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java index cdff7882e656..669b1782e9e4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java @@ -19,7 +19,6 @@ import java.util.Date; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ArchiveEventsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ArchiveEventsCmd.class.getName()); 
///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java index 9d049ac5f063..c9c3f1d69554 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java @@ -19,7 +19,6 @@ import java.util.Date; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteEventsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteEventsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventTypesCmd.java index dd0f51792781..e3f14f711ce7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventTypesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventTypesCmd.java @@ -18,7 +18,6 @@ import java.util.ArrayList; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; @@ -30,7 +29,6 @@ @APICommand(name = "listEventTypes", description = "List Event Types", responseObject = EventTypeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListEventTypesCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListEventTypesCmd.class.getName()); @Override public long getEntityOwnerId() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java index 89f1c7090e0a..b5273c649222 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/event/ListEventsCmd.java @@ -24,14 +24,12 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.EventResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; import com.cloud.event.Event; @APICommand(name = "listEvents", description = "A command to list events.", responseObject = EventResponse.class, entityType = {Event.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListEventsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListEventsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java index bedb073b8806..8cbbcea6a59f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java @@ -21,7 +21,6 @@ import java.util.List; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -48,7 +47,6 @@ @APICommand(name = "createEgressFirewallRule", description = "Creates a egress firewall rule for a given network ", responseObject = FirewallResponse.class, entityType = {FirewallRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateEgressFirewallRuleCmd extends BaseAsyncCreateCmd implements 
FirewallRule { - public static final Logger s_logger = Logger.getLogger(CreateEgressFirewallRuleCmd.class.getName()); // /////////////////////////////////////////////////// @@ -257,10 +255,10 @@ public void create() { } } catch (NetworkRuleConflictException ex) { String message = "Network rule conflict: "; - if (!s_logger.isTraceEnabled()) { - s_logger.info(message + ex.getMessage()); + if (!logger.isTraceEnabled()) { + logger.info(message + ex.getMessage()); } else { - s_logger.trace(message, ex); + logger.trace(message, ex); } throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index b77041ee1748..24b5a78c0855 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -46,7 +45,6 @@ @APICommand(name = "createFirewallRule", description = "Creates a firewall rule for a given IP address", responseObject = FirewallResponse.class, entityType = {FirewallRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements FirewallRule { - public static final Logger s_logger = Logger.getLogger(CreateFirewallRuleCmd.class.getName()); // /////////////////////////////////////////////////// @@ -248,7 +246,7 @@ public void create() { setEntityUuid(result.getUuid()); } } catch (NetworkRuleConflictException ex) { - s_logger.trace("Network Rule Conflict: ", ex); + logger.trace("Network Rule Conflict: ", ex); throw new 
ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage(), ex); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java index 5e1362101502..3545b3d21fb9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -54,7 +53,6 @@ VirtualMachine.class, IpAddress.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements PortForwardingRule { - public static final Logger s_logger = Logger.getLogger(CreatePortForwardingRuleCmd.class.getName()); // /////////////////////////////////////////////////// @@ -352,7 +350,7 @@ public void create() { setEntityId(result.getId()); setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { - s_logger.trace("Network Rule Conflict: ", ex); + logger.trace("Network Rule Conflict: ", ex); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage(), ex); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteEgressFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteEgressFirewallRuleCmd.java index f0ba8a92d01c..b93d943eab9f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteEgressFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteEgressFirewallRuleCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.firewall; -import 
org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 +40,6 @@ @APICommand(name = "deleteEgressFirewallRule", description = "Deletes an egress firewall rule", responseObject = SuccessResponse.class, entityType = {FirewallRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteEgressFirewallRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteEgressFirewallRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteFirewallRuleCmd.java index da1f6b62eca4..c4a4dfd75cb1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeleteFirewallRuleCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.firewall; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -40,7 +39,6 @@ @APICommand(name = "deleteFirewallRule", description = "Deletes a firewall rule", responseObject = SuccessResponse.class, entityType = {FirewallRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteFirewallRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteFirewallRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeletePortForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeletePortForwardingRuleCmd.java index aebf8da159b9..267d18d8a8a7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeletePortForwardingRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/DeletePortForwardingRuleCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.firewall; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "deletePortForwardingRule", description = "Deletes a port forwarding rule", responseObject = SuccessResponse.class, entityType = {PortForwardingRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeletePortForwardingRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeletePortForwardingRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListEgressFirewallRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListEgressFirewallRulesCmd.java index c8c0e85252bb..aa0fd28fdec8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListEgressFirewallRulesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListEgressFirewallRulesCmd.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -39,7 +38,6 @@ @APICommand(name = "listEgressFirewallRules", description = "Lists all egress firewall rules for network ID.", responseObject = FirewallResponse.class, entityType = {FirewallRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListEgressFirewallRulesCmd extends BaseListTaggedResourcesCmd implements IListFirewallRulesCmd { - public static final Logger s_logger = Logger.getLogger(ListEgressFirewallRulesCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListFirewallRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListFirewallRulesCmd.java index f79b77893d2a..19a05b158908 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListFirewallRulesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListFirewallRulesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -38,7 +37,6 @@ @APICommand(name = "listFirewallRules", description = "Lists all firewall rules for an IP address.", responseObject = FirewallResponse.class, entityType = {FirewallRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListFirewallRulesCmd extends BaseListTaggedResourcesCmd implements IListFirewallRulesCmd { - public static final Logger s_logger = Logger.getLogger(ListFirewallRulesCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListPortForwardingRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListPortForwardingRulesCmd.java index 3a942c485fcd..a2e9152a9e47 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListPortForwardingRulesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/ListPortForwardingRulesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -37,7 +36,6 @@ @APICommand(name = "listPortForwardingRules", description = "Lists all port forwarding rules for an IP address.", responseObject = 
FirewallRuleResponse.class, entityType = {PortForwardingRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPortForwardingRulesCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListPortForwardingRulesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateEgressFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateEgressFirewallRuleCmd.java index 1aa060743676..a8db4ec2b29e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateEgressFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateEgressFirewallRuleCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.FirewallResponse; import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -37,7 +36,6 @@ @APICommand(name = "updateEgressFirewallRule", description = "Updates egress firewall rule ", responseObject = FirewallResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateEgressFirewallRuleCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateEgressFirewallRuleCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateFirewallRuleCmd.java index b39efa01350d..89c9bc891eb0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateFirewallRuleCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdateFirewallRuleCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.FirewallResponse; import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -37,7 +36,6 @@ @APICommand(name = "updateFirewallRule", description = "Updates firewall rule ", responseObject = FirewallResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateFirewallRuleCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateFirewallRuleCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdatePortForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdatePortForwardingRuleCmd.java index 2afc0bb66adf..3fb66bd861fc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdatePortForwardingRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/UpdatePortForwardingRuleCmd.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.UserVmResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -38,7 +37,6 @@ description = "Updates a port forwarding rule. 
Only the private port and the virtual machine can be updated.", entityType = {PortForwardingRule.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdatePortForwardingRuleCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdatePortForwardingRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCategoriesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCategoriesCmd.java index 18a209011c1a..c74514d662ca 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCategoriesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCategoriesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listOsCategories", description = "Lists all supported OS categories for this cloud.", responseObject = GuestOSCategoryResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListGuestOsCategoriesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListGuestOsCategoriesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCmd.java index 9d6cd4385610..b31a46692201 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/guest/ListGuestOsCmd.java @@ -20,7 +20,6 @@ import java.util.List; import 
org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -37,7 +36,6 @@ @APICommand(name = "listOsTypes", description = "Lists all supported OS types for this cloud.", responseObject = GuestOSResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListGuestOsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListGuestOsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java index d4c59cd8d475..4e3cf4621efd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/CreateIpv6FirewallRuleCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -50,7 +49,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CreateIpv6FirewallRuleCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateIpv6FirewallRuleCmd.class.getName()); // /////////////////////////////////////////////////// @@ -224,7 +222,7 @@ public void create() { setEntityId(result.getId()); setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException e) { - s_logger.trace("Network Rule Conflict: ", e); + logger.trace("Network Rule Conflict: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, 
e.getMessage(), e); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/DeleteIpv6FirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/DeleteIpv6FirewallRuleCmd.java index 5e176a3fabf1..aaee19b59489 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/DeleteIpv6FirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/DeleteIpv6FirewallRuleCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceUnavailableException; @@ -41,7 +40,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class DeleteIpv6FirewallRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteIpv6FirewallRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/ListIpv6FirewallRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/ListIpv6FirewallRulesCmd.java index 239413162751..7ade2e3ed040 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/ListIpv6FirewallRulesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/ListIpv6FirewallRulesCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.NetworkResponse; -import org.apache.log4j.Logger; import com.cloud.network.rules.FirewallRule; import com.cloud.utils.Pair; @@ -41,7 +40,6 @@ responseHasSensitiveInfo = false, authorized = 
{RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListIpv6FirewallRulesCmd extends BaseListTaggedResourcesCmd implements IListFirewallRulesCmd { - public static final Logger s_logger = Logger.getLogger(ListIpv6FirewallRulesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/UpdateIpv6FirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/UpdateIpv6FirewallRuleCmd.java index 49765c58dafa..2d63d703dc5e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/UpdateIpv6FirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ipv6/UpdateIpv6FirewallRuleCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.FirewallResponse; import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceUnavailableException; @@ -41,7 +40,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class UpdateIpv6FirewallRuleCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateIpv6FirewallRuleCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/AttachIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/AttachIsoCmd.java index cdc72de4a107..d795fbabb528 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/AttachIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/AttachIsoCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.iso; import org.apache.cloudstack.api.ApiCommandResourceType; -import 
org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ @APICommand(name = "attachIso", description = "Attaches an ISO to a virtual machine.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class AttachIsoCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(AttachIsoCmd.class.getName()); private static final String s_name = "attachisoresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/CopyIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/CopyIsoCmd.java index b7c13ce4adb2..2db7b7e1eb95 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/CopyIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/CopyIsoCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.iso; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -26,6 +25,5 @@ @APICommand(name = "copyIso", description = "Copies an ISO from one zone to another.", responseObject = TemplateResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CopyIsoCmd extends CopyTemplateCmd { - public static final Logger s_logger = Logger.getLogger(CopyIsoCmd.class.getName()); private static final String s_name = "copyisoresponse"; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java index 504e7c1b5cf7..feae31026b9f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DeleteIsoCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.iso; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -37,7 +36,6 @@ @APICommand(name = "deleteIso", description = "Deletes an ISO file.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteIsoCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteIsoCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java index e3b22c4a49f7..292e1c6f099b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/DetachIsoCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.iso; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "detachIso", description = "Detaches any ISO file (if any) currently attached to a virtual machine.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DetachIsoCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(DetachIsoCmd.class.getName()); private static final String s_name = "detachisoresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java index 03ba2fab7225..5db680066a6f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ExtractIsoCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.iso; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -38,7 +37,6 @@ @APICommand(name = "extractIso", description = "Extracts an ISO", responseObject = ExtractResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ExtractIsoCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ExtractIsoCmd.class.getName()); ///////////////////////////////////////////////////// @@ -130,7 +128,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to extract ISO"); } } catch (InternalErrorException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java index e17595662994..01a47f22b726 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java @@ -117,7 +117,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setResponseName(getCommandName()); setResponseObject(response); } catch (ResourceAllocationException | MalformedURLException e) { - s_logger.error("Exception while registering ISO", e); + logger.error("Exception while registering ISO", 
e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Exception while registering ISO: " + e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java index fbbe0880aef9..6f220c774b84 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.iso; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; @@ -40,10 +39,6 @@ public String getMediaType() { return "iso"; } - @Override - protected Logger getLogger() { - return Logger.getLogger(ListIsoPermissionsCmd.class.getName()); - } @Override protected boolean templateIsCorrectType(VirtualMachineTemplate template) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java index f723cb93ae90..04dcbf8ca964 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java @@ -19,7 +19,6 @@ import com.cloud.server.ResourceIcon; import com.cloud.server.ResourceTag; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -41,7 +40,6 @@ @APICommand(name = "listIsos", description = "Lists all available ISO files.", responseObject = TemplateResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListIsosCmd extends 
BaseListTaggedResourcesCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListIsosCmd.class.getName()); private static final String s_name = "listisosresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index 1d750038042b..becfdcd653d3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.ResourceAllocationException; import com.cloud.template.VirtualMachineTemplate; @@ -43,7 +42,6 @@ @APICommand(name = "registerIso", responseObject = TemplateResponse.class, description = "Registers an existing ISO into the CloudStack Cloud.", responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RegisterIsoCmd extends BaseCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(RegisterIsoCmd.class.getName()); private static final String s_name = "registerisoresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoCmd.java index 58c475c661e3..95d9feed7e33 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.iso; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiErrorCode; @@ -33,7 +32,6 @@ @APICommand(name = "updateIso", description = "Updates an ISO file.", responseObject = TemplateResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateIsoCmd extends BaseUpdateTemplateOrIsoCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateIsoCmd.class.getName()); private static final String s_name = "updateisoresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoPermissionsCmd.java index dd07faf9a873..02ada253fd25 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/UpdateIsoPermissionsCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.iso; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd; @@ -33,10 +32,6 @@ protected String getResponseName() { return "updateisopermissionsresponse"; } - @Override - protected Logger getLogger() { - return Logger.getLogger(UpdateIsoPermissionsCmd.class.getName()); - } @Override public long getEntityOwnerId() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/job/QueryAsyncJobResultCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/job/QueryAsyncJobResultCmd.java index f578078e3a18..3d328543dc29 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/job/QueryAsyncJobResultCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/job/QueryAsyncJobResultCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.job; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -29,7 +28,6 @@ @APICommand(name = "queryAsyncJobResult", description = "Retrieves the current status of asynchronous job.", responseObject = AsyncJobResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QueryAsyncJobResultCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QueryAsyncJobResultCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignCertToLoadBalancerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignCertToLoadBalancerCmd.java index 50d69c8d3aea..4f9d2f37d13f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignCertToLoadBalancerCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignCertToLoadBalancerCmd.java @@ -17,7 +17,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.loadbalancer; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AssignCertToLoadBalancerCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AssignCertToLoadBalancerCmd.class.getName()); @Parameter(name = ApiConstants.LBID, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java index c245ab236fea..81a52ce2dfec 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/AssignToLoadBalancerRuleCmd.java @@ -24,7 +24,6 @@ import java.util.Map; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -51,7 +50,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AssignToLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AssignToLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateApplicationLoadBalancerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateApplicationLoadBalancerCmd.java index 8c63c8f7951c..2199dfb4e8b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateApplicationLoadBalancerCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateApplicationLoadBalancerCmd.java @@ -28,7 +28,6 @@ 
import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InsufficientAddressCapacityException; @@ -44,7 +43,6 @@ @APICommand(name = "createLoadBalancer", description = "Creates an internal load balancer", responseObject = ApplicationLoadBalancerResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateApplicationLoadBalancerCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateApplicationLoadBalancerCmd.class.getName()); ///////////////////////////////////////////////////// @@ -202,7 +200,7 @@ public void execute() throws ResourceAllocationException, ResourceUnavailableExc setResponseObject(lbResponse); lbResponse.setResponseName(getCommandName()); } catch (Exception ex) { - s_logger.warn("Failed to create load balancer due to exception ", ex); + logger.warn("Failed to create load balancer due to exception ", ex); } finally { if (rule == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create load balancer"); @@ -220,13 +218,13 @@ public void create() { this.setEntityId(result.getId()); this.setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } catch (InsufficientAddressCapacityException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, e.getMessage()); } catch (InsufficientVirtualNetworkCapacityException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, 
e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java index 57cb80787385..c24a5f19f077 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBHealthCheckPolicyCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -44,7 +43,6 @@ responseHasSensitiveInfo = false) @SuppressWarnings("rawtypes") public class CreateLBHealthCheckPolicyCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateLBHealthCheckPolicyCmd.class.getName()); // /////////////////////////////////////////////////// @@ -181,7 +179,7 @@ public void create() { this.setEntityId(result.getId()); this.setEntityUuid(result.getUuid()); } catch (InvalidParameterValueException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java index 66a15984ae42..c6b5036bc955 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLBStickinessPolicyCmd.java @@ -29,7 +29,6 @@ import 
org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -44,7 +43,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @SuppressWarnings("rawtypes") public class CreateLBStickinessPolicyCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateLBStickinessPolicyCmd.class.getName()); private static final String s_name = "createLBStickinessPolicy"; @@ -164,7 +162,7 @@ public void create() { this.setEntityId(result.getId()); this.setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java index ef9e46f3a76f..f86d1ae85dab 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/CreateLoadBalancerRuleCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -52,7 +51,6 @@ @APICommand(name = "createLoadBalancerRule", description = "Creates a load balancer rule", responseObject = LoadBalancerResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateLoadBalancerRuleCmd extends 
BaseAsyncCreateCmd /*implements LoadBalancer */{ - public static final Logger s_logger = Logger.getLogger(CreateLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// @@ -284,7 +282,7 @@ public void execute() throws ResourceAllocationException, ResourceUnavailableExc } lbResponse.setResponseName(getCommandName()); } catch (Exception ex) { - s_logger.warn("Failed to create LB rule due to exception ", ex); + logger.warn("Failed to create LB rule due to exception ", ex); } finally { if (!success || rule == null) { @@ -309,10 +307,10 @@ public void create() { this.setEntityId(result.getId()); this.setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } catch (InsufficientAddressCapacityException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, e.getMessage()); } catch (InvalidParameterValueException e) { throw new ServerApiException(ApiErrorCode.PARAM_ERROR, e.getMessage()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteApplicationLoadBalancerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteApplicationLoadBalancerCmd.java index 912c760168b7..410df086393c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteApplicationLoadBalancerCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteApplicationLoadBalancerCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.loadbalancer; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -36,7 +35,6 @@ @APICommand(name = "deleteLoadBalancer", description = "Deletes an internal load balancer", responseObject = SuccessResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteApplicationLoadBalancerCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteApplicationLoadBalancerCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java index 159d6b21cbc6..3cf1f345037a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBHealthCheckPolicyCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.loadbalancer; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "deleteLBHealthCheckPolicy", description = "Deletes a load balancer health check policy.", responseObject = SuccessResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteLBHealthCheckPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteLBHealthCheckPolicyCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBStickinessPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBStickinessPolicyCmd.java index 10c342911b44..5d04de3cae58 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBStickinessPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLBStickinessPolicyCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.loadbalancer; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "deleteLBStickinessPolicy", description = "Deletes a load balancer stickiness policy.", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteLBStickinessPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteLBStickinessPolicyCmd.class.getName()); private static final String s_name = "deleteLBstickinessrruleresponse"; // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLoadBalancerRuleCmd.java index f05d4cd2be38..b4079430ee32 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteLoadBalancerRuleCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.loadbalancer; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -37,7 +36,6 @@ @APICommand(name = "deleteLoadBalancerRule", description = "Deletes a load balancer rule.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteSslCertCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteSslCertCmd.java index 3db733182a1f..887007e537e8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteSslCertCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/DeleteSslCertCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "deleteSslCert", description = "Delete a certificate to CloudStack", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSslCertCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteSslCertCmd.class.getName()); @Inject diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListApplicationLoadBalancersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListApplicationLoadBalancersCmd.java index ad68b301844d..d54f3e1155ec 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListApplicationLoadBalancersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListApplicationLoadBalancersCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.rules.LoadBalancerContainer.Scheme; @@ -38,7 +37,6 @@ @APICommand(name = "listLoadBalancers", description = "Lists internal load balancers", responseObject = ApplicationLoadBalancerResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListApplicationLoadBalancersCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListApplicationLoadBalancersCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java index 1c1f5d1f9afe..cb2cdb446d1c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBHealthCheckPoliciesCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; @@ -37,7 +36,6 @@ @APICommand(name = "listLBHealthCheckPolicies", description = "Lists load balancer health check policies.", responseObject = 
LBHealthCheckResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListLBHealthCheckPoliciesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListLBHealthCheckPoliciesCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBStickinessPoliciesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBStickinessPoliciesCmd.java index 3d08d9257978..a48e2ea37b71 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBStickinessPoliciesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLBStickinessPoliciesCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.StickinessPolicy; @@ -38,7 +37,6 @@ @APICommand(name = "listLBStickinessPolicies", description = "Lists load balancer stickiness policies.", responseObject = LBStickinessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListLBStickinessPoliciesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListLBStickinessPoliciesCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java index 723e0efec122..3bfc68a95bad 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.command.user.UserCmd; import org.apache.cloudstack.api.response.LoadBalancerRuleVmMapResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ListLoadBalancerRuleInstancesCmd extends BaseListCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListLoadBalancerRuleInstancesCmd.class.getName()); private static final String s_name = "listloadbalancerruleinstancesresponse"; @@ -97,10 +95,10 @@ public String getCommandName() { public void execute() { Pair, List> vmServiceMap = _lbService.listLoadBalancerInstances(this); List result = vmServiceMap.first(); - s_logger.debug(String.format("A total of [%s] user VMs were obtained when listing the load balancer instances: [%s].", result.size(), result)); + logger.debug(String.format("A total of [%s] user VMs were obtained when listing the load balancer instances: [%s].", result.size(), result)); List serviceStates = vmServiceMap.second(); - s_logger.debug(String.format("A total of [%s] service states were obtained when listing the load balancer instances: [%s].", serviceStates.size(), serviceStates)); + logger.debug(String.format("A total of [%s] service states were obtained when listing the load balancer instances: [%s].", serviceStates.size(), serviceStates)); if (!isListLbVmip()) { ListResponse response = new ListResponse<>(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRulesCmd.java index 51a8fa4bfc8c..b8b82f0c4a89 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRulesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRulesCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.network.rules.LoadBalancer; import com.cloud.utils.Pair; @@ -39,7 +38,6 @@ @APICommand(name = "listLoadBalancerRules", description = "Lists load balancer rules.", responseObject = LoadBalancerResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListLoadBalancerRulesCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListLoadBalancerRulesCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListSslCertsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListSslCertsCmd.java index 3f422801780b..1bc300fdc69f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListSslCertsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListSslCertsCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ @APICommand(name = "listSslCerts", description = "Lists SSL certificates", responseObject = SslCertResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSslCertsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListSslCertsCmd.class.getName()); @Inject diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveCertFromLoadBalancerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveCertFromLoadBalancerCmd.java index 38593629f787..dfaafe89923b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveCertFromLoadBalancerCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveCertFromLoadBalancerCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.loadbalancer; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveCertFromLoadBalancerCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveCertFromLoadBalancerCmd.class.getName()); @Parameter(name = ApiConstants.LBID, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveFromLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveFromLoadBalancerRuleCmd.java index 01c30c4b3c73..d29f2676ed55 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveFromLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/RemoveFromLoadBalancerRuleCmd.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import com.cloud.vm.VirtualMachine; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -49,7 +48,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveFromLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveFromLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// 
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateApplicationLoadBalancerCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateApplicationLoadBalancerCmd.java index 27d49096ed69..d129cd8988f8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateApplicationLoadBalancerCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateApplicationLoadBalancerCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -34,7 +33,6 @@ @APICommand(name = "updateLoadBalancer", description = "Updates an internal load balancer", responseObject = ApplicationLoadBalancerResponse.class, since = "4.4.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateApplicationLoadBalancerCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateApplicationLoadBalancerCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java index b6bb59e60dc9..fdd98fc3a0a4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java @@ -19,7 +19,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.LBHealthCheckResponse; import org.apache.cloudstack.context.CallContext; -import 
org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.network.rules.HealthCheckPolicy; @@ -30,7 +29,6 @@ @APICommand(name = "updateLBHealthCheckPolicy", description = "Updates load balancer health check policy", responseObject = LBHealthCheckResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateLBHealthCheckPolicyCmd extends BaseAsyncCustomIdCmd{ - public static final Logger s_logger = Logger.getLogger(UpdateLBHealthCheckPolicyCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java index 2b1f1cc38f60..b2137cf262d8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java @@ -19,7 +19,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.LBStickinessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.network.rules.LoadBalancer; @@ -29,7 +28,6 @@ @APICommand(name = "updateLBStickinessPolicy", description = "Updates load balancer stickiness policy", responseObject = LBStickinessResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateLBStickinessPolicyCmd extends BaseAsyncCustomIdCmd{ - public static final Logger s_logger = Logger.getLogger(UpdateLBStickinessPolicyCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLoadBalancerRuleCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLoadBalancerRuleCmd.java index b09c01adee63..25254ba9eb75 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLoadBalancerRuleCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse; import org.apache.cloudstack.api.response.LoadBalancerResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -38,7 +37,6 @@ @APICommand(name = "updateLoadBalancerRule", description = "Updates load balancer", responseObject = LoadBalancerResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateLoadBalancerRuleCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UploadSslCertCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UploadSslCertCmd.java index abafde856158..e51b4dee9db6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UploadSslCertCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UploadSslCertCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ @APICommand(name = "uploadSslCert", description = "Upload a certificate to CloudStack", responseObject = SslCertResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public 
class UploadSslCertCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UploadSslCertCmd.class.getName()); @Inject diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java index 62c4906b1b78..e883a7a0e4dd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -45,7 +44,6 @@ @APICommand(name = "createIpForwardingRule", description = "Creates an IP forwarding rule", responseObject = FirewallRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateIpForwardingRuleCmd extends BaseAsyncCreateCmd implements StaticNatRule { - public static final Logger s_logger = Logger.getLogger(CreateIpForwardingRuleCmd.class.getName()); ///////////////////////////////////////////////////// @@ -150,7 +148,7 @@ public void create() { setEntityId(rule.getId()); setEntityUuid(rule.getUuid()); } catch (NetworkRuleConflictException e) { - s_logger.info("Unable to create static NAT rule due to ", e); + logger.info("Unable to create static NAT rule due to ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DeleteIpForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DeleteIpForwardingRuleCmd.java index 5b1335ccf22d..e4c16a317518 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DeleteIpForwardingRuleCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DeleteIpForwardingRuleCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.nat; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -37,7 +36,6 @@ @APICommand(name = "deleteIpForwardingRule", description = "Deletes an IP forwarding rule", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteIpForwardingRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteIpForwardingRuleCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DisableStaticNatCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DisableStaticNatCmd.java index 9afdfa3642ee..2bee7dfcc895 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DisableStaticNatCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/DisableStaticNatCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.nat; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "disableStaticNat", description = "Disables static rule for given IP address", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DisableStaticNatCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DisableStaticNatCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java index ba97356582a3..48c6cc20bf1c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.nat; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "enableStaticNat", description = "Enables static NAT for given IP address", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class EnableStaticNatCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(EnableStaticNatCmd.class.getName()); ///////////////////////////////////////////////////// @@ -133,8 +131,8 @@ public void execute() throws ResourceUnavailableException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to enable static NAT"); } } catch (NetworkRuleConflictException ex) { - s_logger.info("Network rule conflict: " + ex.getMessage()); - s_logger.trace("Network Rule Conflict: ", ex); + logger.info("Network rule conflict: " + ex.getMessage()); + logger.trace("Network Rule Conflict: ", ex); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/ListIpForwardingRulesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/ListIpForwardingRulesCmd.java index 5e9da328bc84..89981a6453b5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/ListIpForwardingRulesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/ListIpForwardingRulesCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import 
java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "listIpForwardingRules", description = "List the IP forwarding rules", responseObject = FirewallRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListIpForwardingRulesCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListIpForwardingRulesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java index 70e67151c1df..127661b1820a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -43,7 +42,6 @@ @APICommand(name = "createNetworkACL", description = "Creates a ACL rule in the given network (the network has to belong to VPC)", responseObject = NetworkACLItemResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateNetworkACLCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateNetworkACLCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java index e5dbcc7b6d1b..cd25a604e776 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -41,7 +40,6 @@ responseObject = NetworkACLResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateNetworkACLListCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateNetworkACLListCmd.class.getName()); // /////////////////////////////////////////////////// @@ -130,7 +128,7 @@ public long getEntityOwnerId() { } else { account = CallContext.current().getCallingAccount(); if (!Account.Type.ADMIN.equals(account.getType())) { - s_logger.warn(String.format("Only Root Admin can create global ACLs. Account [%s] cannot create any global ACL.", account)); + logger.warn(String.format("Only Root Admin can create global ACLs. 
Account [%s] cannot create any global ACL.", account)); throw new PermissionDeniedException("Only Root Admin can create global ACLs."); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java index ca379fb1596f..2395339a477c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.network; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -51,7 +50,6 @@ @APICommand(name = "createNetwork", description = "Creates a network", responseObject = NetworkResponse.class, responseView = ResponseView.Restricted, entityType = {Network.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateNetworkCmd extends BaseCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CreateNetworkCmd.class.getName()); private static final String s_name = "createnetworkresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkPermissionsCmd.java index 4a1f65ba7a23..1df472cbb228 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkPermissionsCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import 
com.cloud.exception.InvalidParameterValueException; import com.cloud.network.Network; @@ -43,7 +42,6 @@ since = "4.17.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CreateNetworkPermissionsCmd extends BaseCmd { - public static final Logger LOGGER = Logger.getLogger(CreateNetworkPermissionsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLCmd.java index f171492b2f6f..ca42626eacb5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.network; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "deleteNetworkACL", description = "Deletes a network ACL", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetworkACLCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetworkACLCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLListCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLListCmd.java index 5c24efae6eb7..45bc86e8c91d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLListCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkACLListCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.network; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "deleteNetworkACLList", description = "Deletes a network ACL", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetworkACLListCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetworkACLListCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkCmd.java index 5f15c23e23f1..8e8e18c67024 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/DeleteNetworkCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -37,7 +36,6 @@ @APICommand(name = "deleteNetwork", description = "Deletes a network", responseObject = SuccessResponse.class, entityType = {Network.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetworkCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetworkCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API 
parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLListsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLListsCmd.java index f3a06142d788..c88f956943b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLListsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLListsCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.VpcResponse; -import org.apache.log4j.Logger; import com.cloud.network.vpc.NetworkACL; import com.cloud.utils.Pair; @@ -36,7 +35,6 @@ @APICommand(name = "listNetworkACLLists", description = "Lists all network ACLs", responseObject = NetworkACLResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetworkACLListsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworkACLListsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLsCmd.java index 945142f4df00..1ef2b9b7bfbf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkACLsCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.NetworkACLItemResponse; import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.NetworkResponse; -import org.apache.log4j.Logger; import com.cloud.network.vpc.NetworkACLItem; import com.cloud.utils.Pair; @@ -36,7 +35,6 @@ @APICommand(name = 
"listNetworkACLs", description = "Lists all network ACL items", responseObject = NetworkACLItemResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetworkACLsCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworkACLsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java index 70c01fd1d889..33f452008d99 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkOfferingsCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.NetworkOfferingResponse; import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.offering.NetworkOffering; import com.cloud.utils.Pair; @@ -36,7 +35,6 @@ @APICommand(name = "listNetworkOfferings", description = "Lists all available network offerings.", responseObject = NetworkOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetworkOfferingsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworkOfferingsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkPermissionsCmd.java index 9e6b01d36763..6ea4937e1153 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkPermissionsCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkPermissionsCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -42,7 +41,6 @@ since = "4.17.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListNetworkPermissionsCmd extends BaseCmd implements UserCmd { - public static final Logger LOGGER = Logger.getLogger(ListNetworkPermissionsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkProtocolsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkProtocolsCmd.java index 3008d1a81913..a7c359d66c2d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkProtocolsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworkProtocolsCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.NetworkProtocolResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -34,7 +33,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = { RoleType.Admin, RoleType.DomainAdmin, RoleType.ResourceAdmin, RoleType.User}, since = "4.19.0") public class ListNetworkProtocolsCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworkProtocolsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworksCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworksCmd.java index 
c1e85a9b4c30..0e8425b14b4b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworksCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ListNetworksCmd.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.response.NetworkOfferingResponse; import org.apache.cloudstack.api.response.ResourceIconResponse; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -46,7 +45,6 @@ @APICommand(name = "listNetworks", description = "Lists all available networks.", responseObject = NetworkResponse.class, responseView = ResponseView.Restricted, entityType = {Network.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetworksCmd extends BaseListRetrieveOnlyResourceCountCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListNetworksCmd.class.getName()); private static final String s_name = "listnetworksresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/MoveNetworkAclItemCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/MoveNetworkAclItemCmd.java index da6ac437aa0e..5d36dcfd8e93 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/MoveNetworkAclItemCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/MoveNetworkAclItemCmd.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.NetworkACLItemResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.network.vpc.NetworkACLItem; @@ -32,7 +31,6 @@ @APICommand(name = "moveNetworkAclItem", description = "Move an ACL rule to a position bettwen two other ACL rules of the same ACL network 
list", responseObject = NetworkACLItemResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class MoveNetworkAclItemCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(MoveNetworkAclItemCmd.class.getName()); private static final String s_name = "moveNetworkAclItemResponse"; @Parameter(name = ApiConstants.ID, type = CommandType.STRING, required = true, description = "The ID of the network ACL rule that is being moved to a new position.") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/RemoveNetworkPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/RemoveNetworkPermissionsCmd.java index 05785c815886..c199d872e0c0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/RemoveNetworkPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/RemoveNetworkPermissionsCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.Network; @@ -43,7 +42,6 @@ since = "4.17.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class RemoveNetworkPermissionsCmd extends BaseCmd { - public static final Logger LOGGER = Logger.getLogger(RemoveNetworkPermissionsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ReplaceNetworkACLListCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ReplaceNetworkACLListCmd.java index ea4e74149f4d..f6e9557aadb3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ReplaceNetworkACLListCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ReplaceNetworkACLListCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.network; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "replaceNetworkACLList", description = "Replaces ACL associated with a network or private gateway", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReplaceNetworkACLListCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReplaceNetworkACLListCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ResetNetworkPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ResetNetworkPermissionsCmd.java index f9817f9be5f9..a23b98c84a88 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/ResetNetworkPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/ResetNetworkPermissionsCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import com.cloud.network.Network; import com.cloud.user.Account; @@ -38,7 +37,6 @@ since = "4.17.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ResetNetworkPermissionsCmd extends BaseCmd { - public static final Logger LOGGER = Logger.getLogger(ResetNetworkPermissionsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/network/RestartNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/RestartNetworkCmd.java index 141dee3c9a04..ffc2e36dee53 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/RestartNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/RestartNetworkCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -43,7 +42,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RestartNetworkCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RestartNetworkCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLItemCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLItemCmd.java index f675fa23e343..42cb0697edb4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLItemCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLItemCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.NetworkACLItemResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceUnavailableException; @@ -35,7 +34,6 @@ @APICommand(name = "updateNetworkACLItem", description = "Updates ACL item with specified ID", responseObject = NetworkACLItemResponse.class, requestHasSensitiveInfo = 
false, responseHasSensitiveInfo = false) public class UpdateNetworkACLItemCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateNetworkACLItemCmd.class.getName()); private static final String s_name = "createnetworkaclresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLListCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLListCmd.java index ddcb202aa8da..adab885542d1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLListCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkACLListCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceUnavailableException; @@ -34,7 +33,6 @@ @APICommand(name = "updateNetworkACLList", description = "Updates network ACL list", responseObject = SuccessResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateNetworkACLListCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateNetworkACLListCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java index d3cc169b7da4..0d92a635e7f9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java @@ -31,7 +31,6 @@ import 
org.apache.cloudstack.api.command.user.UserCmd; import org.apache.cloudstack.api.response.NetworkOfferingResponse; import org.apache.cloudstack.api.response.NetworkResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -43,7 +42,6 @@ @APICommand(name = "updateNetwork", description = "Updates a network", responseObject = NetworkResponse.class, responseView = ResponseView.Restricted, entityType = {Network.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateNetworkCmd extends BaseAsyncCustomIdCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateNetworkCmd.class.getName()); private static final String s_name = "updatenetworkresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java index e7284d515a26..7545c3e09f40 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListDiskOfferingsCmd.java @@ -20,7 +20,6 @@ import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "listDiskOfferings", description = "Lists all available disk offerings.", responseObject = DiskOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDiskOfferingsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListDiskOfferingsCmd.class.getName()); 
///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java index a9a699ed3ef0..e07d75a7d08f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java @@ -18,12 +18,10 @@ import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.UserVmResponse; @@ -31,7 +29,6 @@ @APICommand(name = "listServiceOfferings", description = "Lists all available service offerings.", responseObject = ServiceOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListServiceOfferingsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListServiceOfferingsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/ActivateProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/ActivateProjectCmd.java index 58cc93ff95d3..42e045d4389f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/ActivateProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/ActivateProjectCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import 
org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "activateProject", description = "Activates a project", responseObject = ProjectResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ActivateProjectCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ActivateProjectCmd.class.getName()); private static final String s_name = "activaterojectresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java index a5742e8d0dea..cb93729381a4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/CreateProjectCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -39,7 +38,6 @@ @APICommand(name = "createProject", description = "Creates a project", responseObject = ProjectResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateProjectCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateProjectCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectCmd.java index 85b411b075d3..1fd205fdae41 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectCmd.java 
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "deleteProject", description = "Deletes a project", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteProjectCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteProjectCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectInvitationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectInvitationCmd.java index 600fac350dec..d1b17eda76b1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectInvitationCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/DeleteProjectInvitationCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.project; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "deleteProjectInvitation", description = "Deletes project invitation", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteProjectInvitationCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteProjectInvitationCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectInvitationsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectInvitationsCmd.java index b8d2f9b5f1ac..210394ec2ddf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectInvitationsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectInvitationsCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.project; import org.apache.cloudstack.api.response.UserResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListProjectInvitationsCmd extends BaseListAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListProjectInvitationsCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectsCmd.java index 39d1c0dfedaa..d4679dbe0578 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/ListProjectsCmd.java @@ -24,7 +24,6 @@ import com.cloud.server.ResourceIcon; import com.cloud.server.ResourceTag; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -44,7 +43,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListProjectsCmd extends BaseListAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListProjectsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/SuspendProjectCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/SuspendProjectCmd.java index 4937b164aab7..a3eee8c80bb7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/SuspendProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/SuspendProjectCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "suspendProject", description = "Suspends a project", responseObject = ProjectResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class SuspendProjectCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(SuspendProjectCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectCmd.java index 6520aa63a64b..4fabf7da788c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -40,7 +39,6 @@ @APICommand(name = "updateProject", description = "Updates a project", responseObject = ProjectResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateProjectCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateProjectCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectInvitationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectInvitationCmd.java index e783aa644677..0cbd9f702c27 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectInvitationCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/project/UpdateProjectInvitationCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.user.Account; @@ -35,7 +34,6 @@ @APICommand(name = "updateProjectInvitation", description = "Accepts or declines project invitation", responseObject = SuccessResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public 
class UpdateProjectInvitationCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateProjectInvitationCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ListRegionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ListRegionsCmd.java index d3eb8bf8b0f9..777f437851ca 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ListRegionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ListRegionsCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "listRegions", description = "Lists Regions", responseObject = RegionResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListRegionsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListRegionsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java index aedc363e5ccb..649b2a7bd9bc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/AssignToGlobalLoadBalancerRuleCmd.java @@ -25,7 +25,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -53,7 +52,6 @@ responseHasSensitiveInfo = false) public class 
AssignToGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AssignToGlobalLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java index 3aaf06073924..ddaadde78524 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.GlobalLoadBalancerResponse; import org.apache.cloudstack.api.response.RegionResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceAllocationException; @@ -42,7 +41,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateGlobalLoadBalancerRuleCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateGlobalLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// @@ -157,7 +155,7 @@ public void create() { this.setEntityUuid(gslbRule.getUuid()); CallContext.current().setEventDetails("Rule Id: " + getEntityId()); } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage()); } finally { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java index 87c4e60a90b8..7f3308614ccf 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/DeleteGlobalLoadBalancerRuleCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -41,7 +40,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteGlobalLoadBalancerRuleCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/ListGlobalLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/ListGlobalLoadBalancerRuleCmd.java index 7a1bcfc37a72..bf0cf22a2ecb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/ListGlobalLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/ListGlobalLoadBalancerRuleCmd.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "listGlobalLoadBalancerRules", description = "Lists load balancer rules.", responseObject = GlobalLoadBalancerResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListGlobalLoadBalancerRuleCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListGlobalLoadBalancerRuleCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/RemoveFromGlobalLoadBalancerRuleCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/RemoveFromGlobalLoadBalancerRuleCmd.java index b9bbfec847fe..d4b02139892c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/RemoveFromGlobalLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/RemoveFromGlobalLoadBalancerRuleCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -47,7 +46,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveFromGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveFromGlobalLoadBalancerRuleCmd.class.getName()); private static final String s_name = "removefromloadbalancerruleresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java index c8d307e4084c..7996998e5d92 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/UpdateGlobalLoadBalancerRuleCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "updateGlobalLoadBalancerRule", description = "update global load balancer rules.", responseObject = GlobalLoadBalancerResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateGlobalLoadBalancerRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateGlobalLoadBalancerRuleCmd.class.getName()); // 
/////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/GetCloudIdentifierCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/GetCloudIdentifierCmd.java index b513b811e467..b9e43336217d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/GetCloudIdentifierCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/GetCloudIdentifierCmd.java @@ -18,7 +18,6 @@ import java.util.ArrayList; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "getCloudIdentifier", description = "Retrieves a cloud identifier.", responseObject = CloudIdentifierResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetCloudIdentifierCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GetCloudIdentifierCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListHypervisorsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListHypervisorsCmd.java index a0e750eb19f6..556f3b081f02 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListHypervisorsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListHypervisorsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listHypervisors", description = "List hypervisors", responseObject = HypervisorResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListHypervisorsCmd 
extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListHypervisorsCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java index adf1c93e4d03..d40d36634516 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java @@ -27,14 +27,12 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ResourceLimitResponse; -import org.apache.log4j.Logger; import com.cloud.configuration.ResourceLimit; @APICommand(name = "listResourceLimits", description = "Lists resource limits.", responseObject = ResourceLimitResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListResourceLimitsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListResourceLimitsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java index 424087bc71ee..ae5188295397 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.ResourceCountResponse; import org.apache.cloudstack.context.CallContext; -import 
org.apache.log4j.Logger; import com.cloud.configuration.ResourceCount; import com.cloud.user.Account; @@ -38,7 +37,6 @@ @APICommand(name = "updateResourceCount", description = "Recalculate and update resource count for an account or domain.", responseObject = ResourceCountResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateResourceCountCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateResourceCountCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java index 41676ed23ca2..32b3b17527e2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java @@ -26,14 +26,12 @@ import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.ResourceLimitResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.configuration.ResourceLimit; @APICommand(name = "updateResourceLimit", description = "Updates resource limits for an account or domain.", responseObject = ResourceLimitResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateResourceLimitCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateResourceLimitCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java index 737bdc8773b3..13faafe348c0 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -50,7 +49,6 @@ responseHasSensitiveInfo = false) @SuppressWarnings("rawtypes") public class AuthorizeSecurityGroupEgressCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AuthorizeSecurityGroupEgressCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java index b6918905f292..640870fc3de3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -50,7 +49,6 @@ responseHasSensitiveInfo = false) @SuppressWarnings("rawtypes") public class AuthorizeSecurityGroupIngressCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AuthorizeSecurityGroupIngressCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/CreateSecurityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/CreateSecurityGroupCmd.java index 4978aa174fc5..673eaaef33da 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/CreateSecurityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/CreateSecurityGroupCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.securitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "createSecurityGroup", responseObject = SecurityGroupResponse.class, description = "Creates a security group", entityType = {SecurityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateSecurityGroupCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateSecurityGroupCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java index 57d365d5ce12..b2ea90792b8e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.securitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "deleteSecurityGroup", description = "Deletes security group", responseObject = SuccessResponse.class, entityType = {SecurityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSecurityGroupCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteSecurityGroupCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -124,7 +122,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete security group"); } } catch (ResourceInUseException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_IN_USE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/ListSecurityGroupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/ListSecurityGroupsCmd.java index c4c103cf98e9..f93e7b39586b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/ListSecurityGroupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/ListSecurityGroupsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.securitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -32,7 +31,6 @@ @APICommand(name = "listSecurityGroups", description = "Lists security groups", responseObject = SecurityGroupResponse.class, entityType = {SecurityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSecurityGroupsCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListSecurityGroupsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupEgressCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupEgressCmd.java index f4a0362fce96..bf435406174c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupEgressCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupEgressCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.securitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -38,7 +37,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RevokeSecurityGroupEgressCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RevokeSecurityGroupEgressCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupIngressCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupIngressCmd.java index c5f88c9673f0..c426647fe36c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupIngressCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/RevokeSecurityGroupIngressCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.securitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -38,7 +37,6 @@ @APICommand(name = "revokeSecurityGroupIngress", responseObject = SuccessResponse.class, description = "Deletes a particular ingress rule from this security group", entityType = {SecurityGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RevokeSecurityGroupIngressCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RevokeSecurityGroupIngressCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/UpdateSecurityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/UpdateSecurityGroupCmd.java index 9b3000b1bf71..801fb6ac5e51 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/UpdateSecurityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/UpdateSecurityGroupCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.securitygroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -37,7 +36,6 @@ since = "4.14.0.0", authorized = {RoleType.Admin}) public class UpdateSecurityGroupCmd extends BaseCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateSecurityGroupCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ArchiveSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ArchiveSnapshotCmd.java index 78aa208ac1f1..f72de2278cc7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ArchiveSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ArchiveSnapshotCmd.java @@ -35,13 +35,11 @@ import org.apache.cloudstack.api.response.SnapshotResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "archiveSnapshot", description = "Archives (moves) a snapshot on primary storage to secondary storage", responseObject = SnapshotResponse.class, entityType = {Snapshot.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ArchiveSnapshotCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ArchiveSnapshotCmd.class.getName()); private static final String s_name = "createsnapshotresponse"; @ACL(accessType = SecurityChecker.AccessType.OperateEntry) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java index f6d16c3eb493..07973fcbfca5 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.event.EventTypes; @@ -43,13 +42,15 @@ import com.cloud.exception.StorageUnavailableException; import com.cloud.storage.Snapshot; import com.cloud.user.Account; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; @APICommand(name = "copySnapshot", description = "Copies a snapshot from one zone to another.", responseObject = SnapshotResponse.class, responseView = ResponseObject.ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CopySnapshotCmd.class.getName()); + public static final Logger logger = LogManager.getLogger(CopySnapshotCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -170,10 +171,10 @@ public void execute() throws ResourceUnavailableException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to copy snapshot"); } } catch (StorageUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } diff 
--git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java index eed3aa49fa59..3289ac2fe106 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -50,7 +49,6 @@ @APICommand(name = "createSnapshot", description = "Creates an instant snapshot of a volume.", responseObject = SnapshotResponse.class, entityType = {Snapshot.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateSnapshotCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateSnapshotCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// @@ -240,7 +238,7 @@ public void execute() { } String errorMessage = "Failed to create snapshot due to an internal error creating snapshot for volume " + getVolumeUuid(); - s_logger.error(errorMessage, e); + logger.error(errorMessage, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage); } } @@ -257,7 +255,7 @@ private Snapshot.LocationType getLocationType() { } catch (IllegalArgumentException e) { String errMesg = "Invalid locationType " + locationType + "Specified for volume " + getVolumeId() + " Valid values are: primary,secondary "; - s_logger.warn(errMesg); + logger.warn(errMesg); throw new CloudRuntimeException(errMesg); } } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java index 7b89e87202d9..6bebdc09f59b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.VMSnapshotResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -43,7 +42,6 @@ @APICommand(name = "createSnapshotFromVMSnapshot", description = "Creates an instant snapshot of a volume from existing vm snapshot.", responseObject = SnapshotResponse.class, entityType = {Snapshot.class}, since = "4.10.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateSnapshotFromVMSnapshotCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateSnapshotFromVMSnapshotCmd.class.getName()); // /////////////////////////////////////////////////// // ////////////// API parameters ///////////////////// @@ -166,7 +164,7 @@ public void create() throws ResourceAllocationException { @Override public void execute() { - s_logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot id:" + getVMSnapshotId() + " and snapshot id:" + getEntityId() + " starts:" + System.currentTimeMillis()); + logger.info("CreateSnapshotFromVMSnapshotCmd with vm snapshot id:" + getVMSnapshotId() + " and snapshot id:" + getEntityId() + " starts:" + System.currentTimeMillis()); CallContext.current().setEventDetails("Vm Snapshot Id: "+ this._uuidMgr.getUuid(VMSnapshot.class, getVMSnapshotId())); Snapshot snapshot = null; try { @@ 
-181,14 +179,14 @@ public void execute() { } catch (InvalidParameterValueException ex) { throw ex; } catch (Exception e) { - s_logger.debug("Failed to create snapshot", e); + logger.debug("Failed to create snapshot", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create snapshot due to an internal error creating snapshot from vm snapshot " + getVMSnapshotId()); } finally { if (snapshot == null) { try { _snapshotService.deleteSnapshot(getEntityId(), null); } catch (Exception e) { - s_logger.debug("Failed to clean failed snapshot" + getEntityId()); + logger.debug("Failed to clean failed snapshot" + getEntityId()); } } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java index 00bfb9e7e2c9..e30b897db2ea 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; @@ -45,7 +44,6 @@ @APICommand(name = "createSnapshotPolicy", description = "Creates a snapshot policy for the account.", responseObject = SnapshotPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateSnapshotPolicyCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateSnapshotPolicyCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotCmd.java index 6d71b1363b42..a0a8cfac9bd2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.snapshot; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "deleteSnapshot", description = "Deletes a snapshot of a disk volume.", responseObject = SuccessResponse.class, entityType = {Snapshot.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSnapshotCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteSnapshotCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotPoliciesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotPoliciesCmd.java index 1a72b22f06c5..6f4b60dc8b21 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotPoliciesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/DeleteSnapshotPoliciesCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "deleteSnapshotPolicies", description = "Deletes snapshot policies for the account.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSnapshotPoliciesCmd extends BaseCmd { - 
public static final Logger s_logger = Logger.getLogger(DeleteSnapshotPoliciesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotPoliciesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotPoliciesCmd.java index e30ee75dee76..126a4080e6d2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotPoliciesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotPoliciesCmd.java @@ -20,7 +20,6 @@ import java.util.List; import org.apache.cloudstack.acl.RoleType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "listSnapshotPolicies", description = "Lists snapshot policies.", responseObject = SnapshotPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSnapshotPoliciesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListSnapshotPoliciesCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotsCmd.java index cf665127a171..826c54c2e052 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/ListSnapshotsCmd.java @@ -28,14 +28,12 @@ import org.apache.cloudstack.api.response.SnapshotResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.storage.Snapshot; @APICommand(name = "listSnapshots", description = "Lists all available snapshots 
for the account.", responseObject = SnapshotResponse.class, entityType = { Snapshot.class }, responseView = ResponseObject.ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSnapshotsCmd extends BaseListTaggedResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListSnapshotsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java index e65a03812d6f..fe3b4da0160e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SnapshotResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.storage.Snapshot; @@ -39,7 +38,6 @@ @APICommand(name = "revertSnapshot", description = "This is supposed to revert a volume snapshot. 
This command is only supported with KVM so far", responseObject = SnapshotResponse.class, entityType = {Snapshot.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RevertSnapshotCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RevertSnapshotCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/UpdateSnapshotPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/UpdateSnapshotPolicyCmd.java index 0bedbe66199b..e7feb11f4afd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/UpdateSnapshotPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/UpdateSnapshotPolicyCmd.java @@ -33,13 +33,11 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SnapshotPolicyResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "updateSnapshotPolicy", description = "Updates the snapshot policy.", responseObject = SnapshotPolicyResponse.class, responseView = ResponseObject.ResponseView.Restricted, entityType = {Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateSnapshotPolicyCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateSnapshotPolicyCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java index 521148b596d9..5212779e9654 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.ssh; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "createSSHKeyPair", description = "Create a new keypair and returns the private key", responseObject = CreateSSHKeyPairResponse.class, entityType = {SSHKeyPair.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class CreateSSHKeyPairCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateSSHKeyPairCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/DeleteSSHKeyPairCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/DeleteSSHKeyPairCmd.java index 39c65c181c50..364ca77ae1fc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/DeleteSSHKeyPairCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/DeleteSSHKeyPairCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.ssh; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "deleteSSHKeyPair", description = "Deletes a keypair by name", responseObject = SuccessResponse.class, entityType = {SSHKeyPair.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSSHKeyPairCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteSSHKeyPairCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/ListSSHKeyPairsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/ListSSHKeyPairsCmd.java index 71fbb66a337b..6bf8dca864b0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/ListSSHKeyPairsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/ListSSHKeyPairsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listSSHKeyPairs", description = "List registered keypairs", responseObject = SSHKeyPairResponse.class, entityType = {SSHKeyPair.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSSHKeyPairsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListSSHKeyPairsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java index 8bacfde7f737..6a0c0541bb4f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.ssh; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -32,7 +31,6 @@ @APICommand(name = "registerSSHKeyPair", description = "Register a public key in a keypair under a certain name", responseObject = SSHKeyPairResponse.class, entityType = {SSHKeyPair.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RegisterSSHKeyPairCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RegisterSSHKeyPairCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java index 67705859419b..30904db46c44 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "createTags", description = "Creates resource tag(s)", responseObject = SuccessResponse.class, since = "4.0.0", entityType = {ResourceTag.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTagsCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTagsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java index 55dec6e9645a..f8f319eba23d 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "deleteTags", description = "Deleting resource tag(s)", responseObject = SuccessResponse.class, since = "4.0.0", entityType = {ResourceTag.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTagsCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTagsCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/CopyTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/CopyTemplateCmd.java index f672d4c8890e..f094bc435070 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/CopyTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/CopyTemplateCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.event.EventTypes; @@ -43,7 +42,6 @@ @APICommand(name = "copyTemplate", description = "Copies a template from one zone to another.", responseObject = TemplateResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CopyTemplateCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CopyTemplateCmd.class.getName()); private static final String s_name = "copytemplateresponse"; 
///////////////////////////////////////////////////// @@ -191,7 +189,7 @@ public void execute() throws ResourceAllocationException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to copy template"); } } catch (StorageUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java index 6c39ab6d3c7e..0a7bf2918435 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java @@ -40,7 +40,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -55,7 +54,6 @@ + "A template created from this command is automatically designated as a private template visible to the account that created it.", responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTemplateCmd extends BaseAsyncCreateCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CreateTemplateCmd.class.getName()); private static final String s_name = "createtemplateresponse"; // /////////////////////////////////////////////////// @@ -348,11 +346,11 @@ private Long findAccountIdToUse(Account callingAccount) { try { accountIdToUse = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); } catch (InvalidParameterValueException | PermissionDeniedException ex) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug(String.format("An exception occurred while finalizing account id with accountName, domainId and projectId" + + if (logger.isDebugEnabled()) { + logger.debug(String.format("An exception occurred while finalizing account id with accountName, domainId and projectId" + "using callingAccountId=%s", callingAccount.getUuid()), ex); } - s_logger.warn("Unable to find accountId associated with accountName=" + accountName + " and domainId=" + logger.warn("Unable to find accountId associated with accountName=" + accountName + " and domainId=" + domainId + " or projectId=" + projectId + ", using callingAccountId=" + callingAccount.getUuid()); } return accountIdToUse != null ? accountIdToUse : callingAccount.getAccountId(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java index 3d7aaaec31c1..245baf1e07e0 100755 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/DeleteTemplateCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.template; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -40,7 +39,6 @@ description = "Deletes a template from the system. 
All virtual machines using the deleted template will not be affected.", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTemplateCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTemplateCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java index 91e8d168ecb5..ce6ba5e300c1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ExtractTemplateCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.event.EventTypes; @@ -38,7 +37,6 @@ @APICommand(name = "extractTemplate", description = "Extracts a template", responseObject = ExtractResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ExtractTemplateCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ExtractTemplateCmd.class.getName()); ///////////////////////////////////////////////////// @@ -129,7 +127,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to extract template"); } } catch (InternalErrorException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java index ab872b84edbc..c878fda82409 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.GuestOSResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.ResourceAllocationException; @@ -43,7 +42,6 @@ authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd { - public static final Logger s_logger = Logger.getLogger(GetUploadParamsForTemplateCmd.class.getName()); private static final String s_name = "postuploadtemplateresponse"; @@ -172,7 +170,7 @@ public void execute() throws ServerApiException { response.setResponseName(getCommandName()); setResponseObject(response); } catch (ResourceAllocationException | MalformedURLException e) { - s_logger.error("exception while registering template", e); + logger.error("exception while registering template", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "exception while registering template: " + e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java index 970c6b347430..6d544df41871 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.template; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; @@ -40,11 +39,6 @@ public String getMediaType() { return "template"; } - @Override - protected Logger getLogger() { - return Logger.getLogger(ListTemplatePermissionsCmd.class.getName()); - } - @Override protected boolean templateIsCorrectType(VirtualMachineTemplate template) { return !template.getFormat().equals(ImageFormat.ISO); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java index dae7cc97a4c9..113080257d02 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java @@ -21,7 +21,6 @@ import com.cloud.server.ResourceTag; import org.apache.cloudstack.api.response.ResourceIconResponse; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.Collections; @@ -46,7 +45,6 @@ @APICommand(name = "listTemplates", description = "List all public, private, and privileged templates.", responseObject = TemplateResponse.class, entityType = {VirtualMachineTemplate.class}, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListTemplatesCmd.class.getName()); private static final String s_name = "listtemplatesresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index 
0a087888d521..1e5c4af9c154 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -41,7 +41,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.ResourceAllocationException; import com.cloud.template.VirtualMachineTemplate; @@ -49,7 +48,6 @@ @APICommand(name = "registerTemplate", description = "Registers an existing template into the CloudStack cloud. ", responseObject = TemplateResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RegisterTemplateCmd extends BaseCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(RegisterTemplateCmd.class.getName()); private static final String s_name = "registertemplateresponse"; @@ -335,7 +333,7 @@ public void execute() throws ResourceAllocationException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to register template"); } } catch (URISyntaxException ex1) { - s_logger.info(ex1); + logger.info(ex1); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex1.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java index 2afa6a98b13d..3f11f3860b04 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplateCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.template; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -35,7 +34,6 @@ @APICommand(name = "updateTemplate", description = "Updates attributes of a template.", responseObject = TemplateResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateTemplateCmd extends BaseUpdateTemplateOrIsoCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateTemplateCmd.class.getName()); private static final String s_name = "updatetemplateresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplatePermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplatePermissionsCmd.java index 7cf5e0bc3f5b..de8f09a64005 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplatePermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/UpdateTemplatePermissionsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.template; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd; @@ -35,10 +34,6 @@ protected String getResponseName() { return "updatetemplatepermissionsresponse"; } - @Override - protected Logger getLogger() { - return Logger.getLogger(UpdateTemplatePermissionsCmd.class.getName()); - } @Override public long getEntityOwnerId() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteUserDataCmd.java index d27b90f07a1c..a1d1afc7b057 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/DeleteUserDataCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.user.UserData; @@ -39,7 +38,6 @@ authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class DeleteUserDataCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteUserDataCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/LinkUserDataToTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/LinkUserDataToTemplateCmd.java index be1a95c36941..e322de00bb1b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/LinkUserDataToTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/LinkUserDataToTemplateCmd.java @@ 
-28,7 +28,6 @@ import org.apache.cloudstack.api.command.admin.AdminCmd; import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserDataResponse; -import org.apache.log4j.Logger; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; @@ -39,7 +38,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.18.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class LinkUserDataToTemplateCmd extends BaseCmd implements AdminCmd { - public static final Logger s_logger = Logger.getLogger(LinkUserDataToTemplateCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java index 87d8883e2e30..64ab3ec3d70e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/ListUserDataCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.UserDataResponse; -import org.apache.log4j.Logger; import com.cloud.user.UserData; import com.cloud.utils.Pair; @@ -35,7 +34,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.18", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListUserDataCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListUserDataCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java index f294f7dd8e09..8df25541a197 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -52,7 +51,6 @@ authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class RegisterUserDataCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RegisterUserDataCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java index fc009c7e5db1..0dc3dcdbdcc8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.NicResponse; import org.apache.cloudstack.api.response.NicSecondaryIpResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.event.EventTypes; @@ -46,7 +45,6 @@ @APICommand(name = "addIpToNic", description = "Assigns secondary IP to NIC", responseObject = NicSecondaryIpResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddIpToVmNicCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = 
Logger.getLogger(AddIpToVmNicCmd.class.getName()); private static final String s_name = "addiptovmnicresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java index 1e395831a6ff..ecd066d98cd5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/AddNicToVMCmd.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.api.response.NetworkResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -51,7 +50,6 @@ @APICommand(name = "addNicToVirtualMachine", description = "Adds VM to specified network by creating a NIC", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class AddNicToVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(AddNicToVMCmd.class); private static final String s_name = "addnictovirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 1cbe28f4ddee..446bdf30f07a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -56,7 +56,6 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import 
org.apache.log4j.Logger; import com.cloud.agent.api.LogLevel; import com.cloud.event.EventTypes; @@ -80,7 +79,6 @@ @APICommand(name = "deployVirtualMachine", description = "Creates and automatically starts a virtual machine based on a service offering, disk offering, and template.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityGroupAction, UserCmd { - public static final Logger s_logger = Logger.getLogger(DeployVMCmd.class.getName()); private static final String s_name = "deployvirtualmachineresponse"; @@ -313,7 +311,7 @@ public ApiConstants.BootType getBootType() { } catch (IllegalArgumentException e) { String errMesg = "Invalid bootType " + bootType + "Specified for vm " + getName() + " Valid values are: " + Arrays.toString(ApiConstants.BootType.values()); - s_logger.warn(errMesg); + logger.warn(errMesg); throw new InvalidParameterValueException(errMesg); } } @@ -360,14 +358,14 @@ public ApiConstants.BootMode getBootMode() { } catch (IllegalArgumentException e) { String msg = String.format("Invalid %s: %s specified for VM: %s. Valid values are: %s", ApiConstants.BOOT_MODE, bootMode, getName(), Arrays.toString(ApiConstants.BootMode.values())); - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } } if (ApiConstants.BootType.UEFI.equals(getBootType())) { String msg = String.format("%s must be specified for the VM with boot type: %s. 
Valid values are: %s", ApiConstants.BOOT_MODE, getBootType(), Arrays.toString(ApiConstants.BootMode.values())); - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } return null; @@ -400,8 +398,8 @@ public Map getVmNetworkMap() { nic = null; } String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); } if (nic == null || StringUtils.isEmpty(networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); @@ -711,7 +709,7 @@ public ApiConstants.IoDriverPolicy getIoDriverPolicy() { return ApiConstants.IoDriverPolicy.valueOf(policyType); } catch (IllegalArgumentException e) { String errMesg = String.format("Invalid io policy %s specified for vm %s. 
Valid values are: %s", ioDriverPolicy, getName(), Arrays.toString(ApiConstants.IoDriverPolicy.values())); - s_logger.warn(errMesg); + logger.warn(errMesg); throw new InvalidParameterValueException(errMesg); } } @@ -777,13 +775,13 @@ public void execute() { try { result = _userVmService.startVirtualMachine(this); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { StringBuilder message = new StringBuilder(ex.getMessage()); @@ -792,12 +790,12 @@ public void execute() { message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them"); } } - s_logger.info(String.format("%s: %s", message.toString(), ex.getLocalizedMessage())); - s_logger.debug(message.toString(), ex); + logger.info(String.format("%s: %s", message.toString(), ex.getLocalizedMessage())); + logger.debug(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } } else { - s_logger.info("VM " + getEntityUuid() + " already created, load UserVm from DB"); + logger.info("VM " + getEntityUuid() + " already created, load UserVm from DB"); result = _userVmService.finalizeCreateVirtualMachine(getEntityId()); } @@ -823,17 +821,17 @@ public void create() throws ResourceAllocationException { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to deploy vm"); } } catch (InsufficientCapacityException ex) { - 
s_logger.info(ex); - s_logger.trace(ex.getMessage(), ex); + logger.info(ex); + logger.trace(ex.getMessage(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java index 07fd55276f84..aa121162cb4e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DestroyVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(DestroyVMCmd.class.getName()); private static final String s_name = "destroyvirtualmachineresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java index d3cbf82f7557..ce6114c7fd85 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/GetVMPasswordCmd.java @@ -18,7 +18,6 @@ import java.security.InvalidParameterException; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -36,7 +35,6 @@ @APICommand(name = "getVMPassword", responseObject = GetVMPasswordResponse.class, description = "Returns an encrypted password for the VM", entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetVMPasswordCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GetVMPasswordCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java index 44710d018d3b..0e659fc02a1a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.NicResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -45,7 +44,6 @@ @APICommand(name = "listNics", description = "list the vm nics IP to NIC", responseObject = NicResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNicsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNicsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -157,7 +155,7 @@ public void execute() throws 
ResourceUnavailableException, ResourceAllocationExc this.setResponseObject(response); } } catch (Exception e) { - s_logger.warn("Failed to list secondary ip address per nic "); + logger.warn("Failed to list secondary ip address per nic "); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java index 6a5ec28d1bae..2d1160fb7a75 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ListVMsCmd.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.server.ResourceIcon; @@ -56,7 +55,6 @@ @APICommand(name = "listVirtualMachines", description = "List the virtual machines owned by the account.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ListVMsCmd extends BaseListRetrieveOnlyResourceCountCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListVMsCmd.class.getName()); private static final String s_name = "listvirtualmachinesresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RebootVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RebootVMCmd.java index 9bdcc1aed56c..10900f61b22b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RebootVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RebootVMCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -42,7 +41,6 @@ @APICommand(name = "rebootVirtualMachine", description = "Reboots a virtual machine.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RebootVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(RebootVMCmd.class.getName()); private static final String s_name = "rebootvirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java index e964cc69231b..a4cd6159dfc7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -41,7 +40,6 @@ @APICommand(name = "removeIpFromNic", description = "Removes secondary IP from the NIC.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveIpFromVmNicCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveIpFromVmNicCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveNicFromVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveNicFromVMCmd.java index 5fd016c38edc..d9024f340228 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveNicFromVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RemoveNicFromVMCmd.java @@ -20,7 +20,6 @@ import java.util.EnumSet; import com.cloud.vm.Nic; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -45,7 +44,6 @@ @APICommand(name = "removeNicFromVirtualMachine", description = "Removes VM from specified network by deleting a NIC", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RemoveNicFromVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(RemoveNicFromVMCmd.class); private static final String s_name = "removenicfromvirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java index 1cf4c929b32d..7270004aeed8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vm; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -45,7 +44,6 @@ "support this feature for this command to take effect. [async]", responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ResetVMPasswordCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ResetVMPasswordCmd.class.getName()); private static final String s_name = "resetpasswordforvirtualmachineresponse"; @@ -122,9 +120,9 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE UserVm vm = _responseGenerator.findUserVmById(getId()); if (StringUtils.isBlank(password)) { password = _mgr.generateRandomPassword(); - s_logger.debug(String.format("Resetting VM [%s] password to a randomly generated password.", vm.getUuid())); + logger.debug(String.format("Resetting VM [%s] password to a randomly generated password.", vm.getUuid())); } else { - s_logger.debug(String.format("Resetting VM [%s] password to password defined by user.", vm.getUuid())); + logger.debug(String.format("Resetting VM [%s] password to password defined by user.", vm.getUuid())); } CallContext.current().setEventDetails("Vm Id: " + getId()); UserVm result = _userVmService.resetVMPassword(this, password); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMSSHKeyCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMSSHKeyCmd.java index 259cfebbad56..a4019411e1d2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMSSHKeyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMSSHKeyCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -50,7 +49,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class ResetVMSSHKeyCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ResetVMSSHKeyCmd.class.getName()); private static final String s_name = "resetSSHKeyforvirtualmachineresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java index 3ead67e21064..089dfaecf946 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMUserDataCmd.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import java.util.Map; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true, since = "4.18.0") public class ResetVMUserDataCmd extends BaseCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ResetVMUserDataCmd.class.getName()); private static final String s_name = "resetuserdataforvirtualmachineresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java index 
4b59bf560cb3..e1c4dd5f678e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vm; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RestoreVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(RestoreVMCmd.class); private static final String s_name = "restorevmresponse"; @ACL(accessType = AccessType.OperateEntry) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java index 5af45762ece6..3af6d5245f00 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.command.user.UserCmd; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.UserVmResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -49,7 +48,6 @@ @APICommand(name = "scaleVirtualMachine", description = "Scales the virtual machine to a new service offering. 
This command also considers the volume size in the service offering or disk offering linked to the new service offering and apply all characteristics to the root volume.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ScaleVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ScaleVMCmd.class.getName()); private static final String s_name = "scalevirtualmachineresponse"; ///////////////////////////////////////////////////// @@ -169,16 +167,16 @@ public void execute() { try { result = _userVmService.upgradeVirtualMachine(this); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ManagementServerException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (VirtualMachineMigrationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } if (result != null){ diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java index 10c50dc380b4..8bc4f0ff3b15 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StartVMCmd.java @@ -18,7 +18,6 @@ import org.apache.cloudstack.api.response.ClusterResponse; import 
org.apache.cloudstack.api.response.PodResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -51,7 +50,6 @@ @APICommand(name = "startVirtualMachine", responseObject = UserVmResponse.class, description = "Starts a virtual machine.", responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class StartVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(StartVMCmd.class.getName()); private static final String s_name = "startvirtualmachineresponse"; @@ -188,19 +186,19 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to start a vm"); } } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (StorageUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ExecutionException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { StringBuilder message = new StringBuilder(ex.getMessage()); @@ -209,8 +207,8 @@ public void execute() { message.append(", Please check the affinity groups 
provided, there may not be sufficient capacity to follow them"); } } - s_logger.info(ex); - s_logger.info(message.toString(), ex); + logger.info(ex); + logger.info(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StopVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StopVMCmd.java index 113ba9ed25db..bfd5d8d07f61 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StopVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/StopVMCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.vm; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 +40,6 @@ @APICommand(name = "stopVirtualMachine", responseObject = UserVmResponse.class, description = "Stops a virtual machine.", responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class StopVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(StopVMCmd.class.getName()); private static final String s_name = "stopvirtualmachineresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateDefaultNicForVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateDefaultNicForVMCmd.java index ff533f82095e..837bde06a6ca 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateDefaultNicForVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateDefaultNicForVMCmd.java @@ -20,7 +20,6 @@ import java.util.EnumSet; import com.cloud.vm.Nic; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import 
org.apache.cloudstack.api.ACL; @@ -45,7 +44,6 @@ @APICommand(name = "updateDefaultNicForVirtualMachine", description = "Changes the default NIC on a VM", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class UpdateDefaultNicForVMCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateDefaultNicForVMCmd.class); private static final String s_name = "updatedefaultnicforvirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java index 32ce1f6db524..9f72ac17c8f4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java @@ -23,7 +23,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.api.response.UserDataResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -54,7 +53,6 @@ "Therefore, stop the VM manually before issuing this call.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class UpdateVMCmd extends BaseCustomIdCmd implements SecurityGroupAction, UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVMCmd.class.getName()); private static final String s_name = "updatevirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVmNicIpCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVmNicIpCmd.java index 40658f9b58db..5c654701de91 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVmNicIpCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVmNicIpCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.EnumSet; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -49,7 +48,6 @@ @APICommand(name = "updateVmNicIp", description = "Update the default Ip of a VM Nic", responseObject = UserVmResponse.class) public class UpdateVmNicIpCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVmNicIpCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpgradeVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpgradeVMCmd.java index 4b31c12ec0a3..6a7422e70bbb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpgradeVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpgradeVMCmd.java @@ -18,7 +18,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -47,7 +46,6 @@ "this command to take effect.", responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class UpgradeVMCmd extends BaseCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpgradeVMCmd.class.getName()); private static final String s_name = "changeserviceforvirtualmachineresponse"; ///////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java index 154ec45873ad..e2952b5bd818 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.vmgroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "createInstanceGroup", description = "Creates a vm group", responseObject = InstanceGroupResponse.class, entityType = {InstanceGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVMGroupCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateVMGroupCmd.class.getName()); // /////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/DeleteVMGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/DeleteVMGroupCmd.java index 0bdda0bb9e77..b74bc43eeb7a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/DeleteVMGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/DeleteVMGroupCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vmgroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -35,7 +34,6 @@ @APICommand(name = "deleteInstanceGroup", description = "Deletes a vm group", responseObject = SuccessResponse.class, entityType = {InstanceGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVMGroupCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVMGroupCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/ListVMGroupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/ListVMGroupsCmd.java index 2e61c898ae05..31845a956e92 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/ListVMGroupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/ListVMGroupsCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vmgroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -30,7 +29,6 @@ @APICommand(name = "listInstanceGroups", description = "Lists vm groups", responseObject = InstanceGroupResponse.class, entityType = {InstanceGroup.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVMGroupsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListVMGroupsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/UpdateVMGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/UpdateVMGroupCmd.java index 8873c852460a..5c553f064042 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/UpdateVMGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/UpdateVMGroupCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vmgroup; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -35,7 +34,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVMGroupCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVMGroupCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/CreateVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/CreateVMSnapshotCmd.java index 2c0ea6bc4ae5..41f480d60876 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/CreateVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/CreateVMSnapshotCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VMSnapshotResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceAllocationException; @@ -42,7 +41,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVMSnapshotCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateVMSnapshotCmd.class.getName()); @ACL(accessType = AccessType.OperateEntry) @Parameter(name = ApiConstants.VIRTUAL_MACHINE_ID, type = CommandType.UUID, required = true, entityType = UserVmResponse.class, description = "The ID of the vm") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/DeleteVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/DeleteVMSnapshotCmd.java index bcddc75a8e77..94b8824f8685 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/DeleteVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/DeleteVMSnapshotCmd.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.api.command.user.vmsnapshot; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "deleteVMSnapshot", description = "Deletes a vmsnapshot.", responseObject = SuccessResponse.class, since = "4.2.0", entityType = {VMSnapshot.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVMSnapshotCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVMSnapshotCmd.class.getName()); @ACL(accessType = AccessType.OperateEntry) @Parameter(name = ApiConstants.VM_SNAPSHOT_ID, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/RevertToVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/RevertToVMSnapshotCmd.java index 42cd18bfa2fc..310b45687d49 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/RevertToVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmsnapshot/RevertToVMSnapshotCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vmsnapshot; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -45,7 +44,6 @@ @APICommand(name = "revertToVMSnapshot", description = "Revert VM from a vmsnapshot.", responseObject = UserVmResponse.class, since = "4.2.0", responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RevertToVMSnapshotCmd extends BaseAsyncCmd 
implements UserCmd { - public static final Logger s_logger = Logger.getLogger(RevertToVMSnapshotCmd.class.getName()); private static final String s_name = "reverttovmsnapshotresponse"; @ACL(accessType = AccessType.OperateEntry, pointerToEntity = "getVmId()") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java index 8d472d96eef5..287991fa9846 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java @@ -18,7 +18,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -33,7 +32,6 @@ @APICommand(name = "addResourceDetail", description = "Adds detail for the Resource.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddResourceDetailCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddResourceDetailCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java index 03413682c4f7..1a51aa03c226 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AssignVolumeCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.VolumeResponse; -import org.apache.log4j.Logger; import com.cloud.storage.Volume; @@ -38,7 
+37,6 @@ @APICommand(name = AssignVolumeCmd.CMD_NAME, responseObject = VolumeResponse.class, description = "Changes ownership of a Volume from one account to another.", entityType = { Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.18.0.0") public class AssignVolumeCmd extends BaseCmd implements UserCmd { - public static final Logger LOGGER = Logger.getLogger(AssignVolumeCmd.class.getName()); public static final String CMD_NAME = "assignVolume"; ///////////////////////////////////////////////////// @@ -96,7 +94,7 @@ public void execute() { } catch (CloudRuntimeException | ResourceAllocationException e) { String msg = String.format("Assign volume command for volume [%s] failed due to [%s].", getFullUrlParams().get("volumeid"), e.getMessage()); - LOGGER.error(msg, e); + logger.error(msg, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java index 687d683309c6..1a3b9220877f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.volume; import org.apache.cloudstack.api.BaseAsyncCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 +40,6 @@ @APICommand(name = "attachVolume", description = "Attaches a disk volume to a virtual machine.", responseObject = VolumeResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AttachVolumeCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = 
Logger.getLogger(AttachVolumeCmd.class.getName()); private static final String s_name = "attachvolumeresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java index 566e8a46bd94..7ffcea50b219 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -48,7 +47,6 @@ Volume.class, VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVolumeCmd extends BaseAsyncCreateCustomIdCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CreateVolumeCmd.class.getName()); private static final String s_name = "createvolumeresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java index 4bcc8a88545f..6111488a8021 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.volume; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "deleteVolume", description = "Deletes a detached disk volume.", responseObject = SuccessResponse.class, entityType = {Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVolumeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVolumeCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DestroyVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DestroyVolumeCmd.java index f4007ce9f40b..2eafb76e5915 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DestroyVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DestroyVolumeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -42,7 +41,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class DestroyVolumeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DestroyVolumeCmd.class.getName()); private static final String s_name = "destroyvolumeresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java index e92f6a3cb0d8..2fddcace84dd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.volume; import org.apache.cloudstack.api.BaseAsyncCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -42,7 +41,6 @@ @APICommand(name = "detachVolume", description = "Detaches a disk volume from a virtual machine.", responseObject = VolumeResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DetachVolumeCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(DetachVolumeCmd.class.getName()); private static final String s_name = "detachvolumeresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java index 8f6e3a6c1204..1146f80f0e2c 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ExtractVolumeCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -41,7 +40,6 @@ @APICommand(name = "extractVolume", description = "Extracts volume", responseObject = ExtractResponse.class, entityType = {Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ExtractVolumeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ExtractVolumeCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java index 1342ffc4748c..4ccd5f97993a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java @@ -30,12 +30,10 @@ import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "getUploadParamsForVolume", description = "Upload a data disk to the cloudstack cloud.", responseObject = GetUploadParamsResponse.class, since = "4.6.0", requestHasSensitiveInfo= false, responseHasSensitiveInfo = false) public class GetUploadParamsForVolumeCmd extends AbstractGetUploadParamsCmd { - public static final Logger s_logger = Logger.getLogger(GetUploadParamsForVolumeCmd.class.getName()); private static final String s_name = 
"postuploadvolumeresponse"; @@ -62,7 +60,7 @@ public void execute() throws ServerApiException { response.setResponseName(getCommandName()); setResponseObject(response); } catch (MalformedURLException | ResourceAllocationException e) { - s_logger.error("exception while uploading volume", e); + logger.error("exception while uploading volume", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "exception while uploading a volume: " + e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java index b62a909d71fd..a583675da76f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ListVolumesCmd.java @@ -36,14 +36,12 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.storage.Volume; @APICommand(name = "listVolumes", description = "Lists all volumes.", responseObject = VolumeResponse.class, responseView = ResponseView.Restricted, entityType = { Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVolumesCmd extends BaseListRetrieveOnlyResourceCountCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListVolumesCmd.class.getName()); private static final String s_name = "listvolumesresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RecoverVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RecoverVolumeCmd.java index 2589f816bc63..cd5a7735e382 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RecoverVolumeCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RecoverVolumeCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.volume; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -39,7 +38,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) public class RecoverVolumeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RecoverVolumeCmd.class.getName()); private static final String s_name = "recovervolumeresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java index 98fe6a7e821f..bad839f8ac7e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -31,7 +30,6 @@ @APICommand(name = "removeResourceDetail", description = "Removes detail for the Resource.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveResourceDetailCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveResourceDetailCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java index 0daf141ba4a6..9254bad207bf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.volume; import org.apache.cloudstack.api.BaseAsyncCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -44,7 +43,6 @@ @APICommand(name = "resizeVolume", description = "Resizes a volume", responseObject = VolumeResponse.class, responseView = ResponseView.Restricted, entityType = {Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ResizeVolumeCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ResizeVolumeCmd.class.getName()); private static final String s_name = "resizevolumeresponse"; @@ -195,10 +193,10 @@ public void execute() { volume = _volumeService.resizeVolume(this); } catch (ResourceAllocationException ex) { - s_logger.error(ex.getMessage()); + logger.error(ex.getMessage()); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } catch (InvalidParameterValueException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, ex.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java index e7782674828f..467c587cc731 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.volume; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -41,7 +40,6 @@ @APICommand(name = "updateVolume", description = "Updates the volume.", responseObject = VolumeResponse.class, responseView = ResponseView.Restricted, entityType = {Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVolumeCmd extends BaseAsyncCustomIdCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVolumeCmd.class.getName()); private static final String s_name = "updatevolumeresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java index c622081079d8..339c276d59ea 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.event.EventTypes; @@ -45,7 +44,6 @@ @APICommand(name = "uploadVolume", description = "Uploads a data disk.", responseObject = VolumeResponse.class, responseView = ResponseView.Restricted, entityType = {Volume.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UploadVolumeCmd extends BaseAsyncCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UploadVolumeCmd.class.getName()); private static final String s_name = "uploadvolumeresponse"; ///////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreatePrivateGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreatePrivateGatewayCmd.java index cf1315c9d55f..dceaabf648de 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreatePrivateGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreatePrivateGatewayCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.vpc; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -52,7 +51,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CreatePrivateGatewayCmd extends BaseAsyncCreateCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CreatePrivateGatewayCmd.class.getName()); private static final String s_name = "createprivategatewayresponse"; @@ -149,11 +147,11 @@ public void create() throws ResourceAllocationException { try { result = _vpcService.createVpcPrivateGateway(this); } catch (InsufficientCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex); + logger.info(ex); + logger.trace(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java index 68d7a77d946b..b28c02cb8004 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.vpc; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -42,7 +41,6 @@ @APICommand(name = "createStaticRoute", description = "Creates a static route", responseObject = StaticRouteResponse.class, entityType = {StaticRoute.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateStaticRouteCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateStaticRouteCmd.class.getName()); @Parameter(name = ApiConstants.GATEWAY_ID, type = CommandType.UUID, @@ -75,8 +73,8 @@ public void create() throws ResourceAllocationException { setEntityId(result.getId()); setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { - s_logger.info("Network rule conflict: " + ex.getMessage()); - s_logger.trace("Network rule conflict: ", ex); + logger.info("Network rule conflict: " + ex.getMessage()); + logger.trace("Network rule conflict: ", ex); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java index 7ca66b2a471e..94f05f707a0a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vpc; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -46,7 +45,6 @@ @APICommand(name = "createVPC", description = "Creates a VPC", responseObject = 
VpcResponse.class, responseView = ResponseView.Restricted, entityType = {Vpc.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVPCCmd extends BaseAsyncCreateCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(CreateVPCCmd.class.getName()); private static final String s_name = "createvpcresponse"; // /////////////////////////////////////////////////// @@ -213,18 +211,18 @@ public void execute() { if (isStart()) { _vpcService.startVpc(getEntityId(), true); } else { - s_logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API"); + logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API"); } vpc = _entityMgr.findById(Vpc.class, getEntityId()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex); + logger.info(ex); + logger.trace(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteStaticRouteCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteStaticRouteCmd.java index 6210d80586f5..01b6aae425b3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteStaticRouteCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteStaticRouteCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vpc; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -40,7 +39,6 @@ @APICommand(name = "deleteStaticRoute", description = "Deletes a static route", responseObject = SuccessResponse.class, entityType = {StaticRoute.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteStaticRouteCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteStaticRouteCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteVPCCmd.java index f408e32f62de..c35d9084bcc5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/DeleteVPCCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vpc; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -39,7 +38,6 @@ @APICommand(name = "deleteVPC", description = "Deletes a VPC", responseObject = SuccessResponse.class, entityType = {Vpc.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVPCCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVPCCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -81,10 +79,10 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete VPC"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + 
logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListPrivateGatewaysCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListPrivateGatewaysCmd.java index 8813cccc7791..2304cef3c6d1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListPrivateGatewaysCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListPrivateGatewaysCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ responseView = ResponseObject.ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPrivateGatewaysCmd extends BaseListProjectAndAccountResourcesCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListPrivateGatewaysCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCOfferingsCmd.java index c0f95fcd3616..f48e113286a9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCOfferingsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCOfferingsCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import 
com.cloud.network.vpc.VpcOffering; import com.cloud.utils.Pair; @@ -35,7 +34,6 @@ @APICommand(name = "listVPCOfferings", description = "Lists VPC offerings", responseObject = VpcOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVPCOfferingsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListVPCOfferingsCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java index 76cbcca61bb3..d128be1414d2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/ListVPCsCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.network.vpc.Vpc; import com.cloud.utils.Pair; @@ -42,7 +41,6 @@ @APICommand(name = "listVPCs", description = "Lists VPCs", responseObject = VpcResponse.class, responseView = ResponseView.Restricted, entityType = {Vpc.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVPCsCmd extends BaseListTaggedResourcesCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListVPCsCmd.class.getName()); private static final String s_name = "listvpcsresponse"; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java index 04946619c0ad..5ccd496eeb4c 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/RestartVPCCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.VpcResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -39,7 +38,6 @@ @APICommand(name = "restartVPC", description = "Restarts a VPC", responseObject = SuccessResponse.class, entityType = {Vpc.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RestartVPCCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RestartVPCCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -101,14 +99,14 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to restart VPC"); } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (final ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (final InsufficientCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex); + logger.info(ex); + logger.trace(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java index d4c7d0d5c599..6fcfb5311f62 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java 
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/UpdateVPCCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vpc; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -42,7 +41,6 @@ @APICommand(name = "updateVPC", description = "Updates a VPC", responseObject = VpcResponse.class, responseView = ResponseView.Restricted, entityType = {Vpc.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVPCCmd extends BaseAsyncCustomIdCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVPCCmd.class.getName()); private static final String s_name = "updatevpcresponse"; ///////////////////////////////////////////////////// @@ -129,11 +127,11 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update VPC"); } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (final InsufficientCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex); + logger.info(ex); + logger.trace(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java index f3b452008c98..9e950310cdc2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vpn; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "addVpnUser", description = "Adds vpn users", responseObject = VpnUsersResponse.class, entityType = {VpnUser.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddVpnUserCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(AddVpnUserCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java index 8ecf4b051ced..417ba2773c41 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vpn; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -41,7 +40,6 @@ @APICommand(name = "createRemoteAccessVpn", description = "Creates a l2tp/ipsec remote access vpn", responseObject = RemoteAccessVpnResponse.class, entityType = {RemoteAccessVpn.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateRemoteAccessVpnCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateRemoteAccessVpnCmd.class.getName()); ///////////////////////////////////////////////////// @@ -148,8 +146,8 @@ public void create() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create remote access vpn"); } } catch (NetworkRuleConflictException e) { - s_logger.info("Network rule conflict: " + 
e.getMessage()); - s_logger.trace("Network Rule Conflict: ", e); + logger.info("Network rule conflict: " + e.getMessage()); + logger.trace("Network Rule Conflict: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } } @@ -166,7 +164,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create remote access vpn"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java index 84aaafc60ed6..0b5c46d36eb9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vpn; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -41,7 +40,6 @@ @APICommand(name = "createVpnConnection", description = "Create site to site vpn connection", responseObject = Site2SiteVpnConnectionResponse.class, entityType = {Site2SiteVpnConnection.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVpnConnectionCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateVpnConnectionCmd.class.getName()); ///////////////////////////////////////////////////// @@ -135,8 +133,8 @@ public void create() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create site to site vpn connection"); } } catch (NetworkRuleConflictException e) { - s_logger.info("Network rule conflict: " + e.getMessage()); - s_logger.trace("Network Rule Conflict: ", e); + logger.info("Network rule conflict: " + e.getMessage()); + logger.trace("Network Rule Conflict: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } } @@ -153,7 +151,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create site to site vpn connection"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java index 88c6c12fac72..a2fa0d9829c8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java @@ -28,7 +28,6 @@ import 
org.apache.cloudstack.api.response.ProjectResponse; import org.apache.cloudstack.api.response.Site2SiteCustomerGatewayResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ResourceAllocationException; @@ -37,7 +36,6 @@ @APICommand(name = "createVpnCustomerGateway", description = "Creates site to site vpn customer gateway", responseObject = Site2SiteCustomerGatewayResponse.class, entityType = {Site2SiteCustomerGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVpnCustomerGatewayCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateVpnCustomerGatewayCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java index c354e9727373..6f31176c4ff6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnGatewayCmd.java @@ -31,12 +31,10 @@ import org.apache.cloudstack.api.response.Site2SiteVpnGatewayResponse; import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; @APICommand(name = "createVpnGateway", description = "Creates site to site vpn local gateway", responseObject = Site2SiteVpnGatewayResponse.class, entityType = {Site2SiteVpnGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateVpnGatewayCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(CreateVpnGatewayCmd.class.getName()); ///////////////////////////////////////////////////// diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java index dfc80b2fabcd..bf8d01579238 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vpn; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "deleteRemoteAccessVpn", description = "Destroys a l2tp/ipsec remote access vpn", responseObject = SuccessResponse.class, entityType = {RemoteAccessVpn.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteRemoteAccessVpnCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteRemoteAccessVpnCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java index 8a57dfc8edda..2528d93a0422 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnConnectionCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vpn; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "deleteVpnConnection", description = "Delete site to site vpn connection", responseObject = SuccessResponse.class, entityType = {Site2SiteVpnConnection.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVpnConnectionCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVpnConnectionCmd.class.getName()); ///////////////////////////////////////////////////// @@ -86,7 +84,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete site to site VPN connection"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java index e2f0aee90ec8..2b657fd3c088 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnCustomerGatewayCmd.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.command.user.vpn; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -37,7 +36,6 @@ @APICommand(name = "deleteVpnCustomerGateway", description = "Delete site to site vpn customer gateway", responseObject = SuccessResponse.class, entityType = {Site2SiteCustomerGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public 
class DeleteVpnCustomerGatewayCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVpnCustomerGatewayCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java index b7acc5c45069..27ded12dc58d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/DeleteVpnGatewayCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.vpn; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "deleteVpnGateway", description = "Delete site to site vpn gateway", responseObject = SuccessResponse.class, entityType = {Site2SiteVpnGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteVpnGatewayCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteVpnGatewayCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListRemoteAccessVpnsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListRemoteAccessVpnsCmd.java index 1f2f95188e7e..4efc70c84199 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListRemoteAccessVpnsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListRemoteAccessVpnsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -37,7 +36,6 @@ @APICommand(name = "listRemoteAccessVpns", description = 
"Lists remote access vpns", responseObject = RemoteAccessVpnResponse.class, entityType = {RemoteAccessVpn.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListRemoteAccessVpnsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListRemoteAccessVpnsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnConnectionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnConnectionsCmd.java index 763a374645b9..aeeae44d0046 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnConnectionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnConnectionsCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -36,7 +35,6 @@ @APICommand(name = "listVpnConnections", description = "Lists site to site vpn connection gateways", responseObject = Site2SiteVpnConnectionResponse.class, entityType = {Site2SiteVpnConnection.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVpnConnectionsCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListVpnConnectionsCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCustomerGatewaysCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCustomerGatewaysCmd.java index b66c47899f7f..258a8a753ebe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCustomerGatewaysCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnCustomerGatewaysCmd.java @@ -19,7 
+19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listVpnCustomerGateways", description = "Lists site to site vpn customer gateways", responseObject = Site2SiteCustomerGatewayResponse.class, entityType = {Site2SiteCustomerGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVpnCustomerGatewaysCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListVpnCustomerGatewaysCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnGatewaysCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnGatewaysCmd.java index fb9c82620039..d30fbf8d32bb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnGatewaysCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnGatewaysCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; @@ -36,7 +35,6 @@ @APICommand(name = "listVpnGateways", description = "Lists site 2 site vpn gateways", responseObject = Site2SiteVpnGatewayResponse.class, entityType = {Site2SiteVpnGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVpnGatewaysCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListVpnGatewaysCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnUsersCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnUsersCmd.java index 
9f8581eb0140..48591765ec34 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnUsersCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ListVpnUsersCmd.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -34,7 +33,6 @@ @APICommand(name = "listVpnUsers", description = "Lists vpn users", responseObject = VpnUsersResponse.class, entityType = {VpnUser.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListVpnUsersCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger s_logger = Logger.getLogger(ListVpnUsersCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java index 4adf385b127a..48e7a9ee5193 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vpn; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ @APICommand(name = "removeVpnUser", description = "Removes vpn user", responseObject = SuccessResponse.class, entityType = {VpnUser.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveVpnUserCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveVpnUserCmd.class.getName()); ///////////////////////////////////////////////////// @@ -109,7 +107,7 @@ public void execute() { boolean result = _ravService.removeVpnUser(ownerId, userName, CallContext.current().getCallingAccount()); if (!result) { String errorMessage = String.format("Failed to remove VPN user=[%s]. VPN owner id=[%s].", userName, ownerId); - s_logger.error(errorMessage); + logger.error(errorMessage); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage); } @@ -118,13 +116,13 @@ public void execute() { appliedVpnUsers = _ravService.applyVpnUsers(ownerId, userName, true); } catch (ResourceUnavailableException ex) { String errorMessage = String.format("Failed to refresh VPN user=[%s] due to resource unavailable. VPN owner id=[%s].", userName, ownerId); - s_logger.error(errorMessage, ex); + logger.error(errorMessage, ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage, ex); } if (!appliedVpnUsers) { String errorMessage = String.format("Failed to refresh VPN user=[%s]. 
VPN owner id=[%s].", userName, ownerId); - s_logger.debug(errorMessage); + logger.debug(errorMessage); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMessage); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java index c63126578f2e..736295b4119c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.user.vpn; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "resetVpnConnection", description = "Reset site to site vpn connection", responseObject = Site2SiteVpnConnectionResponse.class, entityType = {Site2SiteVpnConnection.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ResetVpnConnectionCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ResetVpnConnectionCmd.class.getName()); ///////////////////////////////////////////////////// @@ -105,7 +103,7 @@ public void execute() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to reset site to site VPN connection"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateRemoteAccessVpnCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateRemoteAccessVpnCmd.java index d5b36f662f9e..defde70b63ac 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateRemoteAccessVpnCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateRemoteAccessVpnCmd.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.RemoteAccessVpnResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; @@ -32,7 +31,6 @@ @APICommand(name = "updateRemoteAccessVpn", description = "Updates remote access vpn", responseObject = RemoteAccessVpnResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateRemoteAccessVpnCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateRemoteAccessVpnCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java index 67cb65bdb93f..62dd6167b753 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.api.BaseAsyncCustomIdCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.Site2SiteVpnConnectionResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.network.Site2SiteVpnConnection; @@ -31,7 +30,6 @@ @APICommand(name = "updateVpnConnection", description = "Updates site to site vpn connection", responseObject = Site2SiteVpnConnectionResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVpnConnectionCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = 
Logger.getLogger(UpdateVpnConnectionCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java index 179bc0407b46..9f3ac2ec4367 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java @@ -18,7 +18,6 @@ import org.apache.cloudstack.api.ApiArgValidator; import org.apache.cloudstack.api.ApiCommandResourceType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "updateVpnCustomerGateway", description = "Update site to site vpn customer gateway", responseObject = Site2SiteCustomerGatewayResponse.class, entityType = {Site2SiteCustomerGateway.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVpnCustomerGatewayCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVpnCustomerGatewayCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java index c69bbb5689f2..9fe5ae0480f7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.api.BaseAsyncCustomIdCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.Site2SiteVpnGatewayResponse; -import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import 
com.cloud.network.Site2SiteVpnGateway; @@ -31,7 +30,6 @@ @APICommand(name = "updateVpnGateway", description = "Updates site to site vpn local gateway", responseObject = Site2SiteVpnGatewayResponse.class, since = "4.4", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateVpnGatewayCmd extends BaseAsyncCustomIdCmd { - public static final Logger s_logger = Logger.getLogger(UpdateVpnGatewayCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java index c29f3a851061..d926257437e6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/zone/ListZonesCmd.java @@ -29,12 +29,10 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; @APICommand(name = "listZones", description = "Lists zones", responseObject = ZoneResponse.class, responseView = ResponseView.Restricted, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListZonesCmd extends BaseListCmd implements UserCmd { - public static final Logger s_logger = Logger.getLogger(ListZonesCmd.class.getName()); private static final String s_name = "listzonesresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java b/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java index f19b5398dd9e..5bd9699b2019 100644 --- a/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java +++ b/api/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManager.java @@ -20,6 +20,7 @@ import 
org.apache.cloudstack.api.command.user.consoleproxy.ConsoleEndpoint; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import java.util.Date; public interface ConsoleAccessManager extends Manager, Configurable { @@ -44,4 +45,7 @@ public interface ConsoleAccessManager extends Manager, Configurable { void removeSessions(String[] sessionUuids); void acquireSession(String sessionUuid); + + String genAccessTicket(String host, String port, String sid, String tag, String sessionUuid); + String genAccessTicket(String host, String port, String sid, String tag, Date normalizedHashTime, String sessionUuid); } diff --git a/api/src/main/java/org/apache/cloudstack/context/CallContext.java b/api/src/main/java/org/apache/cloudstack/context/CallContext.java index ecc109977eb6..69376e4f6d7d 100644 --- a/api/src/main/java/org/apache/cloudstack/context/CallContext.java +++ b/api/src/main/java/org/apache/cloudstack/context/CallContext.java @@ -23,8 +23,8 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.managed.threadlocal.ManagedThreadLocal; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.exception.CloudAuthenticationException; import com.cloud.projects.Project; @@ -33,6 +33,7 @@ import com.cloud.utils.UuidUtils; import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.logging.log4j.ThreadContext; /** * CallContext records information about the environment the call is made. This @@ -40,7 +41,7 @@ * entry point must set the context and remove it when the thread finishes. 
*/ public class CallContext { - private static final Logger s_logger = Logger.getLogger(CallContext.class); + protected static Logger LOGGER = LogManager.getLogger(CallContext.class); private static ManagedThreadLocal s_currentContext = new ManagedThreadLocal(); private static ManagedThreadLocal> s_currentContextStack = new ManagedThreadLocal>() { @Override @@ -178,9 +179,9 @@ protected static CallContext register(User callingUser, Account callingAccount, callingContext = new CallContext(userId, accountId, contextId); } s_currentContext.set(callingContext); - NDC.push("ctx-" + UuidUtils.first(contextId)); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Registered: " + callingContext); + ThreadContext.push("ctx-" + UuidUtils.first(contextId)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Registered: " + callingContext); } s_currentContextStack.get().push(callingContext); @@ -209,7 +210,7 @@ public static CallContext registerSystemCallContextOnceOnly() { assert context.getCallingUserId() == User.UID_SYSTEM : "You are calling a very specific method that registers a one time system context. 
This method is meant for background threads that does processing."; return context; } catch (Exception e) { - s_logger.error("Failed to register the system call context.", e); + LOGGER.error("Failed to register the system call context.", e); throw new CloudRuntimeException("Failed to register system call context", e); } } @@ -278,18 +279,18 @@ public static CallContext unregister() { return null; } s_currentContext.remove(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Unregistered: " + context); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Unregistered: " + context); } String contextId = context.getContextId(); String sessionIdOnStack = null; String sessionIdPushedToNDC = "ctx-" + UuidUtils.first(contextId); - while ((sessionIdOnStack = NDC.pop()) != null) { + while ((sessionIdOnStack = ThreadContext.pop()) != null) { if (sessionIdOnStack.isEmpty() || sessionIdPushedToNDC.equals(sessionIdOnStack)) { break; } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Popping from NDC: " + contextId); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Popping from NDC: " + contextId); } } diff --git a/api/src/main/java/org/apache/cloudstack/context/LogContext.java b/api/src/main/java/org/apache/cloudstack/context/LogContext.java index c81d0f499cb8..c367975aba3b 100644 --- a/api/src/main/java/org/apache/cloudstack/context/LogContext.java +++ b/api/src/main/java/org/apache/cloudstack/context/LogContext.java @@ -20,8 +20,8 @@ import java.util.Map; import java.util.UUID; -import org.apache.log4j.Logger; -import org.apache.log4j.MDC; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.managed.threadlocal.ManagedThreadLocal; @@ -31,13 +31,14 @@ import com.cloud.utils.UuidUtils; import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.logging.log4j.ThreadContext; /** * LogContext records information about the environment the API call is made. 
This * class must be always be available in all CloudStack code. */ public class LogContext { - private static final Logger s_logger = Logger.getLogger(LogContext.class); + protected static Logger LOGGER = LogManager.getLogger(LogContext.class); private static ManagedThreadLocal s_currentContext = new ManagedThreadLocal(); private String logContextId; @@ -134,9 +135,9 @@ protected static LogContext register(User callingUser, Account callingAccount, L callingContext = new LogContext(userId, accountId, contextId); } s_currentContext.set(callingContext); - MDC.put("logcontextid", UuidUtils.first(contextId)); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Registered for log: " + callingContext); + ThreadContext.put("logcontextid", UuidUtils.first(contextId)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Registered for log: " + callingContext); } return callingContext; } @@ -160,7 +161,7 @@ public static LogContext registerSystemLogContextOnceOnly() { assert context.getCallingUserId() == User.UID_SYSTEM : "You are calling a very specific method that registers a one time system context. 
This method is meant for background threads that does processing."; return context; } catch (Exception e) { - s_logger.error("Failed to register the system log context.", e); + LOGGER.error("Failed to register the system log context.", e); throw new CloudRuntimeException("Failed to register system log context", e); } } @@ -206,11 +207,11 @@ public static void unregister() { LogContext context = s_currentContext.get(); if (context != null) { s_currentContext.remove(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Unregistered: " + context); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Unregistered: " + context); } } - MDC.clear(); + ThreadContext.clearMap(); } public void setStartEventId(long startEventId) { diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmdTest.java index d26065da21c3..365646de7a33 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/account/CreateAccountCmdTest.java @@ -22,7 +22,8 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -38,7 +39,7 @@ import com.cloud.user.User; public class CreateAccountCmdTest { - public static final Logger s_logger = Logger.getLogger(CreateAccountCmdTest.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); @Mock private AccountService accountService; diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java index 97737691ed66..a69a7a858ce0 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/AddObjectStoragePoolCmdTest.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.storage.object.ObjectStore; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -43,7 +42,6 @@ @RunWith(MockitoJUnitRunner.class) public class AddObjectStoragePoolCmdTest { - public static final Logger s_logger = Logger.getLogger(AddObjectStoragePoolCmdTest.class.getName()); @Mock StorageService storageService; diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmdTest.java index 9fb133b1b481..dc5b9f5fa325 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/DeleteObjectStoragePoolCmdTest.java @@ -20,7 +20,6 @@ import com.cloud.storage.StorageService; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -30,7 +29,6 @@ import org.mockito.Spy; public class DeleteObjectStoragePoolCmdTest { - public static final Logger s_logger = Logger.getLogger(DeleteObjectStoragePoolCmdTest.class.getName()); @Mock private StorageService storageService; diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmdTest.java 
b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmdTest.java index 756869ee228f..307d80aa3f83 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/storage/UpdateObjectStoragePoolCmdTest.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.storage.object.ObjectStore; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -36,7 +35,6 @@ import static org.mockito.ArgumentMatchers.any; public class UpdateObjectStoragePoolCmdTest { - public static final Logger s_logger = Logger.getLogger(UpdateObjectStoragePoolCmdTest.class.getName()); @Mock private StorageService storageService; diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmdTest.java index bc1e1854aa77..8a57ac3eb22c 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/user/CreateUserCmdTest.java @@ -22,7 +22,8 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -38,7 +39,7 @@ import com.cloud.user.User; public class CreateUserCmdTest { - public static final Logger s_logger = Logger.getLogger(CreateUserCmdTest.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); @Mock private AccountService accountService; diff --git 
a/client/conf/log4j-cloud.xml.in b/client/conf/log4j-cloud.xml.in index 223692881dea..dbcf8c6198bb 100755 --- a/client/conf/log4j-cloud.xml.in +++ b/client/conf/log4j-cloud.xml.in @@ -17,183 +17,118 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + net.sf.cglib.proxy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/client/pom.xml b/client/pom.xml index 8b8747bdf930..91399097d648 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -752,7 +752,7 @@ - log4j.configuration + log4j2.configurationFile log4j-cloud.xml diff --git a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java index 763c274c7f51..23312b8517f3 100644 --- a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java +++ b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java @@ -29,7 +29,6 @@ import org.apache.commons.daemon.Daemon; import org.apache.commons.daemon.DaemonContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.eclipse.jetty.jmx.MBeanContainer; import org.eclipse.jetty.server.ForwardedRequestCustomizer; import org.eclipse.jetty.server.HttpConfiguration; @@ -51,6 +50,8 @@ import org.eclipse.jetty.util.thread.QueuedThreadPool; import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler; import org.eclipse.jetty.webapp.WebAppContext; 
+import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.Pair; import com.cloud.utils.PropertiesUtil; @@ -62,7 +63,7 @@ * Configuration parameters are read from server.properties file available on the classpath. */ public class ServerDaemon implements Daemon { - private static final Logger LOG = Logger.getLogger(ServerDaemon.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String WEB_XML = "META-INF/webapp/WEB-INF/web.xml"; ///////////////////////////////////////////////////// @@ -116,12 +117,12 @@ public static void main(final String... anArgs) throws Exception { public void init(final DaemonContext context) { final File confFile = PropertiesUtil.findConfigFile("server.properties"); if (confFile == null) { - LOG.warn(String.format("Server configuration file not found. Initializing server daemon on %s, with http.enable=%s, http.port=%s, https.enable=%s, https.port=%s, context.path=%s", + logger.warn(String.format("Server configuration file not found. 
Initializing server daemon on %s, with http.enable=%s, http.port=%s, https.enable=%s, https.port=%s, context.path=%s", bindInterface, httpEnable, httpPort, httpsEnable, httpsPort, contextPath)); return; } - LOG.info("Server configuration file found: " + confFile.getAbsolutePath()); + logger.info("Server configuration file found: " + confFile.getAbsolutePath()); try { InputStream is = new FileInputStream(confFile); @@ -142,15 +143,15 @@ public void init(final DaemonContext context) { setSessionTimeout(Integer.valueOf(properties.getProperty(SESSION_TIMEOUT, "30"))); setMaxFormContentSize(Integer.valueOf(properties.getProperty(REQUEST_CONTENT_SIZE_KEY, String.valueOf(DEFAULT_REQUEST_CONTENT_SIZE)))); } catch (final IOException e) { - LOG.warn("Failed to read configuration from server.properties file", e); + logger.warn("Failed to read configuration from server.properties file", e); } finally { // make sure that at least HTTP is enabled if both of them are set to false (misconfiguration) if (!httpEnable && !httpsEnable) { setHttpEnable(true); - LOG.warn("Server configuration malformed, neither http nor https is enabled, http will be enabled."); + logger.warn("Server configuration malformed, neither http nor https is enabled, http will be enabled."); } } - LOG.info(String.format("Initializing server daemon on %s, with http.enable=%s, http.port=%s, https.enable=%s, https.port=%s, context.path=%s", + logger.info(String.format("Initializing server daemon on %s, with http.enable=%s, http.port=%s, https.enable=%s, https.port=%s, context.path=%s", bindInterface, httpEnable, httpPort, httpsEnable, httpsPort, contextPath)); } @@ -254,7 +255,7 @@ private void createHttpsConnector(final HttpConfiguration httpConfig) { KeyStoreScanner scanner = new KeyStoreScanner(sslContextFactory); server.addBean(scanner); } catch (Exception ex) { - LOG.error("failed to set up keystore scanner, manual refresh of certificates will be required", ex); + logger.error("failed to set up keystore 
scanner, manual refresh of certificates will be required", ex); } } } diff --git a/client/src/main/webapp/WEB-INF/web.xml b/client/src/main/webapp/WEB-INF/web.xml index 9a3d8bc5b29a..43bee7e59d88 100644 --- a/client/src/main/webapp/WEB-INF/web.xml +++ b/client/src/main/webapp/WEB-INF/web.xml @@ -21,7 +21,7 @@ version="2.5"> - log4jConfigLocation + log4jConfiguration classpath:log4j-cloud.xml diff --git a/core/src/main/java/com/cloud/agent/api/SecurityGroupRulesCmd.java b/core/src/main/java/com/cloud/agent/api/SecurityGroupRulesCmd.java index ea4ab96c5a7c..5d202172c35a 100644 --- a/core/src/main/java/com/cloud/agent/api/SecurityGroupRulesCmd.java +++ b/core/src/main/java/com/cloud/agent/api/SecurityGroupRulesCmd.java @@ -27,7 +27,6 @@ import org.apache.commons.codec.binary.Base64; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.LogLevel.Log4jLevel; import com.cloud.agent.api.to.VirtualMachineTO; @@ -39,7 +38,6 @@ public class SecurityGroupRulesCmd extends Command { public static final char RULE_COMMAND_SEPARATOR = ';'; protected static final String EGRESS_RULE = "E:"; protected static final String INGRESS_RULE = "I:"; - private static final Logger LOGGER = Logger.getLogger(SecurityGroupRulesCmd.class); private final String guestIp; private final String guestIp6; @@ -233,7 +231,7 @@ public String compressStringifiedRules() { dzip.close(); encodedResult = Base64.encodeBase64String(out.toByteArray()); } catch (final IOException e) { - LOGGER.warn("Exception while compressing security group rules"); + logger.warn("Exception while compressing security group rules"); } return encodedResult; } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 4492947b2cc6..3c86b3a0dcc4 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java 
+++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -51,7 +51,8 @@ import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.net.util.SubnetUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.joda.time.Duration; import com.cloud.agent.api.Answer; @@ -85,7 +86,7 @@ **/ public class VirtualRoutingResource { - private static final Logger s_logger = Logger.getLogger(VirtualRoutingResource.class); + protected Logger logger = LogManager.getLogger(getClass()); private VirtualRouterDeployer _vrDeployer; private Map> _vrAggregateCommandsSet; protected Map _vrLockMap = new HashMap(); @@ -117,7 +118,7 @@ public Answer executeRequest(final NetworkElementCommand cmd) { try { ExecutionResult rc = _vrDeployer.prepareCommand(cmd); if (!rc.isSuccess()) { - s_logger.error("Failed to prepare VR command due to " + rc.getDetails()); + logger.error("Failed to prepare VR command due to " + rc.getDetails()); return new Answer(cmd, false, rc.getDetails()); } @@ -164,7 +165,7 @@ public Answer executeRequest(final NetworkElementCommand cmd) { if (!aggregated) { ExecutionResult rc = _vrDeployer.cleanupCommand(cmd); if (!rc.isSuccess()) { - s_logger.error("Failed to cleanup VR command due to " + rc.getDetails()); + logger.error("Failed to cleanup VR command due to " + rc.getDetails()); } } } @@ -220,15 +221,15 @@ private Answer executeQueryCommand(NetworkElementCommand cmd) { } else if (cmd instanceof GetRouterMonitorResultsCommand) { return execute((GetRouterMonitorResultsCommand)cmd); } else { - s_logger.error("Unknown query command in VirtualRoutingResource!"); + logger.error("Unknown query command in VirtualRoutingResource!"); return Answer.createUnsupportedCommandAnswer(cmd); } } - private static String getRouterSshControlIp(NetworkElementCommand cmd) { + private String 
getRouterSshControlIp(NetworkElementCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); - if (s_logger.isDebugEnabled()) - s_logger.debug("Use router's private IP for SSH control. IP : " + routerIp); + if (logger.isDebugEnabled()) + logger.debug("Use router's private IP for SSH control. IP : " + routerIp); return routerIp; } @@ -243,24 +244,24 @@ private Answer execute(UpdateNetworkCommand cmd) { String subnet = address.split("/")[1]; ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.VR_UPDATE_INTERFACE_CONFIG, ipAddressTO.getPublicIp() + " " + subnet + " " + ipAddressTO.getMtu() + " " + 15); - if (s_logger.isDebugEnabled()) - s_logger.debug("result: " + result.isSuccess() + ", output: " + result.getDetails()); + if (logger.isDebugEnabled()) + logger.debug("result: " + result.isSuccess() + ", output: " + result.getDetails()); if (!Boolean.TRUE.equals(result.isSuccess())) { if (result.getDetails().contains(String.format("Interface with IP %s not found", ipAddressTO.getPublicIp()))) { - s_logger.warn(String.format("Skipping IP: %s as it isn't configured on router interface", ipAddressTO.getPublicIp())); + logger.warn(String.format("Skipping IP: %s as it isn't configured on router interface", ipAddressTO.getPublicIp())); } else if (ipAddressTO.getDetails().get(ApiConstants.REDUNDANT_STATE).equals(VirtualRouter.RedundantState.PRIMARY.name())) { - s_logger.warn(String.format("Failed to update interface mtu to %s on interface with ip: %s", + logger.warn(String.format("Failed to update interface mtu to %s on interface with ip: %s", ipAddressTO.getMtu(), ipAddressTO.getPublicIp())); finalResult = false; } continue; } - s_logger.info(String.format("Successfully updated mtu to %s on interface with ip: %s", + logger.info(String.format("Successfully updated mtu to %s on interface with ip: %s", ipAddressTO.getMtu(), ipAddressTO.getPublicIp())); finalResult &= true; } catch (Exception e) { String msg = "Prepare UpdateNetwork 
failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, e); } } @@ -296,9 +297,9 @@ private Answer applyConfig(NetworkElementCommand cmd, List cfg) { for (ConfigItem configItem : cfg) { long startTimestamp = System.currentTimeMillis(); ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), configItem, VRScripts.VR_SCRIPT_EXEC_TIMEOUT); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { long elapsed = System.currentTimeMillis() - startTimestamp; - s_logger.debug("Processing " + configItem + " took " + elapsed + "ms"); + logger.debug("Processing " + configItem + " took " + elapsed + "ms"); } if (result == null) { result = new ExecutionResult(false, "null execution result"); @@ -310,7 +311,7 @@ private Answer applyConfig(NetworkElementCommand cmd, List cfg) { // Not sure why this matters, but log it anyway if (cmd.getAnswersCount() != results.size()) { - s_logger.warn("Expected " + cmd.getAnswersCount() + " answers while executing " + cmd.getClass().getSimpleName() + " but received " + results.size()); + logger.warn("Expected " + cmd.getAnswersCount() + " answers while executing " + cmd.getClass().getSimpleName() + " but received " + results.size()); } if (results.size() == 1) { @@ -359,7 +360,7 @@ private GetRouterMonitorResultsAnswer parseLinesForHealthChecks(GetRouterMonitor } else if (!readingFailedChecks && readingMonitorResults) { // Reading monitor checks result monitorResults.append(line); } else { - s_logger.error("Unexpected lines reached while parsing health check response. Skipping line:- " + line); + logger.error("Unexpected lines reached while parsing health check response. Skipping line:- " + line); } } @@ -379,16 +380,16 @@ private GetRouterMonitorResultsAnswer execute(GetRouterMonitorResultsCommand cmd } String args = cmd.shouldPerformFreshChecks() ? 
"true" : "false"; - s_logger.info("Fetching health check result for " + routerIp + " and executing fresh checks: " + args); + logger.info("Fetching health check result for " + routerIp + " and executing fresh checks: " + args); ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_MONITOR_RESULTS, args); if (!result.isSuccess()) { - s_logger.warn("Result of " + cmd + " failed with details: " + result.getDetails()); + logger.warn("Result of " + cmd + " failed with details: " + result.getDetails()); return new GetRouterMonitorResultsAnswer(cmd, false, null, result.getDetails()); } if (result.getDetails().isEmpty()) { - s_logger.warn("Result of " + cmd + " received no details."); + logger.warn("Result of " + cmd + " received no details."); return new GetRouterMonitorResultsAnswer(cmd, false, null, "No results available."); } @@ -398,12 +399,12 @@ private GetRouterMonitorResultsAnswer execute(GetRouterMonitorResultsCommand cmd private Pair checkRouterFileSystem(String routerIp) { ExecutionResult fileSystemWritableTestResult = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_FILESYSTEM_WRITABLE_CHECK, null); if (fileSystemWritableTestResult.isSuccess()) { - s_logger.debug("Router connectivity and file system writable check passed"); + logger.debug("Router connectivity and file system writable check passed"); return new Pair(true, "success"); } String resultDetails = fileSystemWritableTestResult.getDetails(); - s_logger.warn("File system writable check failed with details: " + resultDetails); + logger.warn("File system writable check failed with details: " + resultDetails); if (StringUtils.isNotBlank(resultDetails)) { final String readOnlyFileSystemError = "Read-only file system"; if (resultDetails.contains(readOnlyFileSystemError)) { @@ -488,8 +489,8 @@ public boolean configureHostParams(final Map params) { if (params.get("router.aggregation.command.each.timeout") != null) { String value = 
(String)params.get("router.aggregation.command.each.timeout"); _eachTimeout = Duration.standardSeconds(NumbersUtil.parseLong(value, 600)); - if (s_logger.isDebugEnabled()){ - s_logger.debug("The router.aggregation.command.each.timeout in seconds is set to " + _eachTimeout.getStandardSeconds()); + if (logger.isDebugEnabled()){ + logger.debug("The router.aggregation.command.each.timeout in seconds is set to " + _eachTimeout.getStandardSeconds()); } } @@ -510,8 +511,8 @@ public boolean configure(final String name, final Map params) th value = (String)params.get("router.aggregation.command.each.timeout"); _eachTimeout = Duration.standardSeconds(NumbersUtil.parseInt(value, (int)VRScripts.VR_SCRIPT_EXEC_TIMEOUT.getStandardSeconds())); - if (s_logger.isDebugEnabled()){ - s_logger.debug("The router.aggregation.command.each.timeout in seconds is set to " + _eachTimeout.getStandardSeconds()); + if (logger.isDebugEnabled()){ + logger.debug("The router.aggregation.command.each.timeout in seconds is set to " + _eachTimeout.getStandardSeconds()); } if (_vrDeployer == null) { @@ -534,8 +535,8 @@ public boolean connect(final String ipAddress, int retry, int sleep) { for (int i = 0; i <= retry; i++) { SocketChannel sch = null; try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to connect to " + ipAddress); + if (logger.isDebugEnabled()) { + logger.debug("Trying to connect to " + ipAddress); } sch = SocketChannel.open(); sch.configureBlocking(true); @@ -544,8 +545,8 @@ public boolean connect(final String ipAddress, int retry, int sleep) { sch.connect(addr); return true; } catch (final IOException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Could not connect to " + ipAddress); + if (logger.isDebugEnabled()) { + logger.debug("Could not connect to " + ipAddress); } } finally { if (sch != null) { @@ -561,7 +562,7 @@ public boolean connect(final String ipAddress, int retry, int sleep) { } } - s_logger.debug("Unable to logon to " + ipAddress); + 
logger.debug("Unable to logon to " + ipAddress); return false; } @@ -571,7 +572,7 @@ private List generateCommandCfg(NetworkElementCommand cmd) { * [TODO] Still have to migrate LoadBalancerConfigCommand and BumpUpPriorityCommand * [FIXME] Have a look at SetSourceNatConfigItem */ - s_logger.debug("Transforming " + cmd.getClass().getCanonicalName() + " to ConfigItems"); + logger.debug("Transforming " + cmd.getClass().getCanonicalName() + " to ConfigItems"); final AbstractConfigItemFacade configItemFacade = AbstractConfigItemFacade.getInstance(cmd.getClass()); @@ -601,7 +602,7 @@ private Answer execute(AggregationControlCommand cmd) { answerCounts += command.getAnswersCount(); List cfg = generateCommandCfg(command); if (cfg == null) { - s_logger.warn("Unknown commands for VirtualRoutingResource, but continue: " + cmd.toString()); + logger.warn("Unknown commands for VirtualRoutingResource, but continue: " + cmd.toString()); continue; } @@ -616,8 +617,8 @@ private Answer execute(AggregationControlCommand cmd) { ScriptConfigItem scriptConfigItem = new ScriptConfigItem(VRScripts.VR_CFG, "-c " + VRScripts.CONFIG_CACHE_LOCATION + cfgFileName); // 120s is the minimal timeout Duration timeout = _eachTimeout.withDurationAdded(_eachTimeout.getStandardSeconds(), answerCounts); - if (s_logger.isDebugEnabled()){ - s_logger.debug("Aggregate action timeout in seconds is " + timeout.getStandardSeconds()); + if (logger.isDebugEnabled()){ + logger.debug("Aggregate action timeout in seconds is " + timeout.getStandardSeconds()); } ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), fileConfigItem, timeout); diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java index bed472b68808..46dd801bebf5 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java +++ 
b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/AbstractConfigItemFacade.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.agent.api.BumpUpPriorityCommand; import com.cloud.agent.api.SetupGuestNetworkCommand; @@ -59,10 +58,12 @@ import com.google.gson.FieldNamingPolicy; import com.google.gson.Gson; import com.google.gson.GsonBuilder; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public abstract class AbstractConfigItemFacade { - private static final Logger s_logger = Logger.getLogger(AbstractConfigItemFacade.class); + protected Logger logger = LogManager.getLogger(getClass()); private final static Gson gson; @@ -123,8 +124,8 @@ protected List generateConfigItems(final ConfigBase configuration) { final List cfg = new LinkedList<>(); final String remoteFilename = appendUuidToJsonFiles(destinationFile); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Transformed filename: " + destinationFile + " to: " + remoteFilename); + if (logger.isDebugEnabled()) { + logger.debug("Transformed filename: " + destinationFile + " to: " + remoteFilename); } final ConfigItem configFile = new FileConfigItem(VRScripts.CONFIG_PERSIST_LOCATION, remoteFilename, gson.toJson(configuration)); diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetMonitorServiceConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetMonitorServiceConfigItem.java index 52d8442b5ac6..227675ebf4ed 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetMonitorServiceConfigItem.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetMonitorServiceConfigItem.java @@ -21,7 +21,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.routing.SetMonitorServiceCommand; @@ -32,7 +31,6 @@ import 
com.cloud.agent.resource.virtualnetwork.model.MonitorService; public class SetMonitorServiceConfigItem extends AbstractConfigItemFacade { - private static final Logger s_logger = Logger.getLogger(SetMonitorServiceConfigItem.class); @Override public List generateConfig(final NetworkElementCommand cmd) { @@ -58,14 +56,14 @@ private void setupHealthChecksRelatedInfo(MonitorService monitorService, SetMoni try { monitorService.setHealthChecksBasicRunInterval(Integer.parseInt(command.getAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_BASIC_INTERVAL))); } catch (NumberFormatException exception) { - s_logger.error("Unexpected health check basic interval set" + command.getAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_BASIC_INTERVAL) + + logger.error("Unexpected health check basic interval set" + command.getAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_BASIC_INTERVAL) + ". Exception: " + exception + "Will use default value"); } try { monitorService.setHealthChecksAdvancedRunInterval(Integer.parseInt(command.getAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_ADVANCED_INTERVAL))); } catch (NumberFormatException exception) { - s_logger.error("Unexpected health check advanced interval set" + command.getAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_ADVANCED_INTERVAL) + + logger.error("Unexpected health check advanced interval set" + command.getAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_ADVANCED_INTERVAL) + ". 
Exception: " + exception + "Will use default value"); } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetNetworkAclConfigItem.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetNetworkAclConfigItem.java index a64328d516af..f4c3275aa2b3 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetNetworkAclConfigItem.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/facade/SetNetworkAclConfigItem.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.routing.NetworkElementCommand; import com.cloud.agent.api.routing.SetNetworkACLCommand; @@ -41,7 +40,6 @@ public class SetNetworkAclConfigItem extends AbstractConfigItemFacade { - public static final Logger s_logger = Logger.getLogger(SetNetworkAclConfigItem.class.getName()); @Override public List generateConfig(final NetworkElementCommand cmd) { @@ -81,7 +79,7 @@ public List generateConfig(final NetworkElementCommand cmd) { try { aclRule = new ProtocolAclRule(ruleParts[4], "ACCEPT".equals(ruleParts[5]), Integer.parseInt(ruleParts[1])); } catch (final Exception e) { - s_logger.warn("Problem occurred when reading the entries in the ruleParts array. Actual array size is '" + ruleParts.length + "', but trying to read from index 5."); + logger.warn("Problem occurred when reading the entries in the ruleParts array. 
Actual array size is '" + ruleParts.length + "', but trying to read from index 5."); continue; } } diff --git a/core/src/main/java/com/cloud/agent/transport/LoggingExclusionStrategy.java b/core/src/main/java/com/cloud/agent/transport/LoggingExclusionStrategy.java index 90e964e71391..2301c1fc205b 100644 --- a/core/src/main/java/com/cloud/agent/transport/LoggingExclusionStrategy.java +++ b/core/src/main/java/com/cloud/agent/transport/LoggingExclusionStrategy.java @@ -19,7 +19,8 @@ package com.cloud.agent.transport; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.google.gson.ExclusionStrategy; import com.google.gson.FieldAttributes; @@ -29,7 +30,9 @@ import com.cloud.agent.api.LogLevel.Log4jLevel; public class LoggingExclusionStrategy implements ExclusionStrategy { - Logger _logger = null; + protected Logger exclusionLogger = null; + + protected Logger logger = LogManager.getLogger(getClass()); @Override public boolean shouldSkipClass(Class clazz) { @@ -40,20 +43,24 @@ public boolean shouldSkipClass(Class clazz) { LogLevel level = clazz.getAnnotation(LogLevel.class); if (level == null) { log4jLevel = LogLevel.Log4jLevel.Debug; + logger.trace("Class {} does not have any log level annotation, considering level as debug.", clazz); } else { log4jLevel = level.value(); } - return !log4jLevel.enabled(_logger); + return !log4jLevel.enabled(exclusionLogger); } @Override public boolean shouldSkipField(FieldAttributes field) { LogLevel level = field.getAnnotation(LogLevel.class); - return level != null && !level.value().enabled(_logger); + return level != null && !level.value().enabled(exclusionLogger); } public LoggingExclusionStrategy(Logger logger) { - _logger = logger; + exclusionLogger = logger; + } + + public LoggingExclusionStrategy() { } } diff --git a/core/src/main/java/com/cloud/agent/transport/Request.java b/core/src/main/java/com/cloud/agent/transport/Request.java index 
241ccd4bbd8b..3769dbbd612c 100644 --- a/core/src/main/java/com/cloud/agent/transport/Request.java +++ b/core/src/main/java/com/cloud/agent/transport/Request.java @@ -33,8 +33,9 @@ import java.util.zip.GZIPOutputStream; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.BadCommand; @@ -75,7 +76,7 @@ * */ public class Request { - private static final Logger s_logger = Logger.getLogger(Request.class); + protected static Logger LOGGER = LogManager.getLogger(Request.class); protected static final Gson s_gson = GsonHelper.getGson(); protected static final Gson s_gogger = GsonHelper.getGsonLogger(); @@ -251,10 +252,10 @@ public Command[] getCommands() { jsonReader.setLenient(true); _cmds = s_gson.fromJson(jsonReader, (Type)Command[].class); } catch (JsonParseException e) { - s_logger.error("Caught problem while parsing JSON command " + _content, e); + LOGGER.error("Caught problem while parsing JSON command " + _content, e); _cmds = new Command[] { new BadCommand() }; } catch (RuntimeException e) { - s_logger.error("Caught problem with " + _content, e); + LOGGER.error("Caught problem with " + _content, e); throw e; } } @@ -300,7 +301,7 @@ public static ByteBuffer doDecompress(ByteBuffer buffer, int length) { } in.close(); } catch (IOException e) { - s_logger.error("Fail to decompress the request!", e); + LOGGER.error("Fail to decompress the request!", e); } retBuff.flip(); return retBuff; @@ -321,7 +322,7 @@ public static ByteBuffer doCompress(ByteBuffer buffer, int length) { out.finish(); out.close(); } catch (IOException e) { - s_logger.error("Fail to compress the request!", e); + LOGGER.error("Fail to compress the request!", e); } return ByteBuffer.wrap(byteOut.toByteArray()); } @@ -369,24 +370,24 @@ public void logD(String 
msg) { } public void logD(String msg, boolean logContent) { - if (s_logger.isDebugEnabled()) { + if (LOGGER.isDebugEnabled()) { String log = log(msg, logContent, Level.DEBUG); if (log != null) { - s_logger.debug(log); + LOGGER.debug(log); } } } public void logT(String msg, boolean logD) { - if (s_logger.isTraceEnabled()) { + if (LOGGER.isTraceEnabled()) { String log = log(msg, true, Level.TRACE); if (log != null) { - s_logger.trace(log); + LOGGER.trace(log); } - } else if (logD && s_logger.isDebugEnabled()) { + } else if (logD && LOGGER.isDebugEnabled()) { String log = log(msg, false, Level.DEBUG); if (log != null) { - s_logger.debug(log); + LOGGER.debug(log); } } } @@ -403,7 +404,7 @@ protected String log(String msg, boolean logContent, Level level) { try { _cmds = s_gson.fromJson(_content, this instanceof Response ? Answer[].class : Command[].class); } catch (RuntimeException e) { - s_logger.error("Unable to deserialize from json: " + _content); + LOGGER.error("Unable to deserialize from json: " + _content); throw e; } } @@ -414,7 +415,7 @@ protected String log(String msg, boolean logContent, Level level) { for (Command cmd : _cmds) { buff.append(cmd.getClass().getSimpleName()).append("/"); } - s_logger.error("Gson serialization error " + buff.toString(), e); + LOGGER.error("Gson serialization error " + buff.toString(), e); assert false : "More gson errors on " + buff.toString(); return ""; } diff --git a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java index 8352895e6212..9d07fc95c2fd 100644 --- a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java +++ b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java @@ -28,7 +28,8 @@ import java.util.Set; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.routing.LoadBalancerConfigCommand; 
import com.cloud.agent.api.to.LoadBalancerTO; @@ -41,7 +42,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { - private static final Logger s_logger = Logger.getLogger(HAProxyConfigurator.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String blankLine = "\t "; private static String[] globalSection = {"global", "\tlog 127.0.0.1:3914 local0 warning", "\tmaxconn 4096", "\tmaxpipes 1024", "\tchroot /var/lib/haproxy", "\tuser haproxy", "\tgroup haproxy", "\tstats socket /run/haproxy/admin.sock", "\tdaemon"}; @@ -458,7 +459,7 @@ private String getLbSubRuleForStickiness(final LoadBalancerTO lbTO) { * Not supposed to reach here, validation of methods are * done at the higher layer */ - s_logger.warn("Haproxy stickiness policy for lb rule: " + lbTO.getSrcIp() + ":" + lbTO.getSrcPort() + ": Not Applied, cause:invalid method "); + logger.warn("Haproxy stickiness policy for lb rule: " + lbTO.getSrcIp() + ":" + lbTO.getSrcPort() + ": Not Applied, cause:invalid method "); return null; } } @@ -541,7 +542,7 @@ private List getRulesForPool(final LoadBalancerTO lbTO, final boolean ke result.addAll(dstSubRule); } if (stickinessSubRule != null && !destsAvailable) { - s_logger.warn("Haproxy stickiness policy for lb rule: " + lbTO.getSrcIp() + ":" + lbTO.getSrcPort() + ": Not Applied, cause: backends are unavailable"); + logger.warn("Haproxy stickiness policy for lb rule: " + lbTO.getSrcIp() + ":" + lbTO.getSrcPort() + ": Not Applied, cause: backends are unavailable"); } if (publicPort == NetUtils.HTTP_PORT && !keepAliveEnabled || httpbasedStickiness) { sb = new StringBuilder(); @@ -566,7 +567,7 @@ private String generateStatsRule(final LoadBalancerConfigCommand lbCmd, final St final StringBuilder rule = new StringBuilder("\nlisten ").append(ruleName).append("\n\tbind ").append(statsIp).append(":").append(lbCmd.lbStatsPort); // TODO DH: write test for this in both cases if (!lbCmd.keepAliveEnabled) { - 
s_logger.info("Haproxy mode http enabled"); + logger.info("Haproxy mode http enabled"); rule.append("\n\tmode http\n\toption httpclose"); } rule.append("\n\tstats enable\n\tstats uri ") @@ -575,8 +576,8 @@ private String generateStatsRule(final LoadBalancerConfigCommand lbCmd, final St .append(lbCmd.lbStatsAuth); rule.append("\n"); final String result = rule.toString(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Haproxystats rule: " + result); + if (logger.isDebugEnabled()) { + logger.debug("Haproxystats rule: " + result); } return result; } @@ -590,9 +591,9 @@ public String[] generateConfiguration(final LoadBalancerConfigCommand lbCmd) { // TODO DH: write test for this function final String pipesLine = "\tmaxpipes " + Long.toString(Long.parseLong(lbCmd.maxconn) / 4); gSection.set(3, pipesLine); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { for (final String s : gSection) { - s_logger.debug("global section: " + s); + logger.debug("global section: " + s); } } result.addAll(gSection); @@ -606,9 +607,9 @@ public String[] generateConfiguration(final LoadBalancerConfigCommand lbCmd) { dSection.set(7, "\tno option httpclose"); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { for (final String s : dSection) { - s_logger.debug("default section: " + s); + logger.debug("default section: " + s); } } result.addAll(dSection); diff --git a/core/src/main/java/com/cloud/network/resource/TrafficSentinelResource.java b/core/src/main/java/com/cloud/network/resource/TrafficSentinelResource.java index c6596b3c630f..43fb459978a8 100644 --- a/core/src/main/java/com/cloud/network/resource/TrafficSentinelResource.java +++ b/core/src/main/java/com/cloud/network/resource/TrafficSentinelResource.java @@ -35,7 +35,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.IAgentControl; import 
com.cloud.agent.api.Answer; @@ -66,7 +67,7 @@ public class TrafficSentinelResource implements ServerResource { private String _inclZones; private String _exclZones; - private static final Logger s_logger = Logger.getLogger(TrafficSentinelResource.class); + protected Logger logger = LogManager.getLogger(getClass()); @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -242,10 +243,10 @@ private DirectNetworkUsageAnswer getPublicIpBytesSentAndReceived(DirectNetworkUs } } } catch (MalformedURLException e1) { - s_logger.info("Invalid Traffic Sentinel URL", e1); + logger.info("Invalid Traffic Sentinel URL", e1); throw new ExecutionException(e1.getMessage()); } catch (IOException e) { - s_logger.debug("Error in direct network usage accounting", e); + logger.debug("Error in direct network usage accounting", e); throw new ExecutionException(e.getMessage()); } finally { if (os != null) { @@ -256,7 +257,7 @@ private DirectNetworkUsageAnswer getPublicIpBytesSentAndReceived(DirectNetworkUs } } } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new ExecutionException(e.getMessage()); } return answer; diff --git a/core/src/main/java/com/cloud/resource/CommandWrapper.java b/core/src/main/java/com/cloud/resource/CommandWrapper.java index d9c1ea234e82..a839234117be 100644 --- a/core/src/main/java/com/cloud/resource/CommandWrapper.java +++ b/core/src/main/java/com/cloud/resource/CommandWrapper.java @@ -21,10 +21,11 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public abstract class CommandWrapper { - protected Logger logger = Logger.getLogger(getClass()); + protected Logger logger = LogManager.getLogger(getClass()); /** * @param T is the command to be used. 
diff --git a/core/src/main/java/com/cloud/resource/RequestWrapper.java b/core/src/main/java/com/cloud/resource/RequestWrapper.java index e43cf02e13c1..54d8b289c8d6 100644 --- a/core/src/main/java/com/cloud/resource/RequestWrapper.java +++ b/core/src/main/java/com/cloud/resource/RequestWrapper.java @@ -23,7 +23,8 @@ import java.util.Hashtable; import java.util.Set; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -39,7 +40,7 @@ public CommandNotSupported(String msg, Throwable cause) { } } - private static final Logger s_logger = Logger.getLogger(RequestWrapper.class); + protected Logger logger = LogManager.getLogger(getClass()); @SuppressWarnings("rawtypes") protected Hashtable, Hashtable, CommandWrapper>> resources = new Hashtable, Hashtable, CommandWrapper>>(); @@ -141,9 +142,9 @@ protected Hashtable, CommandWrapper> processAnnotations try { commands.put(annotation.handles(), wrapper.newInstance()); } catch (final InstantiationException e) { - s_logger.warn(MessageFormat.format(errorMessage, e.getLocalizedMessage(), wrapper.toString())); + logger.warn(MessageFormat.format(errorMessage, e.getLocalizedMessage(), wrapper.toString())); } catch (final IllegalAccessException e) { - s_logger.warn(MessageFormat.format(errorMessage, e.getLocalizedMessage(), wrapper.toString())); + logger.warn(MessageFormat.format(errorMessage, e.getLocalizedMessage(), wrapper.toString())); } } diff --git a/core/src/main/java/com/cloud/resource/ServerResourceBase.java b/core/src/main/java/com/cloud/resource/ServerResourceBase.java index 18121e21e512..bb44b307047e 100644 --- a/core/src/main/java/com/cloud/resource/ServerResourceBase.java +++ b/core/src/main/java/com/cloud/resource/ServerResourceBase.java @@ -37,7 +37,8 @@ import org.apache.cloudstack.storage.command.browser.ListDataStoreObjectsAnswer; import
org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -47,7 +48,7 @@ import com.cloud.utils.script.Script; public abstract class ServerResourceBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(ServerResourceBase.class); + protected Logger logger = LogManager.getLogger(getClass()); protected String name; private ArrayList warnings = new ArrayList(); private ArrayList errors = new ArrayList(); @@ -80,7 +81,7 @@ public boolean configure(final String name, Map params) throws C String infos[] = NetUtils.getNetworkParams(privateNic); if (infos == null) { - s_logger.warn("Incorrect details for private Nic during initialization of ServerResourceBase"); + logger.warn("Incorrect details for private Nic during initialization of ServerResourceBase"); return false; } params.put("host.ip", infos[0]); @@ -106,7 +107,7 @@ protected void defineResourceNetworkInterfaces(Map params) { } protected void tryToAutoDiscoverResourcePrivateNetworkInterface() throws ConfigurationException { - s_logger.info("Trying to autodiscover this resource's private network interface."); + logger.info("Trying to autodiscover this resource's private network interface."); List nics; try { @@ -118,11 +119,11 @@ protected void tryToAutoDiscoverResourcePrivateNetworkInterface() throws Configu throw new ConfigurationException(String.format("Could not retrieve the environment NICs due to [%s].", e.getMessage())); } - s_logger.debug(String.format("Searching the private NIC along the environment NICs [%s].", Arrays.toString(nics.toArray()))); + logger.debug(String.format("Searching the private NIC along the environment NICs [%s].", Arrays.toString(nics.toArray()))); for (NetworkInterface nic : nics) { if (isValidNicToUseAsPrivateNic(nic)) { - 
s_logger.info(String.format("Using NIC [%s] as private NIC.", nic)); + logger.info(String.format("Using NIC [%s] as private NIC.", nic)); privateNic = nic; return; } @@ -134,18 +135,18 @@ protected void tryToAutoDiscoverResourcePrivateNetworkInterface() throws Configu protected boolean isValidNicToUseAsPrivateNic(NetworkInterface nic) { String nicName = nic.getName(); - s_logger.debug(String.format("Verifying if NIC [%s] can be used as private NIC.", nic)); + logger.debug(String.format("Verifying if NIC [%s] can be used as private NIC.", nic)); String[] nicNameStartsToAvoid = {"vnif", "vnbr", "peth", "vif", "virbr"}; if (nic.isVirtual() || StringUtils.startsWithAny(nicName, nicNameStartsToAvoid) || nicName.contains(":")) { - s_logger.debug(String.format("Not using NIC [%s] because it is either virtual, starts with %s, or contains \":\"" + + logger.debug(String.format("Not using NIC [%s] because it is either virtual, starts with %s, or contains \":\"" + " in its name.", Arrays.toString(nicNameStartsToAvoid), nic)); return false; } String[] info = NetUtils.getNicParams(nicName); if (info == null || info[0] == null) { - s_logger.debug(String.format("Not using NIC [%s] because it does not have a valid IP to use as the private IP.", nic)); + logger.debug(String.format("Not using NIC [%s] because it does not have a valid IP to use as the private IP.", nic)); return false; } @@ -190,8 +191,8 @@ protected void fillNetworkInformation(final StartupCommand cmd) { if (privateNic != null) { info = NetUtils.getNetworkParams(privateNic); if (info != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Parameters for private nic: " + info[0] + " - " + info[1] + "-" + info[2]); + if (logger.isDebugEnabled()) { + logger.debug("Parameters for private nic: " + info[0] + " - " + info[1] + "-" + info[2]); } cmd.setPrivateIpAddress(info[0]); cmd.setPrivateMacAddress(info[1]); @@ -200,16 +201,16 @@ protected void fillNetworkInformation(final StartupCommand cmd) { } if (storageNic 
!= null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Storage has its now nic: " + storageNic.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Storage has its now nic: " + storageNic.getName()); } info = NetUtils.getNetworkParams(storageNic); } // NOTE: In case you're wondering, this is not here by mistake. if (info != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Parameters for storage nic: " + info[0] + " - " + info[1] + "-" + info[2]); + if (logger.isDebugEnabled()) { + logger.debug("Parameters for storage nic: " + info[0] + " - " + info[1] + "-" + info[2]); } cmd.setStorageIpAddress(info[0]); cmd.setStorageMacAddress(info[1]); @@ -219,8 +220,8 @@ protected void fillNetworkInformation(final StartupCommand cmd) { if (publicNic != null) { info = NetUtils.getNetworkParams(publicNic); if (info != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Parameters for public nic: " + info[0] + " - " + info[1] + "-" + info[2]); + if (logger.isDebugEnabled()) { + logger.debug("Parameters for public nic: " + info[0] + " - " + info[1] + "-" + info[2]); } cmd.setPublicIpAddress(info[0]); cmd.setPublicMacAddress(info[1]); @@ -231,8 +232,8 @@ protected void fillNetworkInformation(final StartupCommand cmd) { if (storageNic2 != null) { info = NetUtils.getNetworkParams(storageNic2); if (info != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Parameters for storage nic 2: " + info[0] + " - " + info[1] + "-" + info[2]); + if (logger.isDebugEnabled()) { + logger.debug("Parameters for storage nic 2: " + info[0] + " - " + info[1] + "-" + info[2]); } cmd.setStorageIpAddressDeux(info[0]); cmd.setStorageMacAddressDeux(info[1]); diff --git a/core/src/main/java/com/cloud/serializer/GsonHelper.java b/core/src/main/java/com/cloud/serializer/GsonHelper.java index c3653e5089c0..8288b7796807 100644 --- a/core/src/main/java/com/cloud/serializer/GsonHelper.java +++ b/core/src/main/java/com/cloud/serializer/GsonHelper.java @@ -21,7 +21,8 @@ 
import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.google.gson.Gson; import com.google.gson.GsonBuilder; @@ -42,7 +43,7 @@ import com.cloud.utils.Pair; public class GsonHelper { - private static final Logger s_logger = Logger.getLogger(GsonHelper.class); + protected static Logger LOGGER = LogManager.getLogger(GsonHelper.class); protected static final Gson s_gson; protected static final Gson s_gogger; @@ -50,11 +51,11 @@ public class GsonHelper { static { GsonBuilder gsonBuilder = new GsonBuilder(); s_gson = setDefaultGsonConfig(gsonBuilder); - GsonBuilder loggerBuilder = new GsonBuilder(); - loggerBuilder.disableHtmlEscaping(); - loggerBuilder.setExclusionStrategies(new LoggingExclusionStrategy(s_logger)); - s_gogger = setDefaultGsonConfig(loggerBuilder); - s_logger.info("Default Builder inited."); + GsonBuilder loggerBuilder = new GsonBuilder(); + loggerBuilder.disableHtmlEscaping(); + loggerBuilder.setExclusionStrategies(new LoggingExclusionStrategy(LOGGER)); + s_gogger = setDefaultGsonConfig(loggerBuilder); + LOGGER.info("Default Builder inited."); } static Gson setDefaultGsonConfig(GsonBuilder builder) { @@ -89,6 +90,6 @@ public final static Gson getGsonLogger() { } public final static Logger getLogger() { - return s_logger; + return LOGGER; } } diff --git a/core/src/main/java/com/cloud/storage/JavaStorageLayer.java b/core/src/main/java/com/cloud/storage/JavaStorageLayer.java index d4c2639d4786..0e51ef7eb205 100644 --- a/core/src/main/java/com/cloud/storage/JavaStorageLayer.java +++ b/core/src/main/java/com/cloud/storage/JavaStorageLayer.java @@ -34,10 +34,11 @@ import java.util.UUID; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class JavaStorageLayer implements StorageLayer { - private static final Logger s_logger =
Logger.getLogger(JavaStorageLayer.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String STD_TMP_DIR_PATH = "/tmp"; String _name; boolean _makeWorldWriteable = true; @@ -198,9 +199,9 @@ public File createUniqDir() throws IOException { if (dir.exists()) { if (isWorldReadable(dir)) { if (STD_TMP_DIR_PATH.equals(dir.getAbsolutePath())) { - s_logger.warn(String.format("The temp dir is %s", STD_TMP_DIR_PATH)); + logger.warn(String.format("The temp dir is %s", STD_TMP_DIR_PATH)); } else { - s_logger.warn("The temp dir " + dir.getAbsolutePath() + " is World Readable"); + logger.warn("The temp dir " + dir.getAbsolutePath() + " is World Readable"); } } String uniqDirName = dir.getAbsolutePath() + File.separator + UUID.randomUUID().toString(); diff --git a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java index 75d5f49d4c6c..7d8225462cab 100644 --- a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java +++ b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.command.SyncVolumePathCommand; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -47,9 +46,11 @@ import com.cloud.storage.DataStoreRole; import com.cloud.storage.Volume; import com.google.gson.Gson; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class StorageSubsystemCommandHandlerBase implements StorageSubsystemCommandHandler { - private static final Logger s_logger = Logger.getLogger(StorageSubsystemCommandHandlerBase.class); + protected Logger logger = LogManager.getLogger(getClass()); protected static final 
Gson s_gogger = GsonHelper.getGsonLogger(); protected StorageProcessor processor; @@ -141,7 +142,7 @@ protected Answer execute(CreateObjectCommand cmd) { } return new CreateObjectAnswer("not supported type"); } catch (Exception e) { - s_logger.debug("Failed to create object: " + data.getObjectType() + ": " + e.toString()); + logger.debug("Failed to create object: " + data.getObjectType() + ": " + e.toString()); return new CreateObjectAnswer(e.toString()); } } @@ -184,9 +185,9 @@ protected Answer execute(QuerySnapshotZoneCopyCommand cmd) { private void logCommand(Command cmd) { try { - s_logger.debug(String.format("Executing command %s: [%s].", cmd.getClass().getSimpleName(), s_gogger.toJson(cmd))); + logger.debug(String.format("Executing command %s: [%s].", cmd.getClass().getSimpleName(), s_gogger.toJson(cmd))); } catch (Exception e) { - s_logger.debug(String.format("Executing command %s.", cmd.getClass().getSimpleName())); + logger.debug(String.format("Executing command %s.", cmd.getClass().getSimpleName())); } } } diff --git a/core/src/main/java/com/cloud/storage/template/FtpTemplateUploader.java b/core/src/main/java/com/cloud/storage/template/FtpTemplateUploader.java index eb0c4f846016..14bf6fe1b746 100644 --- a/core/src/main/java/com/cloud/storage/template/FtpTemplateUploader.java +++ b/core/src/main/java/com/cloud/storage/template/FtpTemplateUploader.java @@ -29,11 +29,12 @@ import java.net.URLConnection; import java.util.Date; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class FtpTemplateUploader implements TemplateUploader { - public static final Logger s_logger = Logger.getLogger(FtpTemplateUploader.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); public TemplateUploader.Status status = TemplateUploader.Status.NOT_STARTED; public String errorString = ""; public long totalBytes = 0; @@ -110,11 +111,11 @@ public long upload(UploadCompleteCallback 
callback) { } catch (MalformedURLException e) { status = TemplateUploader.Status.UNRECOVERABLE_ERROR; errorString = e.getMessage(); - s_logger.error(errorString); + logger.error(errorString); } catch (IOException e) { status = TemplateUploader.Status.UNRECOVERABLE_ERROR; errorString = e.getMessage(); - s_logger.error(errorString); + logger.error(errorString); } finally { try { if (inputStream != null) { @@ -124,7 +125,7 @@ public long upload(UploadCompleteCallback callback) { outputStream.close(); } } catch (IOException ioe) { - s_logger.error(" Caught exception while closing the resources"); + logger.error(" Caught exception while closing the resources"); } if (callback != null) { callback.uploadComplete(status); @@ -139,7 +140,7 @@ public void run() { try { upload(completionCallback); } catch (Throwable t) { - s_logger.warn("Caught exception during upload " + t.getMessage(), t); + logger.warn("Caught exception during upload " + t.getMessage(), t); errorString = "Failed to install: " + t.getMessage(); status = TemplateUploader.Status.UNRECOVERABLE_ERROR; } @@ -207,7 +208,7 @@ public boolean stopUpload() { inputStream.close(); } } catch (IOException e) { - s_logger.error(" Caught exception while closing the resources"); + logger.error(" Caught exception while closing the resources"); } status = TemplateUploader.Status.ABORTED; return true; diff --git a/core/src/main/java/com/cloud/storage/template/HttpTemplateDownloader.java b/core/src/main/java/com/cloud/storage/template/HttpTemplateDownloader.java index d55c387d820a..92865caeb57a 100755 --- a/core/src/main/java/com/cloud/storage/template/HttpTemplateDownloader.java +++ b/core/src/main/java/com/cloud/storage/template/HttpTemplateDownloader.java @@ -45,7 +45,6 @@ import org.apache.commons.httpclient.auth.AuthScope; import org.apache.commons.httpclient.methods.GetMethod; import org.apache.commons.httpclient.params.HttpMethodParams; -import org.apache.log4j.Logger; import com.cloud.storage.StorageLayer; import 
com.cloud.utils.Pair; @@ -58,7 +57,6 @@ * */ public class HttpTemplateDownloader extends ManagedContextRunnable implements TemplateDownloader { - public static final Logger s_logger = Logger.getLogger(HttpTemplateDownloader.class.getName()); private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); private static final int CHUNK_SIZE = 1024 * 1024; //1M @@ -102,9 +100,9 @@ public HttpTemplateDownloader(StorageLayer storageLayer, String downloadUrl, Str } catch (Exception ex) { errorString = "Unable to start download -- check url? "; status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - s_logger.warn("Exception in constructor -- " + ex.toString()); + logger.warn("Exception in constructor -- " + ex.toString()); } catch (Throwable th) { - s_logger.warn("throwable caught ", th); + logger.warn("throwable caught ", th); } } @@ -127,7 +125,7 @@ private void checkTemporaryDestination(String toDir) { } catch (IOException ex) { errorString = "Unable to start download -- check url? 
"; status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - s_logger.warn("Exception in constructor -- " + ex.toString()); + logger.warn("Exception in constructor -- " + ex.toString()); } } @@ -138,9 +136,9 @@ private void checkCredentials(String user, String password) { client.getParams().setAuthenticationPreemptive(true); Credentials defaultcreds = new UsernamePasswordCredentials(user, password); client.getState().setCredentials(new AuthScope(hostAndPort.first(), hostAndPort.second(), AuthScope.ANY_REALM), defaultcreds); - s_logger.info("Added username=" + user + ", password=" + password + "for host " + hostAndPort.first() + ":" + hostAndPort.second()); + logger.info("Added username=" + user + ", password=***** for host " + hostAndPort.first() + ":" + hostAndPort.second()); } else { - s_logger.info("No credentials configured for host=" + hostAndPort.first() + ":" + hostAndPort.second()); + logger.info("No credentials configured for host=" + hostAndPort.first() + ":" + hostAndPort.second()); } } catch (IllegalArgumentException iae) { errorString = iae.getMessage(); @@ -206,7 +204,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { ) { out.seek(localFileSize); - s_logger.info("Starting download from " + downloadUrl + " to " + toFile + " remoteSize=" + toHumanReadableSize(remoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes)); + logger.info("Starting download from " + downloadUrl + " to " + toFile + " remoteSize=" + toHumanReadableSize(remoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes)); if (copyBytes(file, in, out)) return 0; @@ -278,7 +276,7 @@ private void checkDowloadCompletion() { private boolean canHandleDownloadSize() { if (remoteSize > maxTemplateSizeInBytes) { - s_logger.info("Remote size is too large: " + toHumanReadableSize(remoteSize) + " , max=" + toHumanReadableSize(maxTemplateSizeInBytes)); + logger.info("Remote size is too large: " + toHumanReadableSize(remoteSize) + " 
, max=" + toHumanReadableSize(maxTemplateSizeInBytes)); status = Status.UNRECOVERABLE_ERROR; errorString = "Download file size is too large"; return false; @@ -347,7 +345,7 @@ private long checkLocalFileSizeForResume(boolean resume, File file) { long localFileSize = 0; if (file.exists() && resume) { localFileSize = file.length(); - s_logger.info("Resuming download to file (current size)=" + toHumanReadableSize(localFileSize)); + logger.info("Resuming download to file (current size)=" + toHumanReadableSize(localFileSize)); } return localFileSize; } @@ -431,7 +429,7 @@ protected void runInContext() { try { download(resume, completionCallback); } catch (Throwable t) { - s_logger.warn("Caught exception during download " + t.getMessage(), t); + logger.warn("Caught exception during download " + t.getMessage(), t); errorString = "Failed to install: " + t.getMessage(); status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; } @@ -519,20 +517,20 @@ public VerifyFormat invoke() { URI str = new URI(downloadUrl); uripath = str.getPath(); } catch (URISyntaxException e) { - s_logger.warn("Invalid download url: " + downloadUrl + ", This should not happen since we have validated the url before!!"); + logger.warn("Invalid download url: " + downloadUrl + ", This should not happen since we have validated the url before!!"); } String unsupportedFormat = ImageStoreUtil.checkTemplateFormat(file.getAbsolutePath(), uripath); if (unsupportedFormat == null || !unsupportedFormat.isEmpty()) { try { request.abort(); } catch (Exception ex) { - s_logger.debug("Error on http connection : " + ex.getMessage()); + logger.debug("Error on http connection : " + ex.getMessage()); } status = Status.UNRECOVERABLE_ERROR; errorString = "Template content is unsupported, or mismatch between selected format and template content. 
Found : " + unsupportedFormat; throw new CloudRuntimeException(errorString); } else { - s_logger.debug("Verified format of downloading file " + file.getAbsolutePath() + " is supported"); + logger.debug("Verified format of downloading file " + file.getAbsolutePath() + " is supported"); verifiedFormat = true; } return this; diff --git a/core/src/main/java/com/cloud/storage/template/IsoProcessor.java b/core/src/main/java/com/cloud/storage/template/IsoProcessor.java index 4cd2f1a2a020..6ab42effb524 100644 --- a/core/src/main/java/com/cloud/storage/template/IsoProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/IsoProcessor.java @@ -24,14 +24,12 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.utils.component.AdapterBase; public class IsoProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(IsoProcessor.class); StorageLayer _storage; @@ -43,14 +41,14 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) { if (format != null) { - s_logger.debug("We don't handle conversion from " + format + " to ISO."); + logger.debug("We don't handle conversion from " + format + " to ISO."); return null; } String isoPath = templatePath + File.separator + templateName + "." 
+ ImageFormat.ISO.getFileExtension(); if (!_storage.exists(isoPath)) { - s_logger.debug("Unable to find the iso file: " + isoPath); + logger.debug("Unable to find the iso file: " + isoPath); return null; } diff --git a/core/src/main/java/com/cloud/storage/template/LocalTemplateDownloader.java b/core/src/main/java/com/cloud/storage/template/LocalTemplateDownloader.java index 564eba3d5ee1..e4044412c521 100644 --- a/core/src/main/java/com/cloud/storage/template/LocalTemplateDownloader.java +++ b/core/src/main/java/com/cloud/storage/template/LocalTemplateDownloader.java @@ -29,12 +29,10 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import org.apache.log4j.Logger; import com.cloud.storage.StorageLayer; public class LocalTemplateDownloader extends TemplateDownloaderBase implements TemplateDownloader { - public static final Logger s_logger = Logger.getLogger(LocalTemplateDownloader.class); public LocalTemplateDownloader(StorageLayer storageLayer, String downloadUrl, String toDir, long maxTemplateSizeInBytes, DownloadCompleteCallback callback) { super(storageLayer, downloadUrl, toDir, maxTemplateSizeInBytes, callback); @@ -55,7 +53,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { src = new File(new URI(_downloadUrl)); } catch (URISyntaxException e1) { - s_logger.warn("Invalid URI " + _downloadUrl); + logger.warn("Invalid URI " + _downloadUrl); _status = Status.UNRECOVERABLE_ERROR; return 0; } @@ -77,7 +75,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { fis = new FileInputStream(src); } catch (FileNotFoundException e) { - s_logger.warn("Unable to find " + _downloadUrl); + logger.warn("Unable to find " + _downloadUrl); _errorString = "Unable to find " + _downloadUrl; return -1; } @@ -85,7 +83,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { fos = new FileOutputStream(dst); } catch (FileNotFoundException e) { - s_logger.warn("Unable to find " + 
_toFile); + logger.warn("Unable to find " + _toFile); return -1; } foc = fos.getChannel(); @@ -102,7 +100,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { buffer.clear(); } } catch (IOException e) { - s_logger.warn("Unable to download", e); + logger.warn("Unable to download", e); } String downloaded = "(incomplete download)"; @@ -123,7 +121,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { fic.close(); } catch (IOException e) { - s_logger.info("[ignore] error while closing file input channel."); + logger.info("[ignore] error while closing file input channel."); } } @@ -131,7 +129,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { foc.close(); } catch (IOException e) { - s_logger.info("[ignore] error while closing file output channel."); + logger.info("[ignore] error while closing file output channel."); } } @@ -139,7 +137,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { fis.close(); } catch (IOException e) { - s_logger.info("[ignore] error while closing file input stream."); + logger.info("[ignore] error while closing file input stream."); } } @@ -147,7 +145,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { try { fos.close(); } catch (IOException e) { - s_logger.info("[ignore] error while closing file output stream."); + logger.info("[ignore] error while closing file output stream."); } } diff --git a/core/src/main/java/com/cloud/storage/template/MetalinkTemplateDownloader.java b/core/src/main/java/com/cloud/storage/template/MetalinkTemplateDownloader.java index dd452f2e2d92..bf4be5825a1d 100644 --- a/core/src/main/java/com/cloud/storage/template/MetalinkTemplateDownloader.java +++ b/core/src/main/java/com/cloud/storage/template/MetalinkTemplateDownloader.java @@ -28,7 +28,6 @@ import org.apache.commons.httpclient.methods.GetMethod; import org.apache.commons.httpclient.params.HttpMethodParams; import 
org.apache.commons.io.IOUtils; -import org.apache.log4j.Logger; import org.springframework.util.CollectionUtils; import java.io.File; @@ -47,7 +46,6 @@ public class MetalinkTemplateDownloader extends TemplateDownloaderBase implement protected GetMethod request; private boolean toFileSet = false; - private static final Logger LOGGER = Logger.getLogger(MetalinkTemplateDownloader.class.getName()); public MetalinkTemplateDownloader(StorageLayer storageLayer, String downloadUrl, String toDir, DownloadCompleteCallback callback, long maxTemplateSize) { super(storageLayer, downloadUrl, toDir, maxTemplateSize, callback); @@ -97,7 +95,7 @@ private boolean downloadTemplate() { try { client.executeMethod(request); } catch (IOException e) { - LOGGER.error("Error on HTTP request: " + e.getMessage()); + logger.error("Error on HTTP request: " + e.getMessage()); return false; } return performDownload(); @@ -110,7 +108,7 @@ private boolean performDownload() { ) { IOUtils.copy(in, out); } catch (IOException e) { - LOGGER.error("Error downloading template from: " + _downloadUrl + " due to: " + e.getMessage()); + logger.error("Error downloading template from: " + _downloadUrl + " due to: " + e.getMessage()); return false; } return true; @@ -121,13 +119,13 @@ public long download(boolean resume, DownloadCompleteCallback callback) { return 0; } - LOGGER.info("Starting metalink download from: " + _downloadUrl); + logger.info("Starting metalink download from: " + _downloadUrl); _start = System.currentTimeMillis(); status = Status.IN_PROGRESS; List metalinkUrls = UriUtils.getMetalinkUrls(_downloadUrl); if (CollectionUtils.isEmpty(metalinkUrls)) { - LOGGER.error("No URLs found for metalink: " + _downloadUrl); + logger.error("No URLs found for metalink: " + _downloadUrl); status = Status.UNRECOVERABLE_ERROR; return 0; } @@ -140,11 +138,11 @@ public long download(boolean resume, DownloadCompleteCallback callback) { i++; } if (!downloaded) { - LOGGER.error("Template couldn't be downloaded"); + 
logger.error("Template couldn't be downloaded"); status = Status.UNRECOVERABLE_ERROR; return 0; } - LOGGER.info("Template downloaded successfully on: " + _toFile); + logger.info("Template downloaded successfully on: " + _toFile); status = Status.DOWNLOAD_FINISHED; _downloadTime = System.currentTimeMillis() - _start; if (_callback != null) { diff --git a/core/src/main/java/com/cloud/storage/template/OVAProcessor.java b/core/src/main/java/com/cloud/storage/template/OVAProcessor.java index 33f7e28ac039..ab3aa0d0e3a5 100644 --- a/core/src/main/java/com/cloud/storage/template/OVAProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/OVAProcessor.java @@ -34,7 +34,6 @@ import com.cloud.agent.api.to.deployasis.OVFNetworkTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.NodeList; @@ -52,7 +51,6 @@ * processes the content of an OVA for registration of a template */ public class OVAProcessor extends AdapterBase implements Processor { - private static final Logger LOGGER = Logger.getLogger(OVAProcessor.class); StorageLayer _storage; @Override @@ -66,11 +64,11 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa return null; } - LOGGER.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); String templateFilePath = templatePath + File.separator + templateName + "." 
+ ImageFormat.OVA.getFileExtension(); if (!_storage.exists(templateFilePath)) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info("Unable to find the vmware template file: " + templateFilePath); + if (logger.isInfoEnabled()) { + logger.info("Unable to find the vmware template file: " + templateFilePath); } return null; } @@ -114,46 +112,46 @@ private OVFInformationTO createOvfInformationTO(OVFHelper ovfHelper, Document do List disks = ovfHelper.getOVFVolumeInfoFromFile(ovfFilePath, doc, null); if (CollectionUtils.isNotEmpty(disks)) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Found %d disks in template %s", disks.size(), ovfFilePath)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Found %d disks in template %s", disks.size(), ovfFilePath)); } ovfInformationTO.setDisks(disks); } List nets = ovfHelper.getNetPrerequisitesFromDocument(doc); if (CollectionUtils.isNotEmpty(nets)) { - LOGGER.info("Found " + nets.size() + " prerequisite networks"); + logger.info("Found " + nets.size() + " prerequisite networks"); ovfInformationTO.setNetworks(nets); - } else if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("no net prerequisites found in template %s", ovfFilePath)); + } else if (logger.isTraceEnabled()) { + logger.trace(String.format("no net prerequisites found in template %s", ovfFilePath)); } List ovfProperties = ovfHelper.getConfigurableOVFPropertiesFromDocument(doc); if (CollectionUtils.isNotEmpty(ovfProperties)) { - LOGGER.info("Found " + ovfProperties.size() + " configurable OVF properties"); + logger.info("Found " + ovfProperties.size() + " configurable OVF properties"); ovfInformationTO.setProperties(ovfProperties); - } else if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("no ovf properties found in template %s", ovfFilePath)); + } else if (logger.isTraceEnabled()) { + logger.trace(String.format("no ovf properties found in template %s", ovfFilePath)); } OVFVirtualHardwareSectionTO hardwareSection = 
ovfHelper.getVirtualHardwareSectionFromDocument(doc); List configurations = hardwareSection.getConfigurations(); if (CollectionUtils.isNotEmpty(configurations)) { - LOGGER.info("Found " + configurations.size() + " deployment option configurations"); + logger.info("Found " + configurations.size() + " deployment option configurations"); } List hardwareItems = hardwareSection.getCommonHardwareItems(); if (CollectionUtils.isNotEmpty(hardwareItems)) { - LOGGER.info("Found " + hardwareItems.size() + " virtual hardware items"); + logger.info("Found " + hardwareItems.size() + " virtual hardware items"); } if (StringUtils.isNotBlank(hardwareSection.getMinimiumHardwareVersion())) { - LOGGER.info("Found minimum hardware version " + hardwareSection.getMinimiumHardwareVersion()); + logger.info("Found minimum hardware version " + hardwareSection.getMinimiumHardwareVersion()); } ovfInformationTO.setHardwareSection(hardwareSection); List eulaSections = ovfHelper.getEulaSectionsFromDocument(doc); if (CollectionUtils.isNotEmpty(eulaSections)) { - LOGGER.info("Found " + eulaSections.size() + " license agreements"); + logger.info("Found " + eulaSections.size() + " license agreements"); ovfInformationTO.setEulaSections(eulaSections); } Pair guestOsPair = ovfHelper.getOperatingSystemInfoFromDocument(doc); if (guestOsPair != null) { - LOGGER.info("Found guest OS information: " + guestOsPair.first() + " - " + guestOsPair.second()); + logger.info("Found guest OS information: " + guestOsPair.first() + " - " + guestOsPair.second()); ovfInformationTO.setGuestOsInfo(guestOsPair); } return ovfInformationTO; @@ -163,33 +161,33 @@ private void setFileSystemAccessRights(String templatePath) { Script command; String result; - command = new Script("chmod", 0, LOGGER); + command = new Script("chmod", 0, logger); command.add("-R"); command.add("666", templatePath); result = command.execute(); if (result != null) { - LOGGER.warn("Unable to set permissions for files in " + templatePath + " due to " + 
result); + logger.warn("Unable to set permissions for files in " + templatePath + " due to " + result); } - command = new Script("chmod", 0, LOGGER); + command = new Script("chmod", 0, logger); command.add("777", templatePath); result = command.execute(); if (result != null) { - LOGGER.warn("Unable to set permissions for " + templatePath + " due to " + result); + logger.warn("Unable to set permissions for " + templatePath + " due to " + result); } } private String unpackOva(String templatePath, String templateName, long processTimeout) throws InternalErrorException { - LOGGER.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("Template processing - untar OVA package. templatePath: " + templatePath + ", templateName: " + templateName); String templateFileFullPath = templatePath + File.separator + templateName + "." + ImageFormat.OVA.getFileExtension(); File templateFile = new File(templateFileFullPath); - Script command = new Script("tar", processTimeout, LOGGER); + Script command = new Script("tar", processTimeout, logger); command.add("--no-same-owner"); command.add("--no-same-permissions"); command.add("-xf", templateFileFullPath); command.setWorkDir(templateFile.getParent()); String result = command.execute(); if (result != null) { - LOGGER.info("failed to untar OVA package due to " + result + ". templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("failed to untar OVA package due to " + result + ". 
templatePath: " + templatePath + ", templateName: " + templateName); throw new InternalErrorException("failed to untar OVA package"); } return templateFileFullPath; @@ -197,13 +195,13 @@ private String unpackOva(String templatePath, String templateName, long processT private boolean conversionChecks(ImageFormat format) { if (format != null) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info("We currently don't handle conversion from " + format + " to OVA."); + if (logger.isInfoEnabled()) { + logger.info("We currently don't handle conversion from " + format + " to OVA."); } return false; } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("We are handling format " + format + "."); + if (logger.isTraceEnabled()) { + logger.trace("We are handling format " + format + "."); } return true; } @@ -214,7 +212,7 @@ public long getVirtualSize(File file) { long size = getTemplateVirtualSize(file.getParent(), file.getName()); return size; } catch (Exception e) { - LOGGER.info("[ignored]" + logger.info("[ignored]" + "failed to get virtual template size for ova: " + e.getLocalizedMessage()); } return file.length(); @@ -234,7 +232,7 @@ public long getTemplateVirtualSize(String templatePath, String templateName) thr OVFHelper ovfHelper = new OVFHelper(); if (ovfFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + templatePath; - LOGGER.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } try { @@ -248,7 +246,7 @@ public long getTemplateVirtualSize(String templatePath, String templateName) thr diskSize = Long.parseLong(diskSizeValue); } catch (NumberFormatException e) { // ASSUMEably the diskSize contains a property for replacement - LOGGER.warn(String.format("the disksize for disk %s is not a valid number: %s", disk.getAttribute("diskId"), diskSizeValue)); + logger.warn(String.format("the disksize for disk %s is not a valid number: %s", disk.getAttribute("diskId"), diskSizeValue)); // TODO parse the property to get any value 
can not be done at registration time // and will have to be done at deploytime, so for orchestration purposes // we now assume, a value of one @@ -260,7 +258,7 @@ public long getTemplateVirtualSize(String templatePath, String templateName) thr return virtualSize; } catch (InternalErrorException | NumberFormatException e) { String msg = "getTemplateVirtualSize: Unable to parse OVF XML document " + templatePath + " to get the virtual disk " + templateName + " size due to " + e; - LOGGER.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } } diff --git a/core/src/main/java/com/cloud/storage/template/QCOW2Processor.java b/core/src/main/java/com/cloud/storage/template/QCOW2Processor.java index 56ae078dc519..df1722a0201d 100644 --- a/core/src/main/java/com/cloud/storage/template/QCOW2Processor.java +++ b/core/src/main/java/com/cloud/storage/template/QCOW2Processor.java @@ -27,7 +27,6 @@ import javax.naming.ConfigurationException; import com.cloud.exception.InternalErrorException; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; @@ -35,7 +34,6 @@ import com.cloud.utils.component.AdapterBase; public class QCOW2Processor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(QCOW2Processor.class); private static final int VIRTUALSIZE_HEADER_LOCATION = 24; private StorageLayer _storage; @@ -48,14 +46,14 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to QCOW2."); + logger.debug("We currently don't handle conversion from " + format + " to QCOW2."); return null; } String qcow2Path = templatePath + File.separator + templateName + "." 
+ ImageFormat.QCOW2.getFileExtension(); if (!_storage.exists(qcow2Path)) { - s_logger.debug("Unable to find the qcow2 file: " + qcow2Path); + logger.debug("Unable to find the qcow2 file: " + qcow2Path); return null; } @@ -70,7 +68,7 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa try { info.virtualSize = getTemplateVirtualSize(qcow2File); } catch (IOException e) { - s_logger.error("Unable to get virtual size from " + qcow2File.getName()); + logger.error("Unable to get virtual size from " + qcow2File.getName()); throw new InternalErrorException("unable to get virtual size from qcow2 file"); } @@ -83,7 +81,7 @@ public long getVirtualSize(File file) throws IOException { long size = getTemplateVirtualSize(file); return size; } catch (Exception e) { - s_logger.info("[ignored]" + "failed to get template virtual size for QCOW2: " + e.getLocalizedMessage()); + logger.info("[ignored]" + "failed to get template virtual size for QCOW2: " + e.getLocalizedMessage()); } return file.length(); } diff --git a/core/src/main/java/com/cloud/storage/template/RawImageProcessor.java b/core/src/main/java/com/cloud/storage/template/RawImageProcessor.java index 5fbc626f271e..d6c1f7a808f6 100644 --- a/core/src/main/java/com/cloud/storage/template/RawImageProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/RawImageProcessor.java @@ -24,7 +24,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; @@ -32,7 +31,6 @@ import com.cloud.utils.component.AdapterBase; public class RawImageProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(RawImageProcessor.class); StorageLayer _storage; @Override @@ -53,13 +51,13 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, 
String templateName, long processTimeout) throws InternalErrorException { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to raw image."); + logger.debug("We currently don't handle conversion from " + format + " to raw image."); return null; } String imgPath = templatePath + File.separator + templateName + "." + ImageFormat.RAW.getFileExtension(); if (!_storage.exists(imgPath)) { - s_logger.debug("Unable to find raw image:" + imgPath); + logger.debug("Unable to find raw image:" + imgPath); return null; } FormatInfo info = new FormatInfo(); @@ -67,7 +65,7 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa info.filename = templateName + "." + ImageFormat.RAW.getFileExtension(); info.size = _storage.getSize(imgPath); info.virtualSize = info.size; - s_logger.debug("Process raw image " + info.filename + " successfully"); + logger.debug("Process raw image " + info.filename + " successfully"); return info; } diff --git a/core/src/main/java/com/cloud/storage/template/S3TemplateDownloader.java b/core/src/main/java/com/cloud/storage/template/S3TemplateDownloader.java index 44565c4416c4..34ba3c6b1eae 100644 --- a/core/src/main/java/com/cloud/storage/template/S3TemplateDownloader.java +++ b/core/src/main/java/com/cloud/storage/template/S3TemplateDownloader.java @@ -38,7 +38,6 @@ import org.apache.commons.httpclient.methods.GetMethod; import org.apache.commons.httpclient.params.HttpMethodParams; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.BufferedInputStream; import java.io.IOException; @@ -56,7 +55,6 @@ * Execution of the instance is started when runInContext() is called. 
*/ public class S3TemplateDownloader extends ManagedContextRunnable implements TemplateDownloader { - private static final Logger LOGGER = Logger.getLogger(S3TemplateDownloader.class.getName()); private final String downloadUrl; private final String s3Key; @@ -110,7 +108,7 @@ public S3TemplateDownloader(S3TO s3TO, String downloadUrl, String installPath, D public long download(boolean resume, DownloadCompleteCallback callback) { if (!status.equals(Status.NOT_STARTED)) { // Only start downloading if we haven't started yet. - LOGGER.debug("Template download is already started, not starting again. Template: " + downloadUrl); + logger.debug("Template download is already started, not starting again. Template: " + downloadUrl); return 0; } @@ -118,7 +116,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { int responseCode; if ((responseCode = HTTPUtils.executeMethod(httpClient, getMethod)) == -1) { errorString = "Exception while executing HttpMethod " + getMethod.getName() + " on URL " + downloadUrl; - LOGGER.warn(errorString); + logger.warn(errorString); status = Status.UNRECOVERABLE_ERROR; return 0; @@ -126,7 +124,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { if (!HTTPUtils.verifyResponseCode(responseCode)) { errorString = "Response code for GetMethod of " + downloadUrl + " is incorrect, responseCode: " + responseCode; - LOGGER.warn(errorString); + logger.warn(errorString); status = Status.UNRECOVERABLE_ERROR; return 0; @@ -139,7 +137,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { // Check the contentLengthHeader and transferEncodingHeader. 
if (contentLengthHeader == null) { errorString = "The ContentLengthHeader of " + downloadUrl + " isn't supplied"; - LOGGER.warn(errorString); + logger.warn(errorString); status = Status.UNRECOVERABLE_ERROR; return 0; @@ -150,7 +148,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { if (remoteSize > maxTemplateSizeInByte) { errorString = "Remote size is too large for template " + downloadUrl + " remote size is " + remoteSize + " max allowed is " + maxTemplateSizeInByte; - LOGGER.warn(errorString); + logger.warn(errorString); status = Status.UNRECOVERABLE_ERROR; return 0; @@ -162,13 +160,13 @@ public long download(boolean resume, DownloadCompleteCallback callback) { inputStream = new BufferedInputStream(getMethod.getResponseBodyAsStream()); } catch (IOException e) { errorString = "Exception occurred while opening InputStream for template " + downloadUrl; - LOGGER.warn(errorString); + logger.warn(errorString); status = Status.UNRECOVERABLE_ERROR; return 0; } - LOGGER.info("Starting download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " and size " + toHumanReadableSize(remoteSize) + " bytes"); + logger.info("Starting download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " and size " + toHumanReadableSize(remoteSize) + " bytes"); // Time the upload starts. final Date start = new Date(); @@ -197,7 +195,7 @@ public void progressChanged(ProgressEvent progressEvent) { // Record the amount of bytes transferred. 
totalBytes += progressEvent.getBytesTransferred(); - LOGGER.trace("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + ((new Date().getTime() - start.getTime()) / 1000) + " seconds"); + logger.trace("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + ((new Date().getTime() - start.getTime()) / 1000) + " seconds"); if (progressEvent.getEventType() == ProgressEventType.TRANSFER_STARTED_EVENT) { status = Status.IN_PROGRESS; @@ -216,15 +214,15 @@ public void progressChanged(ProgressEvent progressEvent) { upload.waitForCompletion(); } catch (InterruptedException e) { // Interruption while waiting for the upload to complete. - LOGGER.warn("Interruption occurred while waiting for upload of " + downloadUrl + " to complete"); + logger.warn("Interruption occurred while waiting for upload of " + downloadUrl + " to complete"); } downloadTime = new Date().getTime() - start.getTime(); if (status == Status.DOWNLOAD_FINISHED) { - LOGGER.info("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + (downloadTime / 1000) + " seconds, completed successfully!"); + logger.info("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + (downloadTime / 1000) + " seconds, completed successfully!"); } else { - LOGGER.warn("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + (downloadTime / 1000) + " seconds, completed with status " + status.toString()); + logger.warn("Template download from " + downloadUrl + " to S3 bucket " + s3TO.getBucketName() + " transferred " + toHumanReadableSize(totalBytes) + " in " + (downloadTime / 1000) + " seconds, 
completed with status " + status.toString()); } // Close input stream @@ -278,7 +276,7 @@ public InputStream getS3ObjectInputStream() { } public void cleanupAfterError() { - LOGGER.warn("Cleanup after error, trying to remove object: " + s3Key); + logger.warn("Cleanup after error, trying to remove object: " + s3Key); S3Utils.deleteObject(s3TO, s3TO.getBucketName(), s3Key); } diff --git a/core/src/main/java/com/cloud/storage/template/ScpTemplateDownloader.java b/core/src/main/java/com/cloud/storage/template/ScpTemplateDownloader.java index 912809ca722b..44379efcd553 100644 --- a/core/src/main/java/com/cloud/storage/template/ScpTemplateDownloader.java +++ b/core/src/main/java/com/cloud/storage/template/ScpTemplateDownloader.java @@ -23,7 +23,6 @@ import java.net.URI; import java.net.URISyntaxException; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -31,7 +30,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ScpTemplateDownloader extends TemplateDownloaderBase implements TemplateDownloader { - private static final Logger s_logger = Logger.getLogger(ScpTemplateDownloader.class); public ScpTemplateDownloader(StorageLayer storageLayer, String downloadUrl, String toDir, long maxTemplateSizeInBytes, DownloadCompleteCallback callback) { super(storageLayer, downloadUrl, toDir, maxTemplateSizeInBytes, callback); @@ -40,7 +38,7 @@ public ScpTemplateDownloader(StorageLayer storageLayer, String downloadUrl, Stri try { uri = new URI(_downloadUrl); } catch (URISyntaxException e) { - s_logger.warn("URI syntax error: " + _downloadUrl); + logger.warn("URI syntax error: " + _downloadUrl); _status = Status.UNRECOVERABLE_ERROR; return; } @@ -108,7 +106,7 @@ public long download(boolean resume, DownloadCompleteCallback callback) { if (!file.exists()) { _status = Status.UNRECOVERABLE_ERROR; - s_logger.debug("unable to scp the file " + _downloadUrl); + logger.debug("unable to scp the file " + _downloadUrl); return 0; } @@ -123,7 +121,7 @@ 
public long download(boolean resume, DownloadCompleteCallback callback) { return _totalBytes; } catch (Exception e) { - s_logger.warn("Unable to download " + _downloadUrl, e); + logger.warn("Unable to download " + _downloadUrl, e); _status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; _errorString = e.getMessage(); return 0; diff --git a/core/src/main/java/com/cloud/storage/template/SimpleHttpMultiFileDownloader.java b/core/src/main/java/com/cloud/storage/template/SimpleHttpMultiFileDownloader.java index 7a0ce47ec996..db4dccb1302d 100644 --- a/core/src/main/java/com/cloud/storage/template/SimpleHttpMultiFileDownloader.java +++ b/core/src/main/java/com/cloud/storage/template/SimpleHttpMultiFileDownloader.java @@ -41,12 +41,10 @@ import org.apache.commons.httpclient.methods.HeadMethod; import org.apache.commons.httpclient.params.HttpMethodParams; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.storage.StorageLayer; public class SimpleHttpMultiFileDownloader extends ManagedContextRunnable implements TemplateDownloader { - public static final Logger s_logger = Logger.getLogger(SimpleHttpMultiFileDownloader.class.getName()); private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); private static final int CHUNK_SIZE = 1024 * 1024; //1M @@ -108,7 +106,7 @@ private void checkTemporaryDestination(String toDir) { } catch (IOException ex) { errorString = "Unable to start download -- check url? 
"; currentStatus = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - s_logger.warn("Exception in constructor -- " + ex.toString()); + logger.warn("Exception in constructor -- " + ex.toString()); } } @@ -151,7 +149,7 @@ private void tryAndGetTotalRemoteSize() { } totalRemoteSize += Long.parseLong(contentLengthHeader.getValue()); } catch (IOException e) { - s_logger.warn(String.format("Cannot reach URL: %s while trying to get remote sizes due to: %s", downloadUrl, e.getMessage()), e); + logger.warn(String.format("Cannot reach URL: %s while trying to get remote sizes due to: %s", downloadUrl, e.getMessage()), e); } finally { headMethod.releaseConnection(); } @@ -159,7 +157,7 @@ private void tryAndGetTotalRemoteSize() { } private long downloadFile(String downloadUrl) { - s_logger.debug("Starting download for " + downloadUrl); + logger.debug("Starting download for " + downloadUrl); currentTotalBytes = 0; currentRemoteSize = 0; File file = null; @@ -178,7 +176,7 @@ private long downloadFile(String downloadUrl) { RandomAccessFile out = new RandomAccessFile(file, "rw"); ) { out.seek(localFileSize); - s_logger.info("Starting download from " + downloadUrl + " to " + currentToFile + " remoteSize=" + toHumanReadableSize(currentRemoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes)); + logger.info("Starting download from " + downloadUrl + " to " + currentToFile + " remoteSize=" + toHumanReadableSize(currentRemoteSize) + " , max size=" + toHumanReadableSize(maxTemplateSizeInBytes)); if (copyBytes(file, in, out)) return 0; checkDownloadCompletion(); } @@ -207,11 +205,11 @@ private long downloadFile(String downloadUrl) { public long download(boolean resume, DownloadCompleteCallback callback) { if (skipDownloadOnStatus()) return 0; if (resume) { - s_logger.error("Resume not allowed for this downloader"); + logger.error("Resume not allowed for this downloader"); status = Status.UNRECOVERABLE_ERROR; return 0; } - s_logger.debug("Starting downloads"); + 
logger.debug("Starting downloads"); status = Status.IN_PROGRESS; Date start = new Date(); tryAndGetTotalRemoteSize(); @@ -270,7 +268,7 @@ private void checkDownloadCompletion() { private boolean canHandleDownloadSize() { if (currentRemoteSize > maxTemplateSizeInBytes) { - s_logger.info("Remote size is too large: " + toHumanReadableSize(currentRemoteSize) + " , max=" + toHumanReadableSize(maxTemplateSizeInBytes)); + logger.info("Remote size is too large: " + toHumanReadableSize(currentRemoteSize) + " , max=" + toHumanReadableSize(maxTemplateSizeInBytes)); currentStatus = Status.UNRECOVERABLE_ERROR; errorString = "Download file size is too large"; return false; @@ -341,7 +339,7 @@ private long checkLocalFileSizeForResume(boolean resume, File file) { long localFileSize = 0; if (file.exists() && resume) { localFileSize = file.length(); - s_logger.info("Resuming download to file (current size)=" + toHumanReadableSize(localFileSize)); + logger.info("Resuming download to file (current size)=" + toHumanReadableSize(localFileSize)); } return localFileSize; } @@ -425,7 +423,7 @@ protected void runInContext() { try { download(resume, completionCallback); } catch (Throwable t) { - s_logger.warn("Caught exception during download " + t.getMessage(), t); + logger.warn("Caught exception during download " + t.getMessage(), t); errorString = "Failed to install: " + t.getMessage(); currentStatus = TemplateDownloader.Status.UNRECOVERABLE_ERROR; } diff --git a/core/src/main/java/com/cloud/storage/template/TARProcessor.java b/core/src/main/java/com/cloud/storage/template/TARProcessor.java index 51aeb234c50d..70b59336323b 100644 --- a/core/src/main/java/com/cloud/storage/template/TARProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/TARProcessor.java @@ -22,14 +22,12 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.utils.component.AdapterBase; -import org.apache.log4j.Logger; import 
javax.naming.ConfigurationException; import java.io.File; import java.util.Map; public class TARProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(TARProcessor.class); private StorageLayer _storage; @@ -41,14 +39,14 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to TAR."); + logger.debug("We currently don't handle conversion from " + format + " to TAR."); return null; } String tarPath = templatePath + File.separator + templateName + "." + ImageFormat.TAR.getFileExtension(); if (!_storage.exists(tarPath)) { - s_logger.debug("Unable to find the tar file: " + tarPath); + logger.debug("Unable to find the tar file: " + tarPath); return null; } diff --git a/core/src/main/java/com/cloud/storage/template/TemplateDownloaderBase.java b/core/src/main/java/com/cloud/storage/template/TemplateDownloaderBase.java index f56e4911ca74..cf6f4d27ecd7 100644 --- a/core/src/main/java/com/cloud/storage/template/TemplateDownloaderBase.java +++ b/core/src/main/java/com/cloud/storage/template/TemplateDownloaderBase.java @@ -21,14 +21,12 @@ import java.io.File; -import org.apache.log4j.Logger; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.storage.StorageLayer; public abstract class TemplateDownloaderBase extends ManagedContextRunnable implements TemplateDownloader { - private static final Logger s_logger = Logger.getLogger(TemplateDownloaderBase.class); protected String _downloadUrl; protected String _toFile; @@ -133,7 +131,7 @@ protected void runInContext() { try { download(_resume, _callback); } catch (Exception e) { - s_logger.warn("Unable to complete download due to ", e); + logger.warn("Unable to complete download due to ", e); 
_errorString = "Failed to install: " + e.getMessage(); _status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; } diff --git a/core/src/main/java/com/cloud/storage/template/TemplateLocation.java b/core/src/main/java/com/cloud/storage/template/TemplateLocation.java index 6ff53a0410a9..563c642f292e 100644 --- a/core/src/main/java/com/cloud/storage/template/TemplateLocation.java +++ b/core/src/main/java/com/cloud/storage/template/TemplateLocation.java @@ -31,15 +31,16 @@ import java.util.Properties; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; import com.cloud.storage.template.Processor.FormatInfo; import com.cloud.utils.NumbersUtil; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class TemplateLocation { - private static final Logger s_logger = Logger.getLogger(TemplateLocation.class); + protected Logger logger = LogManager.getLogger(getClass()); public final static String Filename = "template.properties"; StorageLayer _storage; @@ -90,8 +91,8 @@ public boolean purge() { if (!isRemoved) { purged = false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug((isRemoved ? "Removed " : "Unable to remove") + file); + if (logger.isDebugEnabled()) { + logger.debug((isRemoved ? 
"Removed " : "Unable to remove") + file); } } @@ -102,27 +103,27 @@ public boolean load() throws IOException { try (FileInputStream strm = new FileInputStream(_file);) { _props.load(strm); } catch (IOException e) { - s_logger.warn("Unable to load the template properties for '" + _file + "': ", e); + logger.warn("Unable to load the template properties for '" + _file + "': ", e); } for (ImageFormat format : ImageFormat.values()) { String currentExtension = format.getFileExtension(); String ext = _props.getProperty(currentExtension); if (ext != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("File extension '" + currentExtension + "' was found in '" + _file + "'."); + if (logger.isDebugEnabled()) { + logger.debug("File extension '" + currentExtension + "' was found in '" + _file + "'."); } FormatInfo info = new FormatInfo(); info.format = format; info.filename = _props.getProperty(currentExtension + ".filename"); if (info.filename == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Property '" + currentExtension + ".filename' was not found in '" + _file + "'. Current format is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Property '" + currentExtension + ".filename' was not found in '" + _file + "'. Current format is ignored."); } continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Property '" + currentExtension + ".filename' was found in '" + _file + "'. Current format will be parsed."); + if (logger.isDebugEnabled()) { + logger.debug("Property '" + currentExtension + ".filename' was found in '" + _file + "'. 
Current format will be parsed."); } info.size = NumbersUtil.parseLong(_props.getProperty(currentExtension + ".size"), -1); _props.setProperty("physicalSize", Long.toString(info.size)); @@ -131,18 +132,18 @@ public boolean load() throws IOException { if (!checkFormatValidity(info)) { _isCorrupted = true; - s_logger.warn("Cleaning up inconsistent information for " + format); + logger.warn("Cleaning up inconsistent information for " + format); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Format extension '" + currentExtension + "' wasn't found in '" + _file + "'."); + if (logger.isDebugEnabled()) { + logger.debug("Format extension '" + currentExtension + "' wasn't found in '" + _file + "'."); } } } if (_props.getProperty("uniquename") == null || _props.getProperty("virtualsize") == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Property 'uniquename' or 'virtualsize' weren't found in '" + _file + "'. Loading failed."); + if (logger.isDebugEnabled()) { + logger.debug("Property 'uniquename' or 'virtualsize' weren't found in '" + _file + "'. 
Loading failed."); } return false; } @@ -160,7 +161,7 @@ public boolean save() { try (FileOutputStream strm = new FileOutputStream(_file);) { _props.store(strm, ""); } catch (IOException e) { - s_logger.warn("Unable to save the template properties ", e); + logger.warn("Unable to save the template properties ", e); return false; } return true; @@ -204,9 +205,9 @@ public boolean addFormat(FormatInfo newInfo) { deleteFormat(newInfo.format); if (!checkFormatValidity(newInfo)) { - s_logger.warn("Format is invalid"); - s_logger.debug("Format: " + newInfo.format + " size: " + toHumanReadableSize(newInfo.size) + " virtualsize: " + toHumanReadableSize(newInfo.virtualSize) + " filename: " + newInfo.filename); - s_logger.debug("format, filename cannot be null and size, virtual size should be > 0 "); + logger.warn("Format is invalid"); + logger.debug("Format: " + newInfo.format + " size: " + toHumanReadableSize(newInfo.size) + " virtualsize: " + toHumanReadableSize(newInfo.virtualSize) + " filename: " + newInfo.filename); + logger.debug("format, filename cannot be null and size, virtual size should be > 0 "); return false; } diff --git a/core/src/main/java/com/cloud/storage/template/VhdProcessor.java b/core/src/main/java/com/cloud/storage/template/VhdProcessor.java index baea7bf0db51..9f18d782b426 100644 --- a/core/src/main/java/com/cloud/storage/template/VhdProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/VhdProcessor.java @@ -27,7 +27,6 @@ import org.apache.commons.compress.compressors.CompressorException; import org.apache.commons.compress.compressors.CompressorInputStream; import org.apache.commons.compress.compressors.CompressorStreamFactory; -import org.apache.log4j.Logger; import javax.naming.ConfigurationException; import java.io.BufferedInputStream; @@ -46,7 +45,6 @@ */ public class VhdProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(VhdProcessor.class); StorageLayer _storage; private int 
vhdFooterSize = 512; private int vhdCookieOffset = 8; @@ -64,13 +62,13 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException { if (format != null) { - s_logger.debug("We currently don't handle conversion from " + format + " to VHD."); + logger.debug("We currently don't handle conversion from " + format + " to VHD."); return null; } String vhdPath = templatePath + File.separator + templateName + "." + ImageFormat.VHD.getFileExtension(); if (!_storage.exists(vhdPath)) { - s_logger.debug("Unable to find the vhd file: " + vhdPath); + logger.debug("Unable to find the vhd file: " + vhdPath); return null; } @@ -84,7 +82,7 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa try { info.virtualSize = getTemplateVirtualSize(vhdFile); } catch (IOException e) { - s_logger.error("Unable to get the virtual size for " + vhdPath); + logger.error("Unable to get the virtual size for " + vhdPath); throw new InternalErrorException("unable to get virtual size from vhd file"); } @@ -97,7 +95,7 @@ public long getVirtualSize(File file) throws IOException { long size = getTemplateVirtualSize(file); return size; } catch (Exception e) { - s_logger.info("[ignored]" + "failed to get template virtual size for VHD: " + e.getLocalizedMessage()); + logger.info("[ignored]" + "failed to get template virtual size for VHD: " + e.getLocalizedMessage()); } return file.length(); } @@ -117,7 +115,7 @@ protected long getTemplateVirtualSize(File file) throws IOException { try { strm = new CompressorStreamFactory().createCompressorInputStream(fileStream); } catch (CompressorException e) { - s_logger.info("error opening compressed VHD file " + file.getName()); + logger.info("error opening compressed VHD file " + file.getName()); return file.length(); } } try { @@ -146,7 +144,7 @@ protected long 
getTemplateVirtualSize(File file) throws IOException { throw new IOException("Unexpected end-of-file"); } } catch (IOException e) { - s_logger.warn("Error reading virtual size from VHD file " + e.getMessage() + " VHD: " + file.getName()); + logger.warn("Error reading virtual size from VHD file " + e.getMessage() + " VHD: " + file.getName()); return file.length(); } finally { if (strm != null) { @@ -180,11 +178,11 @@ private boolean checkCompressed(String fileName) throws IOException { cin = new CompressorStreamFactory().createCompressorInputStream(bin); } catch (CompressorException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); return false; } catch (FileNotFoundException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); return false; } finally { if (cin != null) diff --git a/core/src/main/java/com/cloud/storage/template/VmdkProcessor.java b/core/src/main/java/com/cloud/storage/template/VmdkProcessor.java index 927515f75441..4f53c556667f 100644 --- a/core/src/main/java/com/cloud/storage/template/VmdkProcessor.java +++ b/core/src/main/java/com/cloud/storage/template/VmdkProcessor.java @@ -30,7 +30,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; import com.cloud.storage.Storage.ImageFormat; @@ -40,7 +39,6 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class VmdkProcessor extends AdapterBase implements Processor { - private static final Logger s_logger = Logger.getLogger(VmdkProcessor.class); StorageLayer _storage; @@ -52,17 +50,17 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa @Override public FormatInfo process(String templatePath, ImageFormat format, String templateName, long processTimeout) throws InternalErrorException { if (format != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("We currently don't handle conversion from " + format + " to VMDK."); + if 
(logger.isInfoEnabled()) { + logger.info("We currently don't handle conversion from " + format + " to VMDK."); } return null; } - s_logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); + logger.info("Template processing. templatePath: " + templatePath + ", templateName: " + templateName); String templateFilePath = templatePath + File.separator + templateName + "." + ImageFormat.VMDK.getFileExtension(); if (!_storage.exists(templateFilePath)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to find the vmware template file: " + templateFilePath); + if (logger.isInfoEnabled()) { + logger.info("Unable to find the vmware template file: " + templateFilePath); } return null; } @@ -82,7 +80,7 @@ public long getVirtualSize(File file) { long size = getTemplateVirtualSize(file.getParent(), file.getName()); return size; } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failed to get template virtual size for vmdk: " + e.getLocalizedMessage()); } return file.length(); @@ -108,15 +106,15 @@ public long getTemplateVirtualSize(String templatePath, String templateName) thr } } catch(FileNotFoundException ex) { String msg = "Unable to open file '" + templateFileFullPath + "' " + ex.toString(); - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } catch(IOException ex) { String msg = "Unable read open file '" + templateFileFullPath + "' " + ex.toString(); - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } - s_logger.debug("vmdk file had size=" + toHumanReadableSize(virtualSize)); + logger.debug("vmdk file had size=" + toHumanReadableSize(virtualSize)); return virtualSize; } diff --git a/core/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsAnswer.java b/core/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsAnswer.java index 7390e4fcdd2a..6421f45ed0a7 100644 --- 
a/core/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsAnswer.java +++ b/core/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsAnswer.java @@ -21,13 +21,11 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.api.ApiConstants; -import org.apache.log4j.Logger; import java.util.HashMap; import java.util.Map; public class DiagnosticsAnswer extends Answer { - public static final Logger LOGGER = Logger.getLogger(DiagnosticsAnswer.class); public DiagnosticsAnswer(DiagnosticsCommand cmd, boolean result, String details) { super(cmd, result, details); diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java b/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java index 27e35b7074b0..be8841f3c885 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadHelper.java @@ -24,11 +24,12 @@ import org.apache.cloudstack.agent.directdownload.HttpsDirectDownloadCommand; import org.apache.cloudstack.agent.directdownload.MetalinkDirectDownloadCommand; import org.apache.cloudstack.agent.directdownload.NfsDirectDownloadCommand; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class DirectDownloadHelper { - public static final Logger LOGGER = Logger.getLogger(DirectDownloadHelper.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(DirectDownloadHelper.class); /** * Get direct template downloader from direct download command and destination pool diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/DirectTemplateDownloaderImpl.java b/core/src/main/java/org/apache/cloudstack/direct/download/DirectTemplateDownloaderImpl.java index 9476dbaa5cee..b6a872aca7cd 100644 --- 
a/core/src/main/java/org/apache/cloudstack/direct/download/DirectTemplateDownloaderImpl.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/DirectTemplateDownloaderImpl.java @@ -22,7 +22,8 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.utils.security.DigestHelper; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.io.File; import java.io.FileInputStream; @@ -43,7 +44,7 @@ public abstract class DirectTemplateDownloaderImpl implements DirectTemplateDown private boolean redownload = false; protected String temporaryDownloadPath; - public static final Logger s_logger = Logger.getLogger(DirectTemplateDownloaderImpl.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); protected DirectTemplateDownloaderImpl(final String url, final String destPoolPath, final Long templateId, final String checksum, final String temporaryDownloadPath) { @@ -135,16 +136,16 @@ public boolean validateChecksum() { try { while (!valid && retry > 0) { retry--; - s_logger.info("Performing checksum validation for downloaded template " + templateId + " using " + checksum + ", retries left: " + retry); + logger.info("Performing checksum validation for downloaded template " + templateId + " using " + checksum + ", retries left: " + retry); valid = DigestHelper.check(checksum, new FileInputStream(downloadedFilePath)); if (!valid && retry > 0) { - s_logger.info("Checksum validation failed, re-downloading template"); + logger.info("Checksum validation failed, re-downloading template"); redownload = true; resetDownloadFile(); downloadTemplate(); } } - s_logger.info("Checksum validation for template " + templateId + ": " + (valid ? "succeeded" : "failed")); + logger.info("Checksum validation for template " + templateId + ": " + (valid ? 
"succeeded" : "failed")); return valid; } catch (IOException e) { throw new CloudRuntimeException("could not check sum for file: " + downloadedFilePath, e); @@ -152,7 +153,7 @@ public boolean validateChecksum() { throw new CloudRuntimeException("Unknown checksum algorithm: " + checksum, e); } } - s_logger.info("No checksum provided, skipping checksum validation"); + logger.info("No checksum provided, skipping checksum validation"); return true; } @@ -161,14 +162,14 @@ public boolean validateChecksum() { */ private void resetDownloadFile() { File f = new File(getDownloadedFilePath()); - s_logger.info("Resetting download file: " + getDownloadedFilePath() + ", in order to re-download and persist template " + templateId + " on it"); + logger.info("Resetting download file: " + getDownloadedFilePath() + ", in order to re-download and persist template " + templateId + " on it"); try { if (f.exists()) { f.delete(); } f.createNewFile(); } catch (IOException e) { - s_logger.error("Error creating file to download on: " + getDownloadedFilePath() + " due to: " + e.getMessage()); + logger.error("Error creating file to download on: " + getDownloadedFilePath() + " due to: " + e.getMessage()); throw new CloudRuntimeException("Failed to create download file for direct download"); } } diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java index e1b2f1fe4299..70cdb8ed467d 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/HttpDirectTemplateDownloader.java @@ -40,13 +40,11 @@ import org.apache.commons.httpclient.methods.GetMethod; import org.apache.commons.httpclient.methods.HeadMethod; import org.apache.commons.io.IOUtils; -import org.apache.log4j.Logger; public class HttpDirectTemplateDownloader extends 
DirectTemplateDownloaderImpl { protected HttpClient client; private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); - public static final Logger s_logger = Logger.getLogger(HttpDirectTemplateDownloader.class.getName()); protected GetMethod request; protected Map reqHeaders = new HashMap<>(); @@ -83,7 +81,7 @@ public Pair downloadTemplate() { try { int status = client.executeMethod(request); if (status != HttpStatus.SC_OK) { - s_logger.warn("Not able to download template, status code: " + status); + logger.warn("Not able to download template, status code: " + status); return new Pair<>(false, null); } return performDownload(); @@ -95,14 +93,14 @@ public Pair downloadTemplate() { } protected Pair performDownload() { - s_logger.info("Downloading template " + getTemplateId() + " from " + getUrl() + " to: " + getDownloadedFilePath()); + logger.info("Downloading template " + getTemplateId() + " from " + getUrl() + " to: " + getDownloadedFilePath()); try ( InputStream in = request.getResponseBodyAsStream(); OutputStream out = new FileOutputStream(getDownloadedFilePath()) ) { IOUtils.copy(in, out); } catch (IOException e) { - s_logger.error("Error downloading template " + getTemplateId() + " due to: " + e.getMessage()); + logger.error("Error downloading template " + getTemplateId() + " due to: " + e.getMessage()); return new Pair<>(false, null); } return new Pair<>(true, getDownloadedFilePath()); @@ -113,12 +111,12 @@ public boolean checkUrl(String url) { HeadMethod httpHead = new HeadMethod(url); try { if (client.executeMethod(httpHead) != HttpStatus.SC_OK) { - s_logger.error(String.format("Invalid URL: %s", url)); + logger.error(String.format("Invalid URL: %s", url)); return false; } return true; } catch (IOException e) { - s_logger.error(String.format("Cannot reach URL: %s due to: %s", url, e.getMessage()), e); + logger.error(String.format("Cannot reach URL: %s due to: %s", url, e.getMessage()), e); return 
false; } finally { httpHead.releaseConnection(); @@ -142,7 +140,7 @@ public List getMetalinkUrls(String metalinkUrl) { try { status = client.executeMethod(getMethod); } catch (IOException e) { - s_logger.error("Error retrieving urls form metalink: " + metalinkUrl); + logger.error("Error retrieving urls form metalink: " + metalinkUrl); getMethod.releaseConnection(); return null; } @@ -152,7 +150,7 @@ public List getMetalinkUrls(String metalinkUrl) { addMetalinkUrlsToListFromInputStream(is, urls); } } catch (IOException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } finally { getMethod.releaseConnection(); } @@ -168,7 +166,7 @@ public List getMetalinkChecksums(String metalinkUrl) { return generateChecksumListFromInputStream(is); } } catch (IOException e) { - s_logger.error(String.format("Error obtaining metalink checksums on URL %s: %s", metalinkUrl, e.getMessage()), e); + logger.error(String.format("Error obtaining metalink checksums on URL %s: %s", metalinkUrl, e.getMessage()), e); } finally { getMethod.releaseConnection(); } diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java index 70a3eb29bc7a..fbf5f7281116 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/HttpsDirectTemplateDownloader.java @@ -118,7 +118,7 @@ private SSLContext getSSLContext() { sslContext.init(null, tm, null); return sslContext; } catch (KeyStoreException | NoSuchAlgorithmException | CertificateException | IOException | KeyManagementException e) { - s_logger.error(String.format("Failure getting SSL context for HTTPS downloader, using default SSL context: %s", e.getMessage()), e); + logger.error(String.format("Failure getting SSL context for HTTPS downloader, using default SSL context: %s", e.getMessage()), e); 
try { return SSLContext.getDefault(); } catch (NoSuchAlgorithmException ex) { @@ -143,7 +143,7 @@ public Pair downloadTemplate() { * Consume response and persist it on getDownloadedFilePath() file */ protected Pair consumeResponse(CloseableHttpResponse response) { - s_logger.info("Downloading template " + getTemplateId() + " from " + getUrl() + " to: " + getDownloadedFilePath()); + logger.info("Downloading template " + getTemplateId() + " from " + getUrl() + " to: " + getDownloadedFilePath()); if (response.getStatusLine().getStatusCode() != 200) { throw new CloudRuntimeException("Error on HTTPS response"); } @@ -153,7 +153,7 @@ protected Pair consumeResponse(CloseableHttpResponse response) OutputStream out = new FileOutputStream(getDownloadedFilePath()); IOUtils.copy(in, out); } catch (Exception e) { - s_logger.error("Error parsing response for template " + getTemplateId() + " due to: " + e.getMessage()); + logger.error("Error parsing response for template " + getTemplateId() + " due to: " + e.getMessage()); return new Pair<>(false, null); } return new Pair<>(true, getDownloadedFilePath()); @@ -165,12 +165,12 @@ public boolean checkUrl(String url) { try { CloseableHttpResponse response = httpsClient.execute(httpHead); if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { - s_logger.error(String.format("Invalid URL: %s", url)); + logger.error(String.format("Invalid URL: %s", url)); return false; } return true; } catch (IOException e) { - s_logger.error(String.format("Cannot reach URL: %s due to: %s", url, e.getMessage()), e); + logger.error(String.format("Cannot reach URL: %s due to: %s", url, e.getMessage()), e); return false; } finally { httpHead.releaseConnection(); @@ -215,11 +215,11 @@ public List getMetalinkUrls(String metalinkUrl) { response = httpsClient.execute(getMethod); if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { String msg = String.format("Cannot access metalink content on URL %s", metalinkUrl); - 
s_logger.error(msg); + logger.error(msg); throw new IOException(msg); } } catch (IOException e) { - s_logger.error(String.format("Error retrieving urls form metalink URL %s: %s", metalinkUrl, e.getMessage()), e); + logger.error(String.format("Error retrieving urls form metalink URL %s: %s", metalinkUrl, e.getMessage()), e); getMethod.releaseConnection(); return null; } @@ -229,7 +229,7 @@ public List getMetalinkUrls(String metalinkUrl) { ByteArrayInputStream inputStream = new ByteArrayInputStream(responseStr.getBytes(StandardCharsets.UTF_8)); addMetalinkUrlsToListFromInputStream(inputStream, urls); } catch (IOException e) { - s_logger.warn(e.getMessage(), e); + logger.warn(e.getMessage(), e); } finally { getMethod.releaseConnection(); } @@ -246,7 +246,7 @@ public List getMetalinkChecksums(String metalinkUrl) { return generateChecksumListFromInputStream(is); } } catch (IOException e) { - s_logger.error(String.format("Error obtaining metalink checksums on URL %s: %s", metalinkUrl, e.getMessage()), e); + logger.error(String.format("Error obtaining metalink checksums on URL %s: %s", metalinkUrl, e.getMessage()), e); } finally { getMethod.releaseConnection(); } diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java index 06578d8c2b28..24d642460c5f 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/MetalinkDirectTemplateDownloader.java @@ -23,7 +23,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.File; import java.util.List; @@ -39,8 +38,6 @@ public class MetalinkDirectTemplateDownloader extends DirectTemplateDownloaderIm private Integer connectTimeout; private Integer soTimeout; - private static final 
Logger s_logger = Logger.getLogger(MetalinkDirectTemplateDownloader.class.getName()); - protected DirectTemplateDownloader createDownloaderForMetalinks(String url, Long templateId, String destPoolPath, String checksum, Map headers, @@ -55,7 +52,7 @@ protected DirectTemplateDownloader createDownloaderForMetalinks(String url, Long } else if (url.toLowerCase().startsWith("nfs:")) { return new NfsDirectTemplateDownloader(url); } else { - s_logger.error(String.format("Cannot find a suitable downloader to handle the metalink URL %s", url)); + logger.error(String.format("Cannot find a suitable downloader to handle the metalink URL %s", url)); return null; } } @@ -75,10 +72,10 @@ public MetalinkDirectTemplateDownloader(String url, String destPoolPath, Long te metalinkUrls = downloader.getMetalinkUrls(url); metalinkChecksums = downloader.getMetalinkChecksums(url); if (CollectionUtils.isEmpty(metalinkUrls)) { - s_logger.error(String.format("No urls found on metalink file: %s. Not possible to download template %s ", url, templateId)); + logger.error(String.format("No urls found on metalink file: %s. Not possible to download template %s ", url, templateId)); } else { setUrl(metalinkUrls.get(0)); - s_logger.info("Metalink downloader created, metalink url: " + url + " parsed - " + + logger.info("Metalink downloader created, metalink url: " + url + " parsed - " + metalinkUrls.size() + " urls and " + (CollectionUtils.isNotEmpty(metalinkChecksums) ? 
metalinkChecksums.size() : "0") + " checksums found"); } @@ -96,7 +93,7 @@ public Pair downloadTemplate() { if (!isRedownload()) { setUrl(metalinkUrls.get(i)); } - s_logger.info("Trying to download template from url: " + getUrl()); + logger.info("Trying to download template from url: " + getUrl()); DirectTemplateDownloader urlDownloader = createDownloaderForMetalinks(getUrl(), getTemplateId(), getDestPoolPath(), getChecksum(), headers, connectTimeout, soTimeout, null, temporaryDownloadPath); try { @@ -109,10 +106,10 @@ public Pair downloadTemplate() { Pair downloadResult = urlDownloader.downloadTemplate(); downloaded = downloadResult.first(); if (downloaded) { - s_logger.info("Successfully downloaded template from url: " + getUrl()); + logger.info("Successfully downloaded template from url: " + getUrl()); } } catch (Exception e) { - s_logger.error(String.format("Error downloading template: %s from URL: %s due to: %s", getTemplateId(), getUrl(), e.getMessage()), e); + logger.error(String.format("Error downloading template: %s from URL: %s due to: %s", getTemplateId(), getUrl(), e.getMessage()), e); } i++; } @@ -125,7 +122,7 @@ public boolean validateChecksum() { if (StringUtils.isBlank(getChecksum()) && CollectionUtils.isNotEmpty(metalinkChecksums)) { String chk = metalinkChecksums.get(random.nextInt(metalinkChecksums.size())); setChecksum(chk); - s_logger.info("Checksum not provided but " + metalinkChecksums.size() + " found on metalink file, performing checksum using one of them: " + chk); + logger.info("Checksum not provided but " + metalinkChecksums.size() + " found on metalink file, performing checksum using one of them: " + chk); } return super.validateChecksum(); } @@ -133,7 +130,7 @@ public boolean validateChecksum() { @Override public boolean checkUrl(String metalinkUrl) { if (!downloader.checkUrl(metalinkUrl)) { - s_logger.error(String.format("Metalink URL check failed for: %s", metalinkUrl)); + logger.error(String.format("Metalink URL check failed for: 
%s", metalinkUrl)); return false; } diff --git a/core/src/main/java/org/apache/cloudstack/direct/download/NfsDirectTemplateDownloader.java b/core/src/main/java/org/apache/cloudstack/direct/download/NfsDirectTemplateDownloader.java index d606136e297d..0b7866104b18 100644 --- a/core/src/main/java/org/apache/cloudstack/direct/download/NfsDirectTemplateDownloader.java +++ b/core/src/main/java/org/apache/cloudstack/direct/download/NfsDirectTemplateDownloader.java @@ -80,7 +80,7 @@ public boolean checkUrl(String url) { parseUrl(); return true; } catch (CloudRuntimeException e) { - s_logger.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e); + logger.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e); return false; } } diff --git a/core/src/test/java/com/cloud/agent/transport/LoggingExclusionStrategyTest.java b/core/src/test/java/com/cloud/agent/transport/LoggingExclusionStrategyTest.java new file mode 100644 index 000000000000..e02fe4516ab7 --- /dev/null +++ b/core/src/test/java/com/cloud/agent/transport/LoggingExclusionStrategyTest.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.transport; + +import com.cloud.agent.api.BadCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.GetStorageStatsCommand; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(MockitoJUnitRunner.class) +public class LoggingExclusionStrategyTest { + + @Mock + Logger loggerMock; + @Spy + @InjectMocks + LoggingExclusionStrategy loggingExclusionStrategySpy; + + @Test + public void shouldSkipClassTestArrayClazz() { + List array = new ArrayList<>(); + + boolean result = loggingExclusionStrategySpy.shouldSkipClass(array.getClass()); + + Assert.assertFalse(result); + } + + @Test + public void shouldSkipClassTestNotSubclassOfCommand() { + Integer integer = 1; + + boolean result = loggingExclusionStrategySpy.shouldSkipClass(integer.getClass()); + + Assert.assertFalse(result); + } + + @Test + public void shouldSkipClassTestNullClassAnnotation() { + Command cmd = new BadCommand(); + Mockito.doReturn(true).when(loggerMock).isEnabled(Level.DEBUG); + + boolean result = loggingExclusionStrategySpy.shouldSkipClass(cmd.getClass()); + + Assert.assertFalse(result); + } + + @Test + public void shouldSkipClassTestWithClassAnnotation() { + Command cmd = new GetStorageStatsCommand(); + Mockito.doReturn(true).when(loggerMock).isEnabled(Level.TRACE); + + boolean result = loggingExclusionStrategySpy.shouldSkipClass(cmd.getClass()); + + Assert.assertFalse(result); + } + +} diff --git a/core/src/test/java/com/cloud/agent/transport/RequestTest.java b/core/src/test/java/com/cloud/agent/transport/RequestTest.java index 6eada0226fce..0fe42c7cede8 100644 --- a/core/src/test/java/com/cloud/agent/transport/RequestTest.java 
+++ b/core/src/test/java/com/cloud/agent/transport/RequestTest.java @@ -22,8 +22,8 @@ import java.nio.ByteBuffer; import junit.framework.TestCase; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.Assert; import org.mockito.Mockito; @@ -43,7 +43,6 @@ import com.cloud.agent.transport.Request.Version; import com.cloud.exception.UnsupportedVersionException; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.serializer.GsonHelper; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; @@ -58,47 +57,22 @@ */ public class RequestTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(RequestTest.class); + protected Logger logger = LogManager.getLogger(getClass()); public void testSerDeser() { - s_logger.info("Testing serializing and deserializing works as expected"); + logger.info("Testing serializing and deserializing works as expected"); - s_logger.info("UpdateHostPasswordCommand should have two parameters that doesn't show in logging"); + logger.info("UpdateHostPasswordCommand should have two parameters that doesn't show in logging"); UpdateHostPasswordCommand cmd1 = new UpdateHostPasswordCommand("abc", "def"); - s_logger.info("SecStorageFirewallCfgCommand has a context map that shouldn't show up in debug level"); + logger.info("SecStorageFirewallCfgCommand has a context map that shouldn't show up in debug level"); SecStorageFirewallCfgCommand cmd2 = new SecStorageFirewallCfgCommand(); - s_logger.info("GetHostStatsCommand should not show up at all in debug level"); + logger.info("GetHostStatsCommand should not show up at all in debug level"); GetHostStatsCommand cmd3 = new GetHostStatsCommand("hostguid", "hostname", 101); cmd2.addPortConfig("abc", "24", true, "eth0"); cmd2.addPortConfig("127.0.0.1", "44", false, "eth1"); Request 
sreq = new Request(2, 3, new Command[] {cmd1, cmd2, cmd3}, true, true); sreq.setSequence(892403717); - Logger logger = Logger.getLogger(GsonHelper.class); - Level level = logger.getLevel(); - - logger.setLevel(Level.DEBUG); - String log = sreq.log("Debug", true, Level.DEBUG); - assert (log.contains(UpdateHostPasswordCommand.class.getSimpleName())); - assert (log.contains(SecStorageFirewallCfgCommand.class.getSimpleName())); - assert (!log.contains(GetHostStatsCommand.class.getSimpleName())); - assert (!log.contains("username")); - assert (!log.contains("password")); - - logger.setLevel(Level.TRACE); - log = sreq.log("Trace", true, Level.TRACE); - assert (log.contains(UpdateHostPasswordCommand.class.getSimpleName())); - assert (log.contains(SecStorageFirewallCfgCommand.class.getSimpleName())); - assert (log.contains(GetHostStatsCommand.class.getSimpleName())); - assert (!log.contains("username")); - assert (!log.contains("password")); - - logger.setLevel(Level.INFO); - log = sreq.log("Info", true, Level.INFO); - assert (log == null); - - logger.setLevel(level); - byte[] bytes = sreq.getBytes(); assert Request.getSequence(bytes) == 892403717; @@ -109,9 +83,9 @@ public void testSerDeser() { try { creq = Request.parse(bytes); } catch (ClassNotFoundException e) { - s_logger.error("Unable to parse bytes: ", e); + logger.error("Unable to parse bytes: ", e); } catch (UnsupportedVersionException e) { - s_logger.error("Unable to parse bytes: ", e); + logger.error("Unable to parse bytes: ", e); } assert creq != null : "Couldn't get the request back"; @@ -127,9 +101,9 @@ public void testSerDeser() { try { sresp = Response.parse(bytes); } catch (ClassNotFoundException e) { - s_logger.error("Unable to parse bytes: ", e); + logger.error("Unable to parse bytes: ", e); } catch (UnsupportedVersionException e) { - s_logger.error("Unable to parse bytes: ", e); + logger.error("Unable to parse bytes: ", e); } assert sresp != null : "Couldn't get the response back"; @@ -138,7 +112,7 @@ 
public void testSerDeser() { } public void testSerDeserTO() { - s_logger.info("Testing serializing and deserializing interface TO works as expected"); + logger.info("Testing serializing and deserializing interface TO works as expected"); NfsTO nfs = new NfsTO("nfs://192.168.56.10/opt/storage/secondary", DataStoreRole.Image); // SecStorageSetupCommand cmd = new SecStorageSetupCommand(nfs, "nfs://192.168.56.10/opt/storage/secondary", null); @@ -156,9 +130,9 @@ public void testSerDeserTO() { try { creq = Request.parse(bytes); } catch (ClassNotFoundException e) { - s_logger.error("Unable to parse bytes: ", e); + logger.error("Unable to parse bytes: ", e); } catch (UnsupportedVersionException e) { - s_logger.error("Unable to parse bytes: ", e); + logger.error("Unable to parse bytes: ", e); } assert creq != null : "Couldn't get the request back"; @@ -168,7 +142,7 @@ public void testSerDeserTO() { } public void testDownload() { - s_logger.info("Testing Download answer"); + logger.info("Testing Download answer"); VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class); Mockito.when(template.getId()).thenReturn(1L); Mockito.when(template.getFormat()).thenReturn(ImageFormat.QCOW2); @@ -193,7 +167,7 @@ public void testDownload() { } public void testCompress() { - s_logger.info("testCompress"); + logger.info("testCompress"); int len = 800000; ByteBuffer inputBuffer = ByteBuffer.allocate(len); for (int i = 0; i < len; i++) { @@ -202,7 +176,7 @@ public void testCompress() { inputBuffer.limit(len); ByteBuffer compressedBuffer = ByteBuffer.allocate(len); compressedBuffer = Request.doCompress(inputBuffer, len); - s_logger.info("compressed length: " + compressedBuffer.limit()); + logger.info("compressed length: " + compressedBuffer.limit()); ByteBuffer decompressedBuffer = ByteBuffer.allocate(len); decompressedBuffer = Request.doDecompress(compressedBuffer, len); for (int i = 0; i < len; i++) { @@ -212,29 +186,6 @@ public void testCompress() { } } - public void 
testLogging() { - s_logger.info("Testing Logging"); - GetHostStatsCommand cmd3 = new GetHostStatsCommand("hostguid", "hostname", 101); - Request sreq = new Request(2, 3, new Command[] {cmd3}, true, true); - sreq.setSequence(1); - Logger logger = Logger.getLogger(GsonHelper.class); - Level level = logger.getLevel(); - - logger.setLevel(Level.DEBUG); - String log = sreq.log("Debug", true, Level.DEBUG); - assert (log == null); - - log = sreq.log("Debug", false, Level.DEBUG); - assert (log != null); - - logger.setLevel(Level.TRACE); - log = sreq.log("Trace", true, Level.TRACE); - assert (log.contains(GetHostStatsCommand.class.getSimpleName())); - s_logger.debug(log); - - logger.setLevel(level); - } - protected void compareRequest(Request req1, Request req2) { assert req1.getSequence() == req2.getSequence(); assert req1.getAgentId() == req2.getAgentId(); @@ -253,24 +204,24 @@ protected void compareRequest(Request req1, Request req2) { } public void testGoodCommand() { - s_logger.info("Testing good Command"); + logger.info("Testing good Command"); String content = "[{\"com.cloud.agent.api.GetVolumeStatsCommand\":{\"volumeUuids\":[\"dcc860ac-4a20-498f-9cb3-bab4d57aa676\"]," + "\"poolType\":{\"name\":\"NetworkFilesystem\"},\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]"; Request sreq = new Request(Version.v2, 1L, 2L, 3L, 1L, (short)1, content); sreq.setSequence(1); Command cmds[] = sreq.getCommands(); - s_logger.debug("Command class = " + cmds[0].getClass().getSimpleName()); + logger.debug("Command class = " + cmds[0].getClass().getSimpleName()); assert cmds[0].getClass().equals(GetVolumeStatsCommand.class); } public void testBadCommand() { - s_logger.info("Testing Bad Command"); + logger.info("Testing Bad Command"); String content = "[{\"com.cloud.agent.api.SomeJunkCommand\":{\"volumeUuids\":[\"dcc860ac-4a20-498f-9cb3-bab4d57aa676\"]," + 
"\"poolType\":{\"name\":\"NetworkFilesystem\"},\"poolUuid\":\"e007c270-2b1b-3ce9-ae92-a98b94eef7eb\",\"contextMap\":{},\"wait\":5}}]"; Request sreq = new Request(Version.v2, 1L, 2L, 3L, 1L, (short)1, content); sreq.setSequence(1); Command cmds[] = sreq.getCommands(); - s_logger.debug("Command class = " + cmds[0].getClass().getSimpleName()); + logger.debug("Command class = " + cmds[0].getClass().getSimpleName()); assert cmds[0].getClass().equals(BadCommand.class); } diff --git a/debian/cloudstack-management.postinst b/debian/cloudstack-management.postinst index 4527cbe068c5..fadb7e57eca1 100755 --- a/debian/cloudstack-management.postinst +++ b/debian/cloudstack-management.postinst @@ -57,7 +57,7 @@ if [ "$1" = configure ]; then chgrp cloud ${CONFDIR}/${DBPROPS} chown -R cloud:cloud /var/log/cloudstack/management - ln -sf ${CONFDIR}/log4j-cloud.xml ${CONFDIR}/log4j.xml + ln -sf ${CONFDIR}/log4j-cloud.xml ${CONFDIR}/log4j2.xml # Add jdbc MySQL driver settings to db.properties if not present grep -s -q "db.cloud.driver=jdbc:mysql" ${CONFDIR}/${DBPROPS} || sed -i -e "\$adb.cloud.driver=jdbc:mysql" ${CONFDIR}/${DBPROPS} diff --git a/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java b/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java index 1a2fab150a7b..27f63c8c64b2 100644 --- a/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java +++ b/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java @@ -26,7 +26,8 @@ import javax.inject.Inject; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -46,7 +47,7 @@ public class UsageEventUtils { private static UsageEventDao s_usageEventDao; private static AccountDao s_accountDao; private static 
DataCenterDao s_dcDao; - private static final Logger s_logger = Logger.getLogger(UsageEventUtils.class); + protected static Logger LOGGER = LogManager.getLogger(UsageEventUtils.class); protected static EventBus s_eventBus = null; protected static ConfigurationDao s_configDao; @@ -240,7 +241,7 @@ private static void publishUsageEvent(String usageEventType, Long accountId, Lon try { s_eventBus.publish(event); } catch (EventBusException e) { - s_logger.warn("Failed to publish usage event on the event bus."); + LOGGER.warn("Failed to publish usage event on the event bus."); } } diff --git a/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java b/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java index 1e1251d8cdcf..24be76e4d3be 100644 --- a/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java +++ b/engine/components-api/src/main/java/com/cloud/network/NetworkStateListener.java @@ -27,7 +27,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import com.cloud.event.EventCategory; @@ -44,7 +45,7 @@ public class NetworkStateListener implements StateListener, Method> _handlerMethodMap = new HashMap, Method>(); @@ -99,30 +100,30 @@ public Pair handleVmWorkJob(VmWork work) throws Exceptio if (method != null) { try { - if (s_logger.isDebugEnabled()) - s_logger.debug("Execute VM work job: " + work.getClass().getName() + work); + if (logger.isDebugEnabled()) + logger.debug("Execute VM work job: " + work.getClass().getName() + work); Object obj = method.invoke(_target, work); - if (s_logger.isDebugEnabled()) - s_logger.debug("Done executing VM work job: " + 
work.getClass().getName() + work); + if (logger.isDebugEnabled()) + logger.debug("Done executing VM work job: " + work.getClass().getName() + work); assert (obj instanceof Pair); return (Pair)obj; } catch (InvocationTargetException e) { - s_logger.error("Invocation exception, caused by: " + e.getCause()); + logger.error("Invocation exception, caused by: " + e.getCause()); // legacy CloudStack code relies on checked exception for error handling // we need to re-throw the real exception here if (e.getCause() != null && e.getCause() instanceof Exception) { - s_logger.info("Rethrow exception " + e.getCause()); + logger.info("Rethrow exception " + e.getCause()); throw (Exception)e.getCause(); } throw e; } } else { - s_logger.error("Unable to find handler for VM work job: " + work.getClass().getName() + _gsonLogger.toJson(work)); + logger.error("Unable to find handler for VM work job: " + work.getClass().getName() + _gsonLogger.toJson(work)); RuntimeException ex = new RuntimeException("Unable to find handler for VM work job: " + work.getClass().getName()); return new Pair(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java index b12a72136dd9..1b7069c6e73a 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentAttache.java @@ -34,7 +34,8 @@ import com.cloud.agent.api.CleanupPersistentNetworkResourceCommand; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.Listener; import com.cloud.agent.api.Answer; @@ -69,7 +70,7 @@ * AgentAttache provides basic commands to be 
implemented. */ public abstract class AgentAttache { - private static final Logger s_logger = Logger.getLogger(AgentAttache.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final ScheduledExecutorService s_listenerExecutor = Executors.newScheduledThreadPool(10, new NamedThreadFactory("ListenerTimer")); private static final Random s_rand = new Random(System.currentTimeMillis()); @@ -196,8 +197,8 @@ protected void cancel(final Request req) { } protected synchronized void cancel(final long seq) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Cancelling.")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Cancelling.")); } final Listener listener = _waitForList.remove(seq); if (listener != null) { @@ -222,8 +223,8 @@ protected String log(final long seq, final String msg) { } protected void registerListener(final long seq, final Listener listener) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(log(seq, "Registering listener")); + if (logger.isTraceEnabled()) { + logger.trace(log(seq, "Registering listener")); } if (listener.getTimeout() != -1) { s_listenerExecutor.schedule(new Alarm(seq), listener.getTimeout(), TimeUnit.SECONDS); @@ -232,8 +233,8 @@ protected void registerListener(final long seq, final Listener listener) { } protected Listener unregisterListener(final long sequence) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(log(sequence, "Unregistering listener")); + if (logger.isTraceEnabled()) { + logger.trace(log(sequence, "Unregistering listener")); } return _waitForList.remove(sequence); } @@ -266,7 +267,7 @@ public int getNonRecurringListenersSize() { final Listener monitor = entry.getValue(); if (!monitor.isRecurring()) { //TODO - remove this debug statement later - s_logger.debug("Listener is " + entry.getValue() + " waiting on " + entry.getKey()); + logger.debug("Listener is " + entry.getValue() + " waiting on " + entry.getKey()); nonRecurringListenersList.add(monitor); } } @@ 
-289,13 +290,13 @@ public boolean processAnswers(final long seq, final Response resp) { if (answers[0] != null && answers[0].getResult()) { processed = true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Unable to find listener.")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Unable to find listener.")); } } else { processed = monitor.processAnswers(_id, seq, answers); - if (s_logger.isTraceEnabled()) { - s_logger.trace(log(seq, (processed ? "" : " did not ") + " processed ")); + if (logger.isTraceEnabled()) { + logger.trace(log(seq, (processed ? "" : " did not ") + " processed ")); } if (!monitor.isRecurring()) { @@ -323,8 +324,8 @@ protected void cancelAllCommands(final Status state, final boolean cancelActive) final Map.Entry entry = it.next(); it.remove(); final Listener monitor = entry.getValue(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass())); + if (logger.isDebugEnabled()) { + logger.debug(log(entry.getKey(), "Sending disconnect to " + monitor.getClass())); } monitor.processDisconnect(_id, state); } @@ -356,8 +357,8 @@ public void send(final Request req, final Listener listener) throws AgentUnavail long seq = req.getSequence(); if (listener != null) { registerListener(seq, listener); - } else if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Routed from " + req.getManagementServerId())); + } else if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Routed from " + req.getManagementServerId())); } synchronized (this) { @@ -380,16 +381,16 @@ public void send(final Request req, final Listener listener) throws AgentUnavail if (req.executeInSequence() && _currentSequence == null) { _currentSequence = seq; - if (s_logger.isTraceEnabled()) { - s_logger.trace(log(seq, " is current sequence")); + if (logger.isTraceEnabled()) { + logger.trace(log(seq, " is current sequence")); } } } catch (AgentUnavailableException e) { - s_logger.info(log(seq, "Unable 
to send due to " + e.getMessage())); + logger.info(log(seq, "Unable to send due to " + e.getMessage())); cancel(seq); throw e; } catch (Exception e) { - s_logger.warn(log(seq, "Unable to send due to "), e); + logger.warn(log(seq, "Unable to send due to "), e); cancel(seq); throw new AgentUnavailableException("Problem due to other exception " + e.getMessage(), _id); } @@ -408,10 +409,10 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE try { answers = sl.waitFor(wait); } catch (final InterruptedException e) { - s_logger.debug(log(seq, "Interrupted")); + logger.debug(log(seq, "Interrupted")); } if (answers != null) { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { new Response(req, answers).logD("Received: ", false); } return answers; @@ -419,7 +420,7 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE answers = sl.getAnswers(); // Try it again. if (answers != null) { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { new Response(req, answers).logD("Received after timeout: ", true); } @@ -429,21 +430,21 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE final Long current = _currentSequence; if (current != null && seq != current) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Waited too long.")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Waited too long.")); } throw new OperationTimedoutException(req.getCommands(), _id, seq, wait, false); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Waiting some more time because this is the current command")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Waiting some more time because this is the current command")); } } throw new OperationTimedoutException(req.getCommands(), _id, seq, wait * 2, true); } catch (OperationTimedoutException e) { - s_logger.warn(log(seq, "Timed out on " + req.toString())); + logger.warn(log(seq, "Timed out 
on " + req.toString())); cancel(seq); final Long current = _currentSequence; if (req.executeInSequence() && (current != null && current == seq)) { @@ -451,7 +452,7 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE } throw e; } catch (Exception e) { - s_logger.warn(log(seq, "Exception while waiting for answer"), e); + logger.warn(log(seq, "Exception while waiting for answer"), e); cancel(seq); final Long current = _currentSequence; if (req.executeInSequence() && (current != null && current == seq)) { @@ -466,21 +467,21 @@ public Answer[] send(final Request req, final int wait) throws AgentUnavailableE protected synchronized void sendNext(final long seq) { _currentSequence = null; if (_requests.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "No more commands found")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "No more commands found")); } return; } Request req = _requests.pop(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(req.getSequence(), "Sending now. is current sequence.")); + if (logger.isDebugEnabled()) { + logger.debug(log(req.getSequence(), "Sending now. 
is current sequence.")); } try { send(req); } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(req.getSequence(), "Unable to send the next sequence")); + if (logger.isDebugEnabled()) { + logger.debug(log(req.getSequence(), "Unable to send the next sequence")); } cancel(req.getSequence()); } @@ -527,7 +528,7 @@ protected void runInContext() { listener.processTimeout(_id, _seq); } } catch (Exception e) { - s_logger.warn("Exception ", e); + logger.warn("Exception ", e); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 606a902dce7c..50c7b151d77e 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -53,8 +53,6 @@ import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.MDC; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -124,12 +122,12 @@ import com.cloud.utils.nio.Task; import com.cloud.utils.time.InaccurateClock; import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.ThreadContext; /** * Implementation of the Agent Manager. This class controls the connection to the agents. **/ public class AgentManagerImpl extends ManagerBase implements AgentManager, HandlerFactory, Configurable { - protected static final Logger s_logger = Logger.getLogger(AgentManagerImpl.class); /** * _agents is a ConcurrentHashMap, but it is used from within a synchronized block. This will be reported by findbugs as JLM_JSR166_UTILCONCURRENT_MONITORENTER. 
Maybe a @@ -210,12 +208,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - s_logger.info("Ping Timeout is " + mgmtServiceConf.getPingTimeout()); + logger.info("Ping Timeout is " + mgmtServiceConf.getPingTimeout()); final int threads = DirectAgentLoadSize.value(); _nodeId = ManagementServerNode.getManagementServerId(); - s_logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId); + logger.info("Configuring AgentManagerImpl. management server node id(msid): " + _nodeId); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(_nodeId, lastPing); @@ -231,13 +229,13 @@ public boolean configure(final String name, final Map params) th _connectExecutor.allowCoreThreadTimeOut(true); _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService); - s_logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers"); + logger.info("Listening on " + Port.value() + " with " + Workers.value() + " workers"); // executes all agent commands other than cron and ping _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - s_logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value()); + logger.debug("Created DirectAgentAttache pool with size: " + DirectAgentPoolSize.value()); _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); @@ -268,8 +266,8 @@ public 
int registerForHostEvents(final Listener listener, final boolean connecti _cmdMonitors.add(new Pair(_monitorId, listener)); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); + if (logger.isDebugEnabled()) { + logger.debug("Registering listener " + listener.getClass().getSimpleName() + " with id " + _monitorId); } return _monitorId; } @@ -290,7 +288,7 @@ public int registerForInitialConnects(final StartupCommandProcessor creator, fin @Override public void unregisterForHostEvents(final int id) { - s_logger.debug("Deregistering " + id); + logger.debug("Deregistering " + id); _hostMonitors.remove(id); } @@ -305,15 +303,15 @@ private AgentControlAnswer handleControlCommand(final AgentAttache attache, fina } } - s_logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId()); + logger.warn("No handling of agent control command: " + cmd + " sent from " + attache.getId()); return new AgentControlAnswer(cmd); } public void handleCommands(final AgentAttache attache, final long sequence, final Command[] cmds) { for (final Pair listener : _cmdMonitors) { final boolean processed = listener.second().processCommands(attache.getId(), sequence, cmds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? "processed" : "not processed") + " by " + listener.getClass()); + if (logger.isTraceEnabled()) { + logger.trace("SeqA " + attache.getId() + "-" + sequence + ": " + (processed ? 
"processed" : "not processed") + " by " + listener.getClass()); } } } @@ -350,9 +348,9 @@ public Answer sendTo(final Long dcId, final HypervisorType type, final Command c } catch (final Exception e) { String errorMsg = String.format("Error sending command %s to host %s, due to %s", cmd.getClass().getName(), host.getUuid(), e.getLocalizedMessage()); - s_logger.error(errorMsg); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errorMsg, e); + logger.error(errorMsg); + if (logger.isDebugEnabled()) { + logger.debug(errorMsg, e); } } if (answer != null) { @@ -374,7 +372,7 @@ public Answer send(final Long hostId, final Command cmd) throws AgentUnavailable } if (answers != null && answers[0] instanceof UnsupportedAnswer) { - s_logger.warn("Unsupported Command: " + answers[0].getDetails()); + logger.warn("Unsupported Command: " + answers[0].getDetails()); return answers[0]; } @@ -398,7 +396,7 @@ private static void tagCommand(final Command cmd) { cmd.setContextParam("job", "job-" + job.getId()); } } - String logcontextid = (String) MDC.get("logcontextid"); + String logcontextid = ThreadContext.get("logcontextid"); if (StringUtils.isNotEmpty(logcontextid)) { cmd.setContextParam("logid", logcontextid); } @@ -471,14 +469,14 @@ protected Status investigate(final AgentAttache agent) { final Long hostId = agent.getId(); final HostVO host = _hostDao.findById(hostId); if (host != null && host.getType() != null && !host.getType().isVirtual()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("checking if agent (" + hostId + ") is alive"); + if (logger.isDebugEnabled()) { + logger.debug("checking if agent (" + hostId + ") is alive"); } final Answer answer = easySend(hostId, new CheckHealthCommand()); if (answer != null && answer.getResult()) { final Status status = Status.Up; - if (s_logger.isDebugEnabled()) { - s_logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); + if (logger.isDebugEnabled()) { + logger.debug("agent (" + 
hostId + ") responded to checkHeathCommand, reporting that agent is " + status); } return status; } @@ -493,7 +491,7 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce } final AgentAttache agent = findAttache(hostId); if (agent == null) { - s_logger.debug("Unable to find agent for " + hostId); + logger.debug("Unable to find agent for " + hostId); throw new AgentUnavailableException("Unable to find agent ", hostId); } @@ -521,8 +519,8 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { return; } final long hostId = attache.getId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Remove Agent : " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Remove Agent : " + hostId); } AgentAttache removed = null; boolean conflict = false; @@ -535,15 +533,15 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { } } if (conflict) { - s_logger.debug("Agent for host " + hostId + " is created when it is being disconnected"); + logger.debug("Agent for host " + hostId + " is created when it is being disconnected"); } if (removed != null) { removed.disconnect(nextState); } for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending Disconnect to listener: " + monitor.second().getClass().getName()); } monitor.second().processDisconnect(hostId, nextState); } @@ -552,8 +550,8 @@ public void removeAgent(final AgentAttache attache, final Status nextState) { @Override public void notifyMonitorsOfNewlyAddedHost(long hostId) { for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending host added to listener: " + monitor.second().getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending host added to listener: " + 
monitor.second().getClass().getSimpleName()); } monitor.second().processHostAdded(hostId); @@ -564,8 +562,8 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi final long hostId = attache.getId(); final HostVO host = _hostDao.findById(hostId); for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending Connect to listener: " + monitor.second().getClass().getSimpleName()); } for (int i = 0; i < cmd.length; i++) { try { @@ -574,11 +572,11 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi if (e instanceof ConnectionException) { final ConnectionException ce = (ConnectionException)e; if (ce.isSetupError()) { - s_logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); + logger.warn("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw ce; } else { - s_logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); + logger.info("Monitor " + monitor.second().getClass().getSimpleName() + " says not to continue the connect process for " + hostId + " due to " + e.getMessage()); handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); return attache; } @@ -586,7 +584,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi handleDisconnectWithoutInvestigation(attache, Event.ShutdownRequested, true, true); throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } else { - 
s_logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e); + logger.error("Monitor " + monitor.second().getClass().getSimpleName() + " says there is an error in the connect process for " + hostId + " due to " + e.getMessage(), e); handleDisconnectWithoutInvestigation(attache, Event.AgentDisconnected, true, true); throw new CloudRuntimeException("Unable to connect " + attache.getId(), e); } @@ -609,7 +607,7 @@ protected AgentAttache notifyMonitorsOfConnection(final AgentAttache attache, fi Map detailsMap = readyAnswer.getDetailsMap(); if (detailsMap != null) { String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE); - s_logger.debug(String.format("Got HOST_UEFI_ENABLE [%s] for hostId [%s]:", uefiEnabled, host.getUuid())); + logger.debug(String.format("Got HOST_UEFI_ENABLE [%s] for hostId [%s]:", uefiEnabled, host.getUuid())); if (uefiEnabled != null) { _hostDao.loadDetails(host); if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { @@ -633,7 +631,7 @@ public boolean start() { try { _connection.start(); } catch (final NioConnectionException e) { - s_logger.error("Error when connecting to the NioServer!", e); + logger.error("Error when connecting to the NioServer!", e); } } @@ -657,19 +655,19 @@ private ServerResource loadResourcesWithoutHypervisor(final HostVO host) { final Constructor constructor = clazz.getConstructor(); resource = (ServerResource)constructor.newInstance(); } catch (final ClassNotFoundException e) { - s_logger.warn("Unable to find class " + host.getResource(), e); + logger.warn("Unable to find class " + host.getResource(), e); } catch (final InstantiationException e) { - s_logger.warn("Unable to instantiate class " + host.getResource(), e); + logger.warn("Unable to instantiate class " + host.getResource(), e); } catch (final IllegalAccessException e) { - s_logger.warn("Illegal access " + host.getResource(), e); + 
logger.warn("Illegal access " + host.getResource(), e); } catch (final SecurityException e) { - s_logger.warn("Security error on " + host.getResource(), e); + logger.warn("Security error on " + host.getResource(), e); } catch (final NoSuchMethodException e) { - s_logger.warn("NoSuchMethodException error on " + host.getResource(), e); + logger.warn("NoSuchMethodException error on " + host.getResource(), e); } catch (final IllegalArgumentException e) { - s_logger.warn("IllegalArgumentException error on " + host.getResource(), e); + logger.warn("IllegalArgumentException error on " + host.getResource(), e); } catch (final InvocationTargetException e) { - s_logger.warn("InvocationTargetException error on " + host.getResource(), e); + logger.warn("InvocationTargetException error on " + host.getResource(), e); } if (resource != null) { @@ -703,12 +701,12 @@ private ServerResource loadResourcesWithoutHypervisor(final HostVO host) { try { resource.configure(host.getName(), params); } catch (final ConfigurationException e) { - s_logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to " + e.getMessage()); return null; } if (!resource.start()) { - s_logger.warn("Unable to start the resource"); + logger.warn("Unable to start the resource"); return null; } } @@ -726,14 +724,14 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for // load the respective discoverer final Discoverer discoverer = _resourceMgr.getMatchingDiscover(host.getHypervisorType()); if (discoverer == null) { - s_logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType()); + logger.info("Could not to find a Discoverer to load the resource: " + host.getId() + " for hypervisor type: " + host.getHypervisorType()); resource = loadResourcesWithoutHypervisor(host); } else { resource = discoverer.reloadResource(host); } if (resource == null) { - 
s_logger.warn("Unable to load the resource: " + host.getId()); + logger.warn("Unable to load the resource: " + host.getId()); return false; } @@ -759,7 +757,7 @@ protected boolean loadDirectlyConnectedHost(final HostVO host, final boolean for } protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) throws ConnectionException { - s_logger.debug("create DirectAgentAttache for " + host.getId()); + logger.debug("create DirectAgentAttache for " + host.getId()); final DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates()); AgentAttache old = null; @@ -780,13 +778,13 @@ public boolean stop() { _connection.stop(); } - s_logger.info("Disconnecting agents: " + _agents.size()); + logger.info("Disconnecting agents: " + _agents.size()); synchronized (_agents) { for (final AgentAttache agent : _agents.values()) { final HostVO host = _hostDao.findById(agent.getId()); if (host == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cant not find host " + agent.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cant not find host " + agent.getId()); } } else { if (!agent.forForward()) { @@ -805,8 +803,8 @@ protected Status getNextStatusOnDisconnection(Host host, final Status.Event even final Status currentStatus = host.getStatus(); Status nextStatus; if (currentStatus == Status.Down || currentStatus == Status.Alert || currentStatus == Status.Removed) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Host %s is already %s", host.getUuid(), currentStatus)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Host %s is already %s", host.getUuid(), currentStatus)); } nextStatus = currentStatus; } else { @@ -814,12 +812,12 @@ protected Status getNextStatusOnDisconnection(Host host, final Status.Event even nextStatus = currentStatus.getNextStatus(event); } catch (final NoTransitionException e) { final String err = 
String.format("Cannot find next status for %s as current status is %s for agent %s", event, currentStatus, host.getUuid()); - s_logger.debug(err); + logger.debug(err); throw new CloudRuntimeException(err); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("The next status of agent %s is %s, current status is %s", host.getUuid(), nextStatus, currentStatus)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("The next status of agent %s is %s, current status is %s", host.getUuid(), nextStatus, currentStatus)); } } return nextStatus; @@ -832,19 +830,19 @@ protected boolean handleDisconnectWithoutInvestigation(final AgentAttache attach GlobalLock joinLock = getHostJoinLock(hostId); if (joinLock.lock(60)) { try { - s_logger.info(String.format("Host %d is disconnecting with event %s", hostId, event)); + logger.info(String.format("Host %d is disconnecting with event %s", hostId, event)); Status nextStatus = null; final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.warn(String.format("Can't find host with %d", hostId)); + logger.warn(String.format("Can't find host with %d", hostId)); nextStatus = Status.Removed; } else { nextStatus = getNextStatusOnDisconnection(host, event); caService.purgeHostCertificate(host); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Deregistering link for %d with state %s", hostId, nextStatus)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Deregistering link for %d with state %s", hostId, nextStatus)); } removeAgent(attache, nextStatus); @@ -875,50 +873,50 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, * Agent may be currently in status of Down, Alert, Removed, namely there is no next status for some events. Why this can happen? Ask God not me. I hate there was * no piece of comment for code handling race condition. God knew what race condition the code dealt with! 
*/ - s_logger.debug("Caught exception while getting agent's next status", ne); + logger.debug("Caught exception while getting agent's next status", ne); } if (nextStatus == Status.Alert) { /* OK, we are going to the bad status, let's see what happened */ - s_logger.info("Investigating why host " + hostId + " has disconnected with event " + event); + logger.info("Investigating why host " + hostId + " has disconnected with event " + event); Status determinedState = investigate(attache); // if state cannot be determined do nothing and bail out if (determinedState == null) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - s_logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state"); + logger.warn("Agent " + hostId + " state cannot be determined for more than " + AlertWait + "(" + AlertWait.value() + ") seconds, will go to Alert state"); determinedState = Status.Alert; } else { - s_logger.warn("Agent " + hostId + " state cannot be determined, do nothing"); + logger.warn("Agent " + hostId + " state cannot be determined, do nothing"); return false; } } final Status currentStatus = host.getStatus(); - s_logger.info("The agent from host " + hostId + " state determined is " + determinedState); + logger.info("The agent from host " + hostId + " state determined is " + determinedState); if (determinedState == Status.Down) { final String message = "Host is down: " + host.getId() + "-" + host.getName() + ". 
Starting HA on the VMs"; - s_logger.error(message); + logger.error(message); if (host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.ConsoleProxy) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Host down, " + host.getId(), message); } event = Status.Event.HostDown; } else if (determinedState == Status.Up) { /* Got ping response from host, bring it back */ - s_logger.info("Agent is determined to be up and running"); + logger.info("Agent is determined to be up and running"); agentStatusTransitTo(host, Status.Event.Ping, _nodeId); return false; } else if (determinedState == Status.Disconnected) { - s_logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName() + + logger.warn("Agent is disconnected but the host is still up: " + host.getId() + "-" + host.getName() + '-' + host.getResourceState()); if (currentStatus == Status.Disconnected || (currentStatus == Status.Up && host.getResourceState() == ResourceState.PrepareForMaintenance)) { if ((System.currentTimeMillis() >> 10) - host.getLastPinged() > AlertWait.value()) { - s_logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected."); + logger.warn("Host " + host.getId() + " has been disconnected past the wait time it should be disconnected."); event = Status.Event.WaitedTooLong; } else { - s_logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet."); + logger.debug("Host " + host.getId() + " has been determined to be disconnected but it hasn't passed the wait time yet."); return false; } } else if (currentStatus == Status.Up) { @@ -941,7 +939,7 @@ protected boolean handleDisconnectWithInvestigation(final AgentAttache attache, "In availability zone " + host.getDataCenterId() + ", host is in alert state: " + host.getId() + "-" + host.getName()); } } else { - s_logger.debug("The next status of 
agent " + host.getId() + " is not Alert, no need to investigate what happened"); + logger.debug("The next status of agent " + host.getId() + " is not Alert, no need to investigate what happened"); } } handleDisconnectWithoutInvestigation(attache, event, true, true); @@ -972,7 +970,7 @@ protected void runInContext() { handleDisconnectWithoutInvestigation(_attache, _event, true, false); } } catch (final Exception e) { - s_logger.error("Exception caught while handling disconnect: ", e); + logger.error("Exception caught while handling disconnect: ", e); } } } @@ -982,34 +980,34 @@ public Answer easySend(final Long hostId, final Command cmd) { try { final Host h = _hostDao.findById(hostId); if (h == null || h.getRemoved() != null) { - s_logger.debug("Host with id " + hostId + " doesn't exist"); + logger.debug("Host with id " + hostId + " doesn't exist"); return null; } final Status status = h.getStatus(); if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { - s_logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up"); + logger.debug("Can not send command " + cmd + " due to Host " + hostId + " is not up"); return null; } final Answer answer = send(hostId, cmd); if (answer == null) { - s_logger.warn("send returns null answer"); + logger.warn("send returns null answer"); return null; } - if (s_logger.isDebugEnabled() && answer.getDetails() != null) { - s_logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails()); + if (logger.isDebugEnabled() && answer.getDetails() != null) { + logger.debug("Details from executing " + cmd.getClass() + ": " + answer.getDetails()); } return answer; } catch (final AgentUnavailableException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); return null; } catch (final OperationTimedoutException e) { - s_logger.warn("Operation timed out: " + e.getMessage()); + logger.warn("Operation timed out: " + e.getMessage()); return null; } catch (final Exception e) 
{ - s_logger.warn("Exception while sending", e); + logger.warn("Exception while sending", e); return null; } } @@ -1037,7 +1035,7 @@ public void reconnect(final long hostId) throws AgentUnavailableException { } if (host.getStatus() == Status.Disconnected) { - s_logger.debug("Host is already disconnected, no work to be done: " + hostId); + logger.debug("Host is already disconnected, no work to be done: " + hostId); return; } @@ -1055,8 +1053,8 @@ public void reconnect(final long hostId) throws AgentUnavailableException { @Override public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending host about to be removed to listener: " + monitor.second().getClass().getSimpleName()); } monitor.second().processHostAboutToBeRemoved(hostId); @@ -1066,8 +1064,8 @@ public void notifyMonitorsOfHostAboutToBeRemoved(long hostId) { @Override public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { for (final Pair monitor : _hostMonitors) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending host removed to listener: " + monitor.second().getClass().getSimpleName()); } monitor.second().processHostRemoved(hostId, clusterId); @@ -1076,8 +1074,8 @@ public void notifyMonitorsOfRemovedHost(long hostId, long clusterId) { public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received agent disconnect event for host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Received agent disconnect event for host " + hostId); } AgentAttache 
attache = null; attache = findAttache(hostId); @@ -1090,7 +1088,7 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A try { reconnect(hostId); } catch (CloudRuntimeException e) { - s_logger.debug("Error on shutdown request for hostID: " + hostId, e); + logger.debug("Error on shutdown request for hostID: " + hostId, e); return false; } return true; @@ -1105,7 +1103,7 @@ public boolean isAgentAttached(final long hostId) { } protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) throws ConnectionException { - s_logger.debug("create ConnectedAgentAttache for " + host.getId()); + logger.debug("create ConnectedAgentAttache for " + host.getId()); final AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); @@ -1143,7 +1141,7 @@ private AgentAttache sendReadyAndGetAttache(HostVO host, ReadyCommand ready, Lin ready.setMsHostList(newMSList); ready.setLbAlgorithm(indirectAgentLB.getLBAlgorithmName()); ready.setLbCheckInterval(indirectAgentLB.getLBPreferredHostCheckInterval(host.getClusterId())); - s_logger.debug("Agent's management server host list is not up to date, sending list update:" + newMSList); + logger.debug("Agent's management server host list is not up to date, sending list update:" + newMSList); } attache = createAttacheForConnect(host, link); @@ -1168,7 +1166,7 @@ private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[ attache = sendReadyAndGetAttache(host, ready, link, startup); } } catch (final Exception e) { - s_logger.debug("Failed to handle host connection: ", e); + logger.debug("Failed to handle host connection: ", e); ready = new ReadyCommand(null); ready.setDetails(e.toString()); } finally { @@ -1185,7 +1183,7 @@ private AgentAttache handleConnectedAgent(final Link link, final StartupCommand[ easySend(attache.getId(), ready); } } catch (final Exception e) { - s_logger.debug("Failed to 
send ready command:" + e.toString()); + logger.debug("Failed to send ready command:" + e.toString()); } return attache; } @@ -1204,28 +1202,28 @@ public SimulateStartTask(final long id, final ServerResource resource, final Map @Override protected void runInContext() { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Simulating start for resource " + resource.getName() + " id " + id); + if (logger.isDebugEnabled()) { + logger.debug("Simulating start for resource " + resource.getName() + " id " + id); } if (tapLoadingAgents(id, TapAgentsAction.Add)) { try { final AgentAttache agentattache = findAttache(id); if (agentattache == null) { - s_logger.debug("Creating agent for host " + id); + logger.debug("Creating agent for host " + id); _resourceMgr.createHostAndAgent(id, resource, details, false, null, false); - s_logger.debug("Completed creating agent for host " + id); + logger.debug("Completed creating agent for host " + id); } else { - s_logger.debug("Agent already created in another thread for host " + id + ", ignore this"); + logger.debug("Agent already created in another thread for host " + id + ", ignore this"); } } finally { tapLoadingAgents(id, TapAgentsAction.Del); } } else { - s_logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this"); + logger.debug("Agent creation already getting processed in another thread for host " + id + ", ignore this"); } } catch (final Exception e) { - s_logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); + logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); } } } @@ -1251,7 +1249,7 @@ protected void runInContext() { final AgentAttache attache = handleConnectedAgent(_link, startups, _request); if (attache == null) { - s_logger.warn("Unable to create attache for agent: " + _request); + logger.warn("Unable to create attache for agent: " + _request); } } } @@ -1274,7 +1272,7 @@ protected void 
connectAgent(final Link link, final Command[] cmds, final Request try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - s_logger.debug("Failed to send startupanswer: " + e.toString()); + logger.debug("Failed to send startupanswer: " + e.toString()); } _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request)); } @@ -1290,11 +1288,11 @@ private void processHostHealthCheckResult(Boolean hostHealthCheckResult, long ho } HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.error(String.format("Unable to find host with ID: %s", hostId)); + logger.error(String.format("Unable to find host with ID: %s", hostId)); return; } if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) { - s_logger.debug(String.format("%s is disabled for the cluster %s, cannot process the health check result " + + logger.debug(String.format("%s is disabled for the cluster %s, cannot process the health check result " + "received for the host %s", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName())); return; } @@ -1302,19 +1300,19 @@ private void processHostHealthCheckResult(Boolean hostHealthCheckResult, long ho ResourceState.Event resourceEvent = hostHealthCheckResult ? ResourceState.Event.Enable : ResourceState.Event.Disable; try { - s_logger.info(String.format("Host health check %s, auto %s KVM host: %s", + logger.info(String.format("Host health check %s, auto %s KVM host: %s", hostHealthCheckResult ? "succeeds" : "fails", hostHealthCheckResult ? 
"enabling" : "disabling", host.getName())); _resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent); } catch (NoTransitionException e) { - s_logger.error(String.format("Cannot Auto %s host: %s", resourceEvent, host.getName()), e); + logger.error(String.format("Cannot Auto %s host: %s", resourceEvent, host.getName()), e); } } private void processStartupRoutingCommand(StartupRoutingCommand startup, long hostId) { if (startup == null) { - s_logger.error("Empty StartupRoutingCommand received"); + logger.error("Empty StartupRoutingCommand received"); return; } Boolean hostHealthCheckResult = startup.getHostHealthCheckResult(); @@ -1323,7 +1321,7 @@ private void processStartupRoutingCommand(StartupRoutingCommand startup, long ho private void processPingRoutingCommand(PingRoutingCommand pingRoutingCommand, long hostId) { if (pingRoutingCommand == null) { - s_logger.error("Empty PingRoutingCommand received"); + logger.error("Empty PingRoutingCommand received"); return; } Boolean hostHealthCheckResult = pingRoutingCommand.getHostHealthCheckResult(); @@ -1338,7 +1336,7 @@ protected void processRequest(final Link link, final Request request) { if (attache == null) { if (!(cmd instanceof StartupCommand)) { - s_logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); + logger.warn("Throwing away a request because it came through as the first command on a connect: " + request); } else { // submit the task for execution request.logD("Scheduling the first command "); @@ -1352,17 +1350,17 @@ protected void processRequest(final Link link, final Request request) { final long hostId = attache.getId(); final String hostName = attache.getName(); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (cmd instanceof PingRoutingCommand) { logD = false; - s_logger.debug("Ping from Routing host " + hostId + "(" + hostName + ")"); - s_logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + 
request); + logger.debug("Ping from Routing host " + hostId + "(" + hostName + ")"); + logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); } else if (cmd instanceof PingCommand) { logD = false; - s_logger.debug("Ping from " + hostId + "(" + hostName + ")"); - s_logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); + logger.debug("Ping from " + hostId + "(" + hostName + ")"); + logger.trace("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); } else { - s_logger.debug("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); + logger.debug("SeqA " + hostId + "-" + request.getSequence() + ": Processing " + request); } } @@ -1387,7 +1385,7 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; final String reason = shutdown.getReason(); - s_logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail()); + logger.info("Host " + attache.getId() + " has informed us that it is shutting down with reason " + reason + " and detail " + shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); @@ -1425,7 +1423,7 @@ protected void processRequest(final Link link, final Request request) { _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_ROUTING, host.getDataCenterId(), host.getPodId()); } } else { - s_logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB"); + logger.debug("Not processing " + PingRoutingCommand.class.getSimpleName() + " for agent id=" + cmdHostId + "; can't find the host in the DB"); } } if (host!= null && host.getStatus() 
!= Status.Up && gatewayAccessible) { @@ -1435,8 +1433,8 @@ protected void processRequest(final Link link, final Request request) { } else if (cmd instanceof ReadyAnswer) { final HostVO host = _hostDao.findById(attache.getId()); if (host == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cant not find host " + attache.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cant not find host " + attache.getId()); } } answer = new Answer(cmd); @@ -1445,33 +1443,33 @@ protected void processRequest(final Link link, final Request request) { } } } catch (final Throwable th) { - s_logger.warn("Caught: ", th); + logger.warn("Caught: ", th); answer = new Answer(cmd, false, th.getMessage()); } answers[i] = answer; } final Response response = new Response(request, answers, _nodeId, attache.getId()); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (logD) { - s_logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); + logger.debug("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); } else { - s_logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); + logger.trace("SeqA " + attache.getId() + "-" + response.getSequence() + ": Sending " + response); } } try { link.send(response.toBytes()); } catch (final ClosedChannelException e) { - s_logger.warn("Unable to send response because connection is closed: " + response); + logger.warn("Unable to send response because connection is closed: " + response); } } protected void processResponse(final Link link, final Response response) { final AgentAttache attache = (AgentAttache)link.attachment(); if (attache == null) { - s_logger.warn("Unable to process: " + response); + logger.warn("Unable to process: " + response); } else if (!attache.processAnswers(response.getSequence(), response)) { - s_logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not 
processed: " + response); + logger.info("Host " + attache.getId() + " - Seq " + response.getSequence() + ": Response is not processed: " + response); } } @@ -1490,11 +1488,11 @@ protected void doTask(final Task task) throws TaskExecutionException { processRequest(task.getLink(), event); } } catch (final UnsupportedVersionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); // upgradeAgent(task.getLink(), data, e.getReason()); } catch (final ClassNotFoundException e) { final String message = String.format("Exception occurred when executing tasks! Error '%s'", e.getMessage()); - s_logger.error(message); + logger.error(message); throw new TaskExecutionException(message, e); } } else if (type == Task.Type.CONNECT) { @@ -1504,7 +1502,7 @@ protected void doTask(final Task task) throws TaskExecutionException { if (attache != null) { disconnectWithInvestigation(attache, Event.AgentDisconnected); } else { - s_logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done."); + logger.info("Connection from " + link.getIpAddress() + " closed but no cleanup was done."); link.close(); link.terminated(); } @@ -1541,20 +1539,20 @@ public boolean tapLoadingAgents(final Long hostId, final TapAgentsAction action) public boolean agentStatusTransitTo(final HostVO host, final Status.Event e, final long msId) { try { _agentStatusLock.lock(); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final ResourceState state = host.getResourceState(); final StringBuilder msg = new StringBuilder("Transition:"); msg.append("[Resource state = ").append(state); msg.append(", Agent event = ").append(e.toString()); msg.append(", Host id = ").append(host.getId()).append(", name = " + host.getName()).append("]"); - s_logger.debug(msg); + logger.debug(msg); } host.setManagementServerId(msId); try { return _statusStateMachine.transitTo(host, e, host.getId(), _hostDao); } catch (final NoTransitionException e1) { - s_logger.debug("Cannot 
transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + ", management server id is " + msId); + logger.debug("Cannot transit agent status with event " + e + " for host " + host.getId() + ", name=" + host.getName() + ", management server id is " + msId); throw new CloudRuntimeException("Cannot transit agent status with event " + e + " for host " + host.getId() + ", management server id is " + msId + "," + e1.getMessage()); } } finally { @@ -1583,7 +1581,7 @@ public void disconnectWithInvestigation(final AgentAttache attache, final Status protected boolean isHostOwnerSwitched(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.warn("Can't find the host " + hostId); + logger.warn("Can't find the host " + hostId); return false; } return isHostOwnerSwitched(host); @@ -1608,7 +1606,7 @@ private void disconnectInternal(final long hostId, final Status.Event event, fin } else { /* Agent is still in connecting process, don't allow to disconnect right away */ if (tapLoadingAgents(hostId, TapAgentsAction.Contains)) { - s_logger.info("Host " + hostId + " is being loaded so no disconnects needed."); + logger.info("Host " + hostId + " is being loaded so no disconnects needed."); return; } @@ -1686,14 +1684,14 @@ public Long getAgentPingTime(final long agentId) { public void pingBy(final long agentId) { // Update PingMap with the latest time if agent entry exists in the PingMap if (_pingMap.replace(agentId, InaccurateClock.getTimeInSeconds()) == null) { - s_logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); + logger.info("PingMap for agent: " + agentId + " will not be updated because agent is no longer in the PingMap"); } } protected class MonitorTask extends ManagedContextRunnable { @Override protected void runInContext() { - s_logger.trace("Agent Monitor is started."); + logger.trace("Agent Monitor is started."); try { final List 
behindAgents = findAgentsBehindOnPing(); @@ -1707,17 +1705,17 @@ protected void runInContext() { /* * Host is in non-operation state, so no investigation and direct put agent to Disconnected */ - s_logger.debug("Ping timeout but agent " + agentId + " is in resource state of " + resourceState + ", so no investigation"); + logger.debug("Ping timeout but agent " + agentId + " is in resource state of " + resourceState + ", so no investigation"); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { final HostVO host = _hostDao.findById(agentId); if (host != null && (host.getType() == Host.Type.ConsoleProxy || host.getType() == Host.Type.SecondaryStorageVM || host.getType() == Host.Type.SecondaryStorageCmdExecutor)) { - s_logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId()); + logger.warn("Disconnect agent for CPVM/SSVM due to physical connection close. host: " + host.getId()); disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); } else { - s_logger.debug("Ping timeout for agent " + agentId + ", do invstigation"); + logger.debug("Ping timeout for agent " + agentId + ", do invstigation"); disconnectWithInvestigation(agentId, Event.PingTimeout); } } @@ -1740,10 +1738,10 @@ protected void runInContext() { } } } catch (final Throwable th) { - s_logger.error("Caught the following exception: ", th); + logger.error("Caught the following exception: ", th); } - s_logger.trace("Agent Monitor is leaving the building!"); + logger.trace("Agent Monitor is leaving the building!"); } protected List findAgentsBehindOnPing() { @@ -1756,7 +1754,7 @@ protected List findAgentsBehindOnPing() { } if (agentsBehind.size() > 0) { - s_logger.info("Found the following agents behind on ping: " + agentsBehind); + logger.info("Found the following agents behind on ping: " + agentsBehind); } return agentsBehind; @@ -1880,7 +1878,7 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool 
Commands c = new Commands(cmds); send(host.getId(), c, this); } catch (AgentUnavailableException e) { - s_logger.debug("Failed to send host params on host: " + host.getId()); + logger.debug("Failed to send host params on host: " + host.getId()); } } } @@ -1939,7 +1937,7 @@ private void sendCommandToAgents(Map> hostsPerZone, Map> hostsPerZone, Map params) { if (params != null && ! params.isEmpty()) { - s_logger.debug("Propagating changes on host parameters to the agents"); + logger.debug("Propagating changes on host parameters to the agents"); Map> hostsPerZone = getHostsPerZone(); sendCommandToAgents(hostsPerZone, params); } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java index 306c47ff60cd..beafb4da8eb4 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentAttache.java @@ -25,7 +25,6 @@ import javax.net.ssl.SSLEngine; -import org.apache.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.Command; @@ -35,7 +34,6 @@ import com.cloud.utils.nio.Link; public class ClusteredAgentAttache extends ConnectedAgentAttache implements Routable { - private final static Logger s_logger = Logger.getLogger(ClusteredAgentAttache.class); private static ClusteredAgentManagerImpl s_clusteredAgentMgr; protected ByteBuffer _buffer = ByteBuffer.allocate(2048); private boolean _forward = false; @@ -92,10 +90,10 @@ public void cancel(final long seq) { String peerName = synchronous.getPeer(); if (peerName != null) { if (s_clusteredAgentMgr != null) { - s_logger.debug(log(seq, "Forwarding to peer to cancel due to timeout")); + logger.debug(log(seq, "Forwarding to peer to cancel due to timeout")); s_clusteredAgentMgr.cancel(peerName, _id, seq, "Timed Out"); } else { - s_logger.error("Unable to forward cancel, 
ClusteredAgentAttache is not properly initialized"); + logger.error("Unable to forward cancel, ClusteredAgentAttache is not properly initialized"); } } @@ -107,13 +105,13 @@ public void cancel(final long seq) { @Override public void routeToAgent(final byte[] data) throws AgentUnavailableException { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(Request.getSequence(data), "Routing from " + Request.getManagementServerId(data))); + if (logger.isDebugEnabled()) { + logger.debug(log(Request.getSequence(data), "Routing from " + Request.getManagementServerId(data))); } if (_link == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(Request.getSequence(data), "Link is closed")); + if (logger.isDebugEnabled()) { + logger.debug(log(Request.getSequence(data), "Link is closed")); } throw new AgentUnavailableException("Link is closed", _id); } @@ -121,14 +119,14 @@ public void routeToAgent(final byte[] data) throws AgentUnavailableException { try { _link.send(data); } catch (ClosedChannelException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(Request.getSequence(data), "Channel is closed")); + if (logger.isDebugEnabled()) { + logger.debug(log(Request.getSequence(data), "Channel is closed")); } throw new AgentUnavailableException("Channel to agent is closed", _id); } catch (NullPointerException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(Request.getSequence(data), "Link is closed")); + if (logger.isDebugEnabled()) { + logger.debug(log(Request.getSequence(data), "Link is closed")); } // Note: since this block is not in synchronized. It is possible for _link to become null. 
throw new AgentUnavailableException("Channel to agent is null", _id); @@ -150,8 +148,8 @@ public void send(final Request req, final Listener listener) throws AgentUnavail if (_transferMode) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Holding request as the corresponding agent is in transfer mode: ")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Holding request as the corresponding agent is in transfer mode: ")); } synchronized (this) { @@ -176,8 +174,8 @@ public void send(final Request req, final Listener listener) throws AgentUnavail ch = s_clusteredAgentMgr.connectToPeer(peerName, ch); if (ch == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Unable to forward " + req.toString())); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Unable to forward " + req.toString())); } continue; } @@ -188,8 +186,8 @@ public void send(final Request req, final Listener listener) throws AgentUnavail } try { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Forwarding " + req.toString() + " to " + peerName)); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Forwarding " + req.toString() + " to " + peerName)); } if (req.executeInSequence() && listener != null && listener instanceof SynchronousListener) { SynchronousListener synchronous = (SynchronousListener)listener; @@ -199,12 +197,12 @@ public void send(final Request req, final Listener listener) throws AgentUnavail error = false; return; } catch (IOException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Error on connecting to management node: " + req.toString() + " try = " + i)); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Error on connecting to management node: " + req.toString() + " try = " + i)); } - if (s_logger.isInfoEnabled()) { - s_logger.info("IOException " + e.getMessage() + " when sending data to peer " + peerName + ", close peer connection and let it re-open"); + if (logger.isInfoEnabled()) { + 
logger.info("IOException " + e.getMessage() + " when sending data to peer " + peerName + ", close peer connection and let it re-open"); } } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index bd4e259a7885..1fe6b19ab681 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -57,7 +57,6 @@ import org.apache.cloudstack.shutdown.command.TriggerShutdownManagementServerHostCommand; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.security.SSLUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CancelCommand; @@ -102,7 +101,6 @@ import com.google.gson.Gson; public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService { - final static Logger s_logger = Logger.getLogger(ClusteredAgentManagerImpl.class); private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list @@ -154,7 +152,7 @@ public boolean configure(final String name, final Map xmlParams) _sslEngines = new HashMap(7); _nodeId = ManagementServerNode.getManagementServerId(); - s_logger.info("Configuring ClusterAgentManagerImpl. management server node id(msid): " + _nodeId); + logger.info("Configuring ClusterAgentManagerImpl. 
management server node id(msid): " + _nodeId); ClusteredAgentAttache.initialize(this); @@ -172,8 +170,8 @@ public boolean start() { return false; } _timer.schedule(new DirectAgentScanTimerTask(), STARTUP_DELAY, ScanInterval.value()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); } // Schedule tasks for agent rebalancing @@ -188,8 +186,8 @@ public boolean start() { public void scheduleHostScanTask() { _timer.schedule(new DirectAgentScanTimerTask(), 0); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled a direct agent scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled a direct agent scan task"); } } @@ -198,8 +196,8 @@ private void runDirectAgentScanTimerTask() { } private void scanDirectAgentToLoad() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Begin scanning directly connected hosts"); + if (logger.isTraceEnabled()) { + logger.trace("Begin scanning directly connected hosts"); } // for agents that are self-managed, threshold to be considered as disconnected after pingtimeout @@ -210,15 +208,15 @@ private void scanDirectAgentToLoad() { if (hosts != null) { hosts.addAll(appliances); if (hosts.size() > 0) { - s_logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them..."); + logger.debug("Found " + hosts.size() + " unmanaged direct hosts, processing connect for them..."); for (final HostVO host : hosts) { try { final AgentAttache agentattache = findAttache(host.getId()); if (agentattache != null) { // already loaded, skip if (agentattache.forForward()) { - if (s_logger.isInfoEnabled()) { - s_logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host"); + if 
(logger.isInfoEnabled()) { + logger.info(host + " is detected down, but we have a forward attache running, disconnect this one before launching the host"); } removeAgent(agentattache, Status.Disconnected); } else { @@ -226,18 +224,18 @@ private void scanDirectAgentToLoad() { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ")"); } loadDirectlyConnectedHost(host, false); } catch (final Throwable e) { - s_logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e); + logger.warn(" can not load directly connected host " + host.getId() + "(" + host.getName() + ") due to ", e); } } } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("End scanning directly connected hosts"); + if (logger.isTraceEnabled()) { + logger.trace("End scanning directly connected hosts"); } } @@ -247,7 +245,7 @@ protected void runInContext() { try { runDirectAgentScanTimerTask(); } catch (final Throwable e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); } } } @@ -258,7 +256,7 @@ public Task create(final Task.Type type, final Link link, final byte[] data) { } protected AgentAttache createAttache(final long id) { - s_logger.debug("create forwarding ClusteredAgentAttache for " + id); + logger.debug("create forwarding ClusteredAgentAttache for " + id); final HostVO host = _hostDao.findById(id); final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); AgentAttache old = null; @@ -267,8 +265,8 @@ protected AgentAttache createAttache(final long id) { _agents.put(id, attache); } if (old != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Remove stale agent attache from current management server"); + if (logger.isDebugEnabled()) { + 
logger.debug("Remove stale agent attache from current management server"); } removeAgent(old, Status.Removed); } @@ -277,7 +275,7 @@ protected AgentAttache createAttache(final long id) { @Override protected AgentAttache createAttacheForConnect(final HostVO host, final Link link) { - s_logger.debug("create ClusteredAgentAttache for " + host.getId()); + logger.debug("create ClusteredAgentAttache for " + host.getId()); final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -293,7 +291,7 @@ protected AgentAttache createAttacheForConnect(final HostVO host, final Link lin @Override protected AgentAttache createAttacheForDirectConnect(final Host host, final ServerResource resource) { - s_logger.debug(String.format("Create ClusteredDirectAgentAttache for %s.", host)); + logger.debug(String.format("Create ClusteredDirectAgentAttache for %s.", host)); final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates()); AgentAttache old = null; synchronized (_agents) { @@ -337,8 +335,8 @@ protected boolean handleDisconnect(final AgentAttache agent, final Status.Event @Override public boolean executeUserRequest(final long hostId, final Event event) throws AgentUnavailableException { if (event == Event.AgentDisconnected) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received agent disconnect event for host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Received agent disconnect event for host " + hostId); } final AgentAttache attache = findAttache(hostId); if (attache != null) { @@ -347,7 +345,7 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A final HostTransferMapVO transferVO = _hostTransferDao.findById(hostId); if (transferVO != null) { if (transferVO.getFutureOwner() == _nodeId && transferVO.getState() == 
HostTransferState.TransferStarted) { - s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + _nodeId); + logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is being connected to " + _nodeId); return true; } } @@ -356,7 +354,7 @@ public boolean executeUserRequest(final long hostId, final Event event) throws A // don't process disconnect if the disconnect came for the host via delayed cluster notification, // but the host has already reconnected to the current management server if (!attache.forForward()) { - s_logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is directly connected to the current management server " + _nodeId); + logger.debug("Not processing " + Event.AgentDisconnected + " event for the host id=" + hostId + " as the host is directly connected to the current management server " + _nodeId); return true; } @@ -382,32 +380,32 @@ public void reconnect(final long hostId) throws CloudRuntimeException, AgentUnav } public void notifyNodesInCluster(final AgentAttache attache) { - s_logger.debug("Notifying other nodes of to disconnect"); + logger.debug("Notifying other nodes of to disconnect"); final Command[] cmds = new Command[] {new ChangeAgentCommand(attache.getId(), Event.AgentDisconnected)}; _clusterMgr.broadcast(attache.getId(), _gson.toJson(cmds)); } // notifies MS peers to schedule a host scan task immediately, triggered during addHost operation public void notifyNodesInClusterToScheduleHostScanTask() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying other MS nodes to run host scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying other MS nodes to run host scan task"); } final Command[] cmds = new Command[] {new ScheduleHostScanTaskCommand()}; _clusterMgr.broadcast(0, _gson.toJson(cmds)); } - protected static void 
logT(final byte[] bytes, final String msg) { - s_logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + protected void logT(final byte[] bytes, final String msg) { + logger.trace("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); } - protected static void logD(final byte[] bytes, final String msg) { - s_logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + protected void logD(final byte[] bytes, final String msg) { + logger.debug("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + (Request.isRequest(bytes) ? "Req: " : "Resp: ") + msg); } - protected static void logI(final byte[] bytes, final String msg) { - s_logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + protected void logI(final byte[] bytes, final String msg) { + logger.info("Seq " + Request.getAgentId(bytes) + "-" + Request.getSequence(bytes) + ": MgmtId " + Request.getManagementServerId(bytes) + ": " + (Request.isRequest(bytes) ? 
"Req: " : "Resp: ") + msg); } @@ -432,7 +430,7 @@ public boolean routeToPeer(final String peer, final byte[] bytes) { return false; } try { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { logD(bytes, "Routing to peer"); } Link.write(ch, new ByteBuffer[] {ByteBuffer.wrap(bytes)}, sslEngine); @@ -471,7 +469,7 @@ public void closePeer(final String peerName) { try { ch.close(); } catch (final IOException e) { - s_logger.warn("Unable to close peer socket connection to " + peerName); + logger.warn("Unable to close peer socket connection to " + peerName); } } _peers.remove(peerName); @@ -487,13 +485,13 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr try { prevCh.close(); } catch (final Exception e) { - s_logger.info("[ignored]" + "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage()); + logger.info("[ignored]" + "failed to get close resource for previous channel Socket: " + e.getLocalizedMessage()); } } if (ch == null || ch == prevCh) { final ManagementServerHost ms = _clusterMgr.getPeer(peerName); if (ms == null) { - s_logger.info("Unable to find peer: " + peerName); + logger.info("Unable to find peer: " + peerName); return null; } final String ip = ms.getServiceIP(); @@ -520,13 +518,13 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr ch1.close(); throw new IOException(String.format("SSL: Handshake failed with peer management server '%s' on %s:%d ", peerName, ip, port)); } - s_logger.info(String.format("SSL: Handshake done with peer management server '%s' on %s:%d ", peerName, ip, port)); + logger.info(String.format("SSL: Handshake done with peer management server '%s' on %s:%d ", peerName, ip, port)); } catch (final Exception e) { ch1.close(); throw new IOException("SSL: Fail to init SSL! 
" + e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); + if (logger.isDebugEnabled()) { + logger.debug("Connection to peer opened: " + peerName + ", ip: " + ip); } _peers.put(peerName, ch1); _sslEngines.put(peerName, sslEngine); @@ -536,16 +534,16 @@ public SocketChannel connectToPeer(final String peerName, final SocketChannel pr try { ch1.close(); } catch (final IOException ex) { - s_logger.error("failed to close failed peer socket: " + ex); + logger.error("failed to close failed peer socket: " + ex); } } - s_logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e); + logger.warn("Unable to connect to peer management server: " + peerName + ", ip: " + ip + " due to " + e.getMessage(), e); return null; } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found open channel for peer: " + peerName); + if (logger.isTraceEnabled()) { + logger.trace("Found open channel for peer: " + peerName); } return ch; } @@ -571,8 +569,8 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce AgentAttache agent = findAttache(hostId); if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); + if (logger.isDebugEnabled()) { + logger.debug("Host " + hostId + " has switched to another management server, need to update agent map with a forwarding agent attache"); } agent = createAttache(hostId); } @@ -591,10 +589,10 @@ public boolean stop() { if (_peers != null) { for (final SocketChannel ch : _peers.values()) { try { - s_logger.info("Closing: " + ch.toString()); + logger.info("Closing: " + ch.toString()); ch.close(); } catch (final IOException e) { - s_logger.info("[ignored] error on closing channel: " + ch.toString(), e); + 
logger.info("[ignored] error on closing channel: " + ch.toString(), e); } } } @@ -631,7 +629,7 @@ protected void doTask(final Task task) throws TaskExecutionException { final byte[] data = task.getData(); final Version ver = Request.getVersion(data); if (ver.ordinal() != Version.v1.ordinal() && ver.ordinal() != Version.v3.ordinal()) { - s_logger.warn("Wrong version for clustered agent request"); + logger.warn("Wrong version for clustered agent request"); super.doTask(task); return; } @@ -651,7 +649,7 @@ protected void doTask(final Task task) throws TaskExecutionException { final Request req = Request.parse(data); final Command[] cmds = req.getCommands(); final CancelCommand cancel = (CancelCommand)cmds[0]; - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { logD(data, "Cancel request received"); } agent.cancel(cancel.getSequence()); @@ -699,7 +697,7 @@ protected void doTask(final Task task) throws TaskExecutionException { final AgentAttache attache = (AgentAttache)link.attachment(); if (attache != null) { attache.sendNext(Request.getSequence(data)); - } else if (s_logger.isDebugEnabled()) { + } else if (logger.isDebugEnabled()) { logD(data, "No attache to process " + Request.parse(data).toString()); } } @@ -712,11 +710,11 @@ protected void doTask(final Task task) throws TaskExecutionException { final Response response = Response.parse(data); final AgentAttache attache = findAttache(response.getAgentId()); if (attache == null) { - s_logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString()); + logger.info("SeqA " + response.getAgentId() + "-" + response.getSequence() + "Unable to find attache to forward " + response.toString()); return; } if (!attache.processAnswers(response.getSequence(), response)) { - s_logger.info("SeqA " + attache.getId() + "-" + response.getSequence() + ": Response is not processed: " + response.toString()); + logger.info("SeqA " + attache.getId() + 
"-" + response.getSequence() + ": Response is not processed: " + response.toString()); } } return; @@ -724,11 +722,11 @@ protected void doTask(final Task task) throws TaskExecutionException { } } catch (final ClassNotFoundException e) { final String message = String.format("ClassNotFoundException occurred when executing tasks! Error '%s'", e.getMessage()); - s_logger.error(message); + logger.error(message); throw new TaskExecutionException(message, e); } catch (final UnsupportedVersionException e) { final String message = String.format("UnsupportedVersionException occurred when executing tasks! Error '%s'", e.getMessage()); - s_logger.error(message); + logger.error(message); throw new TaskExecutionException(message, e); } finally { txn.close(); @@ -743,12 +741,12 @@ public void onManagementNodeJoined(final List no @Override public void onManagementNodeLeft(final List nodeList, final long selfNodeId) { for (final ManagementServerHost vo : nodeList) { - s_logger.info("Marking hosts as disconnected on Management server" + vo.getMsid()); + logger.info("Marking hosts as disconnected on Management server" + vo.getMsid()); final long lastPing = (System.currentTimeMillis() >> 10) - mgmtServiceConf.getTimeout(); _hostDao.markHostsAsDisconnected(vo.getMsid(), lastPing); outOfBandManagementDao.expireServerOwnership(vo.getMsid()); haConfigDao.expireServerOwnership(vo.getMsid()); - s_logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid()); + logger.info("Deleting entries from op_host_transfer table for Management server " + vo.getMsid()); cleanupTransferMap(vo.getMsid()); } } @@ -775,7 +773,7 @@ public boolean executeRebalanceRequest(final long agentId, final long currentOwn try { result = rebalanceHost(agentId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - s_logger.warn("Unable to rebalance host id=" + agentId, e); + logger.warn("Unable to rebalance host id=" + agentId, e); } } return result; @@ -790,14 +788,14 @@ 
public class AgentLoadBalancerTask extends ManagedContextTimerTask { protected volatile boolean cancelled = false; public AgentLoadBalancerTask() { - s_logger.debug("Agent load balancer task created"); + logger.debug("Agent load balancer task created"); } @Override public synchronized boolean cancel() { if (!cancelled) { cancelled = true; - s_logger.debug("Agent load balancer task cancelled"); + logger.debug("Agent load balancer task cancelled"); return super.cancel(); } return true; @@ -808,19 +806,19 @@ protected synchronized void runInContext() { try { if (!cancelled) { startRebalanceAgents(); - if (s_logger.isInfoEnabled()) { - s_logger.info("The agent load balancer task is now being cancelled"); + if (logger.isInfoEnabled()) { + logger.info("The agent load balancer task is now being cancelled"); } cancelled = true; } } catch (final Throwable e) { - s_logger.error("Unexpected exception " + e.toString(), e); + logger.error("Unexpected exception " + e.toString(), e); } } } public void startRebalanceAgents() { - s_logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents"); + logger.debug("Management server " + _nodeId + " is asking other peers to rebalance their agents"); final List allMS = _mshostDao.listBy(ManagementServerHost.State.Up); final QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getManagementServerId(), Op.NNULL); @@ -832,16 +830,16 @@ public void startRebalanceAgents() { if (!allManagedAgents.isEmpty() && !allMS.isEmpty()) { avLoad = allManagedAgents.size() / allMS.size(); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("There are no hosts to rebalance in the system. Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " + if (logger.isDebugEnabled()) { + logger.debug("There are no hosts to rebalance in the system. 
Current number of active management server nodes in the system is " + allMS.size() + "; number of managed agents is " + allManagedAgents.size()); } return; } if (avLoad == 0L) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("As calculated average load is less than 1, rounding it to 1"); + if (logger.isDebugEnabled()) { + logger.debug("As calculated average load is less than 1, rounding it to 1"); } avLoad = 1; } @@ -855,19 +853,19 @@ public void startRebalanceAgents() { if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { break; } else { - s_logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); + logger.debug("Agent load balancer planner " + lbPlanner.getName() + " found no hosts to be rebalanced from management server " + node.getMsid()); } } if (hostsToRebalance != null && !hostsToRebalance.isEmpty()) { - s_logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid()); + logger.debug("Found " + hostsToRebalance.size() + " hosts to rebalance from management server " + node.getMsid()); for (final HostVO host : hostsToRebalance) { final long hostId = host.getId(); - s_logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId); + logger.debug("Asking management server " + node.getMsid() + " to give away host id=" + hostId); boolean result = true; if (_hostTransferDao.findById(hostId) != null) { - s_logger.warn("Somebody else is already rebalancing host id: " + hostId); + logger.warn("Somebody else is already rebalancing host id: " + hostId); continue; } @@ -876,18 +874,18 @@ public void startRebalanceAgents() { transfer = _hostTransferDao.startAgentTransfering(hostId, node.getMsid(), _nodeId); final Answer[] answer = sendRebalanceCommand(node.getMsid(), hostId, node.getMsid(), _nodeId, Event.RequestAgentRebalance); if (answer == null) { - s_logger.warn("Failed to get host 
id=" + hostId + " from management server " + node.getMsid()); + logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid()); result = false; } } catch (final Exception ex) { - s_logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex); + logger.warn("Failed to get host id=" + hostId + " from management server " + node.getMsid(), ex); result = false; } finally { if (transfer != null) { final HostTransferMapVO transferState = _hostTransferDao.findByIdAndFutureOwnerId(transfer.getId(), _nodeId); if (!result && transferState != null && transferState.getState() == HostTransferState.TransferRequested) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode"); + if (logger.isDebugEnabled()) { + logger.debug("Removing mapping from op_host_transfer as it failed to be set to transfer mode"); } // just remove the mapping (if exists) as nothing was done on the peer management // server yet @@ -897,7 +895,7 @@ public void startRebalanceAgents() { } } } else { - s_logger.debug("Found no hosts to rebalance from the management server " + node.getMsid()); + logger.debug("Found no hosts to rebalance from the management server " + node.getMsid()); } } } @@ -911,8 +909,8 @@ private Answer[] sendRebalanceCommand(final long peer, final long agentId, final final Command[] cmds = commands.toCommands(); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); + if (logger.isDebugEnabled()) { + logger.debug("Forwarding " + cmds[0].toString() + " to " + peer); } final String peerName = Long.toString(peer); final String cmdStr = _gson.toJson(cmds); @@ -920,7 +918,7 @@ private Answer[] sendRebalanceCommand(final long peer, final long agentId, final final Answer[] answers = _gson.fromJson(ansStr, Answer[].class); return answers; } catch (final Exception e) { - s_logger.warn("Caught exception 
while talking to " + currentOwnerId, e); + logger.warn("Caught exception while talking to " + currentOwnerId, e); return null; } } @@ -944,8 +942,8 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Propagating agent change request event:" + event.toString() + " to agent:" + agentId); } final Command[] cmds = new Command[1]; cmds[0] = new ChangeAgentCommand(agentId, event); @@ -957,8 +955,8 @@ public Boolean propagateAgentEvent(final long agentId, final Event event) throws final Answer[] answers = _gson.fromJson(ansStr, Answer[].class); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result for agent change is " + answers[0].getResult()); + if (logger.isDebugEnabled()) { + logger.debug("Result for agent change is " + answers[0].getResult()); } return answers[0].getResult(); @@ -969,12 +967,12 @@ private Runnable getTransferScanTask() { @Override protected void runInContext() { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); + if (logger.isTraceEnabled()) { + logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); } synchronized (_agentToTransferIds) { if (_agentToTransferIds.size() > 0) { - s_logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer"); + logger.debug("Found " + _agentToTransferIds.size() + " agents to transfer"); // for (Long hostId : _agentToTransferIds) { for (final Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext();) { final Long hostId = iterator.next(); @@ -990,14 +988,14 @@ protected void runInContext() { final HostTransferMapVO transferMap = _hostTransferDao.findActiveHostTransferMapByHostId(hostId, new Date(cutTime.getTime() - rebalanceTimeOut)); if 
(transferMap == null) { - s_logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host"); + logger.debug("Timed out waiting for the host id=" + hostId + " to be ready to transfer, skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; } if (transferMap.getInitialOwner() != _nodeId || attache == null || attache.forForward()) { - s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); + logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -1005,7 +1003,7 @@ protected void runInContext() { final ManagementServerHostVO ms = _mshostDao.findByMsid(transferMap.getFutureOwner()); if (ms != null && ms.getState() != ManagementServerHost.State.Up) { - s_logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host"); + logger.debug("Can't transfer host " + hostId + " as it's future owner is not in UP state: " + ms + ", skipping rebalance for the host"); iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; @@ -1016,31 +1014,31 @@ protected void runInContext() { try { _executor.execute(new RebalanceTask(hostId, transferMap.getInitialOwner(), transferMap.getFutureOwner())); } catch (final RejectedExecutionException ex) { - s_logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution"); + logger.warn("Failed to submit rebalance task for host id=" + hostId + "; postponing the execution"); continue; } } else { - s_logger.debug("Agent " + hostId + " can't be transferred yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + logger.debug("Agent " + hostId + " can't be 
transferred yet as its request queue size is " + attache.getQueueSize() + " and listener queue size is " + attache.getNonRecurringListenersSize()); } } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found no agents to be transferred by the management server " + _nodeId); + if (logger.isTraceEnabled()) { + logger.trace("Found no agents to be transferred by the management server " + _nodeId); } } } } catch (final Throwable e) { - s_logger.error("Problem with the clustered agent transfer scan check!", e); + logger.error("Problem with the clustered agent transfer scan check!", e); } } }; } private boolean setToWaitForRebalance(final long hostId, final long currentOwnerId, final long futureOwnerId) { - s_logger.debug("Adding agent " + hostId + " to the list of agents to transfer"); + logger.debug("Adding agent " + hostId + " to the list of agents to transfer"); synchronized (_agentToTransferIds) { return _agentToTransferIds.add(hostId); } @@ -1051,7 +1049,7 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi boolean result = true; if (currentOwnerId == _nodeId) { if (!startRebalance(hostId)) { - s_logger.debug("Failed to start agent rebalancing"); + logger.debug("Failed to start agent rebalancing"); finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed); return false; } @@ -1062,23 +1060,23 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } } catch (final Exception ex) { - s_logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex); + logger.warn("Host " + hostId + " failed to connect to the management server " + futureOwnerId + " as a part of rebalance process", ex); result = false; } if (result) { - s_logger.debug("Successfully transferred host id=" + hostId + " to management server " + futureOwnerId); + logger.debug("Successfully transferred host id=" + hostId + " to management server " + futureOwnerId); 
finishRebalance(hostId, futureOwnerId, Event.RebalanceCompleted); } else { - s_logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId); + logger.warn("Failed to transfer host id=" + hostId + " to management server " + futureOwnerId); finishRebalance(hostId, futureOwnerId, Event.RebalanceFailed); } } else if (futureOwnerId == _nodeId) { final HostVO host = _hostDao.findById(hostId); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); + if (logger.isDebugEnabled()) { + logger.debug("Disconnecting host " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); } final AgentAttache attache = findAttache(hostId); @@ -1087,24 +1085,24 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi } if (result) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); + if (logger.isDebugEnabled()) { + logger.debug("Loading directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } result = loadDirectlyConnectedHost(host, true); } else { - s_logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); + logger.warn("Failed to disconnect " + host.getId() + "(" + host.getName() + " as a part of rebalance process without notification"); } } catch (final Exception ex) { - s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process due to:", + logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the 
management server " + _nodeId + " as a part of rebalance process due to:", ex); result = false; } if (result) { - s_logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); + logger.debug("Successfully loaded directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } else { - s_logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); + logger.warn("Failed to load directly connected host " + host.getId() + "(" + host.getName() + ") to the management server " + _nodeId + " as a part of rebalance process"); } } @@ -1114,13 +1112,13 @@ protected boolean rebalanceHost(final long hostId, final long currentOwnerId, fi protected void finishRebalance(final long hostId, final long futureOwnerId, final Event event) { final boolean success = event == Event.RebalanceCompleted ? 
true : false; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); + if (logger.isDebugEnabled()) { + logger.debug("Finishing rebalancing for the agent " + hostId + " with event " + event); } final AgentAttache attache = findAttache(hostId); if (attache == null || !(attache instanceof ClusteredAgentAttache)) { - s_logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already"); + logger.debug("Unable to find forward attache for the host id=" + hostId + ", assuming that the agent disconnected already"); _hostTransferDao.completeAgentTransfer(hostId); return; } @@ -1135,7 +1133,7 @@ protected void finishRebalance(final long hostId, final long futureOwnerId, fina // 2) Get all transfer requests and route them to peer Request requestToTransfer = forwardAttache.getRequestToTransfer(); while (requestToTransfer != null) { - s_logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + _nodeId + " to " + futureOwnerId); + logger.debug("Forwarding request " + requestToTransfer.getSequence() + " held in transfer attache " + hostId + " from the management server " + _nodeId + " to " + futureOwnerId); final boolean routeResult = routeToPeer(Long.toString(futureOwnerId), requestToTransfer.getBytes()); if (!routeResult) { logD(requestToTransfer.getBytes(), "Failed to route request to peer"); @@ -1144,23 +1142,23 @@ protected void finishRebalance(final long hostId, final long futureOwnerId, fina requestToTransfer = forwardAttache.getRequestToTransfer(); } - s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); + logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); } else { failRebalance(hostId); } - s_logger.debug("Management server " + _nodeId + " 
completed agent " + hostId + " rebalance"); + logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance"); _hostTransferDao.completeAgentTransfer(hostId); } protected void failRebalance(final long hostId) { try { - s_logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId); + logger.debug("Management server " + _nodeId + " failed to rebalance agent " + hostId); _hostTransferDao.completeAgentTransfer(hostId); handleDisconnectWithoutInvestigation(findAttache(hostId), Event.RebalanceFailed, true, true); } catch (final Exception ex) { - s_logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup"); + logger.warn("Failed to reconnect host id=" + hostId + " as a part of failed rebalance task cleanup"); } } @@ -1168,7 +1166,7 @@ protected boolean startRebalance(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - s_logger.warn("Unable to find host record, fail start rebalancing process"); + logger.warn("Unable to find host record, fail start rebalancing process"); return false; } @@ -1178,17 +1176,17 @@ protected boolean startRebalance(final long hostId) { handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); if (forwardAttache == null) { - s_logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process"); + logger.warn("Unable to create a forward attache for the host " + hostId + " as a part of rebalance process"); return false; } - s_logger.debug("Putting agent id=" + hostId + " to transfer mode"); + logger.debug("Putting agent id=" + hostId + " to transfer mode"); forwardAttache.setTransferMode(true); _agents.put(hostId, forwardAttache); } else { if (attache == null) { - s_logger.warn("Attache for the agent " + hostId + " no longer exists 
on management server " + _nodeId + ", can't start host rebalancing"); + logger.warn("Attache for the agent " + hostId + " no longer exists on management server " + _nodeId + ", can't start host rebalancing"); } else { - s_logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + attache.getNonRecurringListenersSize() + logger.warn("Attache for the agent " + hostId + " has request queue size= " + attache.getQueueSize() + " and listener queue size " + attache.getNonRecurringListenersSize() + ", can't start host rebalancing"); } return false; @@ -1225,19 +1223,19 @@ public RebalanceTask(final long hostId, final long currentOwnerId, final long fu @Override protected void runInContext() { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rebalancing host id=" + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Rebalancing host id=" + hostId); } rebalanceHost(hostId, currentOwnerId, futureOwnerId); } catch (final Exception e) { - s_logger.warn("Unable to rebalance host id=" + hostId, e); + logger.warn("Unable to rebalance host id=" + hostId, e); } } } private String handleScheduleHostScanTaskCommand(final ScheduleHostScanTaskCommand cmd) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); + if (logger.isDebugEnabled()) { + logger.debug("Intercepting resource manager command: " + _gson.toJson(cmd)); } try { @@ -1245,7 +1243,7 @@ private String handleScheduleHostScanTaskCommand(final ScheduleHostScanTaskComma } catch (final Exception e) { // Scheduling host scan task in peer MS is a best effort operation during host add, regular host scan // happens at fixed intervals anyways. 
So handling any exceptions that may be thrown - s_logger.warn( + logger.warn( "Exception happened while trying to schedule host scan task on mgmt server " + _clusterMgr.getSelfPeerName() + ", ignoring as regular host scan happens at fixed interval anyways", e); return null; @@ -1273,8 +1271,8 @@ public String getName() { @Override public String dispatch(final ClusterServicePdu pdu) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); + if (logger.isDebugEnabled()) { + logger.debug("Dispatch ->" + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); } Command[] cmds = null; @@ -1282,24 +1280,24 @@ public String dispatch(final ClusterServicePdu pdu) { cmds = _gson.fromJson(pdu.getJsonPackage(), Command[].class); } catch (final Throwable e) { assert false; - s_logger.error("Exception in gson decoding : ", e); + logger.error("Exception in gson decoding : ", e); } if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) { // intercepted final ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0]; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); + if (logger.isDebugEnabled()) { + logger.debug("Intercepting command for agent change: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); } boolean result = false; try { result = executeAgentUserRequest(cmd.getAgentId(), cmd.getEvent()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result is " + result); + if (logger.isDebugEnabled()) { + logger.debug("Result is " + result); } } catch (final AgentUnavailableException e) { - s_logger.warn("Agent is unavailable", e); + logger.warn("Agent is unavailable", e); return null; } @@ -1309,21 +1307,21 @@ public String dispatch(final ClusterServicePdu pdu) { } else if (cmds.length == 1 && cmds[0] instanceof TransferAgentCommand) { final TransferAgentCommand cmd = (TransferAgentCommand)cmds[0]; - 
if (s_logger.isDebugEnabled()) { - s_logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); + if (logger.isDebugEnabled()) { + logger.debug("Intercepting command for agent rebalancing: agent " + cmd.getAgentId() + " event: " + cmd.getEvent()); } boolean result = false; try { result = rebalanceAgent(cmd.getAgentId(), cmd.getEvent(), cmd.getCurrentOwner(), cmd.getFutureOwner()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result is " + result); + if (logger.isDebugEnabled()) { + logger.debug("Result is " + result); } } catch (final AgentUnavailableException e) { - s_logger.warn("Agent is unavailable", e); + logger.warn("Agent is unavailable", e); return null; } catch (final OperationTimedoutException e) { - s_logger.warn("Operation timed out", e); + logger.warn("Operation timed out", e); return null; } final Answer[] answers = new Answer[1]; @@ -1332,14 +1330,14 @@ public String dispatch(final ClusterServicePdu pdu) { } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand) { final PropagateResourceEventCommand cmd = (PropagateResourceEventCommand)cmds[0]; - s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId()); + logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId()); boolean result = false; try { result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent()); - s_logger.debug("Result is " + result); + logger.debug("Result is " + result); } catch (final AgentUnavailableException ex) { - s_logger.warn("Agent is unavailable", ex); + logger.warn("Agent is unavailable", ex); return null; } @@ -1356,30 +1354,30 @@ public String dispatch(final ClusterServicePdu pdu) { try { final long startTick = System.currentTimeMillis(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); + 
if (logger.isDebugEnabled()) { + logger.debug("Dispatch -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage()); } final Answer[] answers = sendToAgent(pdu.getAgentId(), cmds, pdu.isStopOnError()); if (answers != null) { final String jsonReturn = _gson.toJson(answers); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: " + if (logger.isDebugEnabled()) { + logger.debug("Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return result: " + jsonReturn); } return jsonReturn; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug( + if (logger.isDebugEnabled()) { + logger.debug( "Completed dispatching -> " + pdu.getAgentId() + ", json: " + pdu.getJsonPackage() + " in " + (System.currentTimeMillis() - startTick) + " ms, return null result"); } } } catch (final AgentUnavailableException e) { - s_logger.warn("Agent is unavailable", e); + logger.warn("Agent is unavailable", e); } catch (final OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } return null; @@ -1387,7 +1385,7 @@ public String dispatch(final ClusterServicePdu pdu) { private String handleShutdownManagementServerHostCommand(BaseShutdownManagementServerHostCommand cmd) { if (cmd instanceof PrepareForShutdownManagementServerHostCommand) { - s_logger.debug("Received BaseShutdownManagementServerHostCommand - preparing to shut down"); + logger.debug("Received BaseShutdownManagementServerHostCommand - preparing to shut down"); try { shutdownManager.prepareForShutdown(); return "Successfully prepared for shutdown"; @@ -1396,7 +1394,7 @@ private String handleShutdownManagementServerHostCommand(BaseShutdownManagementS } } if (cmd instanceof TriggerShutdownManagementServerHostCommand) { - s_logger.debug("Received 
TriggerShutdownManagementServerHostCommand - triggering a shut down"); + logger.debug("Received TriggerShutdownManagementServerHostCommand - triggering a shut down"); try { shutdownManager.triggerShutdown(); return "Successfully triggered shutdown"; @@ -1405,7 +1403,7 @@ private String handleShutdownManagementServerHostCommand(BaseShutdownManagementS } } if (cmd instanceof CancelShutdownManagementServerHostCommand) { - s_logger.debug("Received CancelShutdownManagementServerHostCommand - cancelling shut down"); + logger.debug("Received CancelShutdownManagementServerHostCommand - cancelling shut down"); try { shutdownManager.cancelShutdown(); return "Successfully prepared for shutdown"; @@ -1434,8 +1432,8 @@ private Runnable getAgentRebalanceScanTask() { @Override protected void runInContext() { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Agent rebalance task check, management server id:" + _nodeId); + if (logger.isTraceEnabled()) { + logger.trace("Agent rebalance task check, management server id:" + _nodeId); } // initiate agent lb task will be scheduled and executed only once, and only when number of agents // loaded exceeds _connectedAgentsThreshold @@ -1453,16 +1451,16 @@ protected void runInContext() { if (allHostsCount > 0.0) { final double load = managedHostsCount / allHostsCount; if (load > ConnectedAgentThreshold.value()) { - s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value()); + logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value()); scheduleRebalanceAgents(); _agentLbHappened = true; } else { - s_logger.debug("Not scheduling agent rebalancing task as the average load " + load + " has not crossed the threshold " + ConnectedAgentThreshold.value()); + logger.debug("Not scheduling agent rebalancing task as the average load " + load + " has not crossed 
the threshold " + ConnectedAgentThreshold.value()); } } } } catch (final Throwable e) { - s_logger.error("Problem with the clustered agent transfer scan check!", e); + logger.error("Problem with the clustered agent transfer scan check!", e); } } }; @@ -1471,13 +1469,13 @@ protected void runInContext() { @Override public void rescan() { // schedule a scan task immediately - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduling a host scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Scheduling a host scan task"); } // schedule host scan task on current MS scheduleHostScanTask(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying all peer MS to schedule host scan task"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying all peer MS to schedule host scan task"); } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java index 82423205a657..81c026348c8c 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -18,7 +18,6 @@ import java.nio.channels.ClosedChannelException; -import org.apache.log4j.Logger; import com.cloud.agent.transport.Request; import com.cloud.exception.AgentUnavailableException; @@ -29,7 +28,6 @@ * ConnectedAgentAttache implements a direct connection to this management server. 
*/ public class ConnectedAgentAttache extends AgentAttache { - private static final Logger s_logger = Logger.getLogger(ConnectedAgentAttache.class); protected Link _link; @@ -55,7 +53,7 @@ public synchronized boolean isClosed() { @Override public void disconnect(final Status state) { synchronized (this) { - s_logger.debug("Processing Disconnect."); + logger.debug("Processing Disconnect."); if (_link != null) { _link.close(); _link.terminated(); @@ -100,7 +98,7 @@ protected void finalize() throws Throwable { assert _link == null : "Duh...Says you....Forgot to call disconnect()!"; synchronized (this) { if (_link != null) { - s_logger.warn("Lost attache " + _id + "(" + _name + ")"); + logger.warn("Lost attache " + _id + "(" + _name + ")"); disconnect(Status.Alert); } } diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java index 6514685aa46e..969af3d1ba61 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/DirectAgentAttache.java @@ -23,10 +23,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.MDC; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -38,9 +36,9 @@ import com.cloud.exception.AgentUnavailableException; import com.cloud.host.Status; import com.cloud.resource.ServerResource; +import org.apache.logging.log4j.ThreadContext; public class DirectAgentAttache extends AgentAttache { - private final static Logger s_logger = Logger.getLogger(DirectAgentAttache.class); protected final ConfigKey _HostPingRetryCount = new ConfigKey("Advanced", Integer.class, "host.ping.retry.count", "0", 
"Number of times retrying a host ping while waiting for check results", true); @@ -62,8 +60,8 @@ public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, Serve @Override public void disconnect(Status state) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing disconnect " + _id + "(" + _name + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Processing disconnect " + _id + "(" + _name + ")"); } for (ScheduledFuture future : _futures) { @@ -119,7 +117,7 @@ public void process(Answer[] answers) { if (answers != null && answers[0] instanceof StartupAnswer) { StartupAnswer startup = (StartupAnswer)answers[0]; int interval = startup.getPingInterval(); - s_logger.info("StartupAnswer received " + startup.getHostId() + " Interval = " + interval); + logger.info("StartupAnswer received " + startup.getHostId() + " Interval = " + interval); _futures.add(_agentMgr.getCronJobPool().scheduleAtFixedRate(new PingTask(), interval, interval, TimeUnit.SECONDS)); } } @@ -130,7 +128,7 @@ protected void finalize() throws Throwable { assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. 
Ever considered why there's a method called disconnect()?"; synchronized (this) { if (_resource != null) { - s_logger.warn("Lost attache for " + _id + "(" + _name + ")"); + logger.warn("Lost attache for " + _id + "(" + _name + ")"); disconnect(Status.Alert); } } @@ -144,8 +142,8 @@ private synchronized void queueTask(Task task) { } private synchronized void scheduleFromQueue() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get()); + if (logger.isTraceEnabled()) { + logger.trace("Agent attache=" + _id + ", task queue size=" + tasks.size() + ", outstanding tasks=" + _outstandingTaskCount.get()); } while (!tasks.isEmpty() && _outstandingTaskCount.get() < _agentMgr.getDirectAgentThreadCap()) { _outstandingTaskCount.incrementAndGet(); @@ -158,7 +156,7 @@ protected class PingTask extends ManagedContextRunnable { protected synchronized void runInContext() { try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - s_logger.warn("PingTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out"); + logger.warn("PingTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out"); return; } @@ -173,28 +171,28 @@ protected synchronized void runInContext() { } if (cmd == null) { - s_logger.warn("Unable to get current status on " + _id + "(" + _name + ")"); + logger.warn("Unable to get current status on " + _id + "(" + _name + ")"); return; } if (cmd.getContextParam("logid") != null) { - MDC.put("logcontextid", cmd.getContextParam("logid")); + ThreadContext.put("logcontextid", cmd.getContextParam("logid")); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping from " + _id + "(" + _name + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Ping from " + 
_id + "(" + _name + ")"); } long seq = _seq++; - if (s_logger.isTraceEnabled()) { - s_logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString()); + if (logger.isTraceEnabled()) { + logger.trace("SeqA " + _id + "-" + seq + ": " + new Request(_id, -1, cmd, false).toString()); } _agentMgr.handleCommands(DirectAgentAttache.this, seq, new Command[] {cmd}); } else { - s_logger.debug("Unable to send ping because agent is disconnected " + _id + "(" + _name + ")"); + logger.debug("Unable to send ping because agent is disconnected " + _id + "(" + _name + ")"); } } catch (Exception e) { - s_logger.warn("Unable to complete the ping task", e); + logger.warn("Unable to complete the ping task", e); } finally { _outstandingCronTaskCount.decrementAndGet(); } @@ -220,7 +218,7 @@ private void bailout() { Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); processAnswers(seq, resp); } catch (Exception e) { - s_logger.warn(log(seq, "Exception caught in bailout "), e); + logger.warn(log(seq, "Exception caught in bailout "), e); } } @@ -229,7 +227,7 @@ protected void runInContext() { long seq = _req.getSequence(); try { if (_outstandingCronTaskCount.incrementAndGet() >= _agentMgr.getDirectAgentThreadCap()) { - s_logger.warn("CronTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out"); + logger.warn("CronTask execution for direct attache(" + _id + ") has reached maximum outstanding limit(" + _agentMgr.getDirectAgentThreadCap() + "), bailing out"); bailout(); return; } @@ -238,47 +236,47 @@ protected void runInContext() { Command[] cmds = _req.getCommands(); boolean stopOnError = _req.stopOnError(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Executing request")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Executing request")); } ArrayList answers = new ArrayList(cmds.length); for (int i = 0; i < 
cmds.length; i++) { Answer answer = null; Command currentCmd = cmds[i]; if (currentCmd.getContextParam("logid") != null) { - MDC.put("logcontextid", currentCmd.getContextParam("logid")); + ThreadContext.put("logcontextid", currentCmd.getContextParam("logid")); } try { if (resource != null) { answer = resource.executeRequest(cmds[i]); if (answer == null) { - s_logger.warn("Resource returned null answer!"); + logger.warn("Resource returned null answer!"); answer = new Answer(cmds[i], false, "Resource returned null answer"); } } else { answer = new Answer(cmds[i], false, "Agent is disconnected"); } } catch (Exception e) { - s_logger.warn(log(seq, "Exception Caught while executing command"), e); + logger.warn(log(seq, "Exception Caught while executing command"), e); answer = new Answer(cmds[i], false, e.toString()); } answers.add(answer); if (!answer.getResult() && stopOnError) { - if (i < cmds.length - 1 && s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error.")); + if (i < cmds.length - 1 && logger.isDebugEnabled()) { + logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error.")); } break; } } Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Response Received: ")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Response Received: ")); } processAnswers(seq, resp); } catch (Exception e) { - s_logger.warn(log(seq, "Exception caught "), e); + logger.warn(log(seq, "Exception caught "), e); } finally { _outstandingCronTaskCount.decrementAndGet(); } @@ -300,21 +298,21 @@ protected void runInContext() { Command[] cmds = _req.getCommands(); boolean stopOnError = _req.stopOnError(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Executing request")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Executing request")); } ArrayList answers = 
new ArrayList(cmds.length); for (int i = 0; i < cmds.length; i++) { Answer answer = null; Command currentCmd = cmds[i]; if (currentCmd.getContextParam("logid") != null) { - MDC.put("logcontextid", currentCmd.getContextParam("logid")); + ThreadContext.put("logcontextid", currentCmd.getContextParam("logid")); } try { if (resource != null) { answer = resource.executeRequest(cmds[i]); if (answer == null) { - s_logger.warn("Resource returned null answer!"); + logger.warn("Resource returned null answer!"); answer = new Answer(cmds[i], false, "Resource returned null answer"); } } else { @@ -322,27 +320,27 @@ protected void runInContext() { } } catch (Throwable t) { // Catch Throwable as all exceptions will otherwise be eaten by the executor framework - s_logger.warn(log(seq, "Throwable caught while executing command"), t); + logger.warn(log(seq, "Throwable caught while executing command"), t); answer = new Answer(cmds[i], false, t.toString()); } answers.add(answer); if (!answer.getResult() && stopOnError) { - if (i < cmds.length - 1 && s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error.")); + if (i < cmds.length - 1 && logger.isDebugEnabled()) { + logger.debug(log(seq, "Cancelling because one of the answers is false and it is stop on error.")); } break; } } Response resp = new Response(_req, answers.toArray(new Answer[answers.size()])); - if (s_logger.isDebugEnabled()) { - s_logger.debug(log(seq, "Response Received: ")); + if (logger.isDebugEnabled()) { + logger.debug(log(seq, "Response Received: ")); } processAnswers(seq, resp); } catch (Throwable t) { // This is pretty serious as processAnswers might not be called and the calling process is stuck waiting for the full timeout - s_logger.error(log(seq, "Throwable caught in runInContext, this will cause the management to become unpredictable"), t); + logger.error(log(seq, "Throwable caught in runInContext, this will cause the management to become 
unpredictable"), t); } finally { _outstandingTaskCount.decrementAndGet(); scheduleFromQueue(); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java index 96d40777f8e1..b5687e2636a1 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/SynchronousListener.java @@ -16,7 +16,8 @@ // under the License. package com.cloud.agent.manager; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -29,7 +30,7 @@ import com.cloud.utils.Profiler; public class SynchronousListener implements Listener { - private static final Logger s_logger = Logger.getLogger(SynchronousListener.class); + protected Logger logger = LogManager.getLogger(getClass()); protected Answer[] _answers; protected boolean _disconnected; @@ -70,8 +71,8 @@ public synchronized boolean processAnswers(long agentId, long seq, Answer[] resp @Override public synchronized boolean processDisconnect(long agentId, Status state) { - if (s_logger.isTraceEnabled()) - s_logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters"); + if (logger.isTraceEnabled()) + logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". 
Will notify waiters"); _disconnected = true; notifyAll(); @@ -127,8 +128,8 @@ public synchronized Answer[] waitFor(int s) throws InterruptedException { } profiler.stop(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " + + if (logger.isTraceEnabled()) { + logger.trace("Synchronized command - sending completed, time: " + profiler.getDurationInMillis() + ", answer: " + (_answers != null ? _answers[0].toString() : "null")); } return _answers; diff --git a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java index c774470b758f..641ae4414805 100644 --- a/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/main/java/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java @@ -26,7 +26,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.Host; @@ -39,7 +38,6 @@ @Component public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements AgentLoadBalancerPlanner { - private static final Logger s_logger = Logger.getLogger(AgentLoadBalancerPlanner.class); @Inject HostDao _hostDao = null; @@ -52,7 +50,7 @@ public List getHostsToRebalance(long msId, int avLoad) { List allHosts = sc.list(); if (allHosts.size() <= avLoad) { - s_logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad + + logger.debug("Agent load = " + allHosts.size() + " for management server " + msId + " doesn't exceed average system agent load = " + avLoad + "; so it doesn't participate in agent rebalancing process"); return null; } @@ -64,7 +62,7 @@ public List getHostsToRebalance(long 
msId, int avLoad) { List directHosts = sc.list(); if (directHosts.isEmpty()) { - s_logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + + logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + "; so it doesn't participate in agent rebalancing process"); return null; } @@ -90,23 +88,23 @@ public List getHostsToRebalance(long msId, int avLoad) { int hostsLeft = directHosts.size(); List hostsToReturn = new ArrayList(); - s_logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + + logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + " and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away..."); for (Long cluster : hostToClusterMap.keySet()) { List hostsInCluster = hostToClusterMap.get(cluster); hostsLeft = hostsLeft - hostsInCluster.size(); if (hostsToReturn.size() < hostsToGive) { - s_logger.debug("Trying cluster id=" + cluster); + logger.debug("Trying cluster id=" + cluster); if (hostsInCluster.size() > hostsLeftToGive) { - s_logger.debug("Skipping cluster id=" + cluster + " as it has more hosts than we need: " + hostsInCluster.size() + " vs " + hostsLeftToGive); + logger.debug("Skipping cluster id=" + cluster + " as it has more hosts than we need: " + hostsInCluster.size() + " vs " + hostsLeftToGive); if (hostsLeft >= hostsLeftToGive) { continue; } else { break; } } else { - s_logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster); + logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster); hostsToReturn.addAll(hostsInCluster); hostsLeftToGive = hostsLeftToGive - hostsInCluster.size(); } @@ -115,7 +113,7 @@ public List getHostsToRebalance(long msId, int avLoad) { } } - 
s_logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); + logger.debug("Management server " + msId + " is ready to give away " + hostsToReturn.size() + " hosts"); return hostsToReturn; } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 29c606983988..5e7be6d448ac 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -88,7 +88,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -271,7 +270,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable { - private static final Logger s_logger = Logger.getLogger(VirtualMachineManagerImpl.class); public static final String VM_WORK_JOB_HANDLER = VirtualMachineManagerImpl.class.getSimpleName(); @@ -459,14 +457,14 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final LinkedHashMap> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map> extraDhcpOptions, final Map datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException { - s_logger.info(String.format("allocating virtual machine from template:%s with hostname:%s and %d networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size())); + logger.info(String.format("allocating virtual machine from template:%s with hostname:%s and %d networks", template.getUuid(), vmInstanceName, auxiliaryNetworks.size())); VMInstanceVO persistedVm = null; try { final 
VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); final Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating entries for VM: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Allocating entries for VM: " + vm); } vm.setDataCenterId(plan.getDataCenterId()); @@ -484,8 +482,8 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t } final Long rootDiskSizeFinal = rootDiskSize; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating nics for " + persistedVm); + if (logger.isDebugEnabled()) { + logger.debug("Allocating nics for " + persistedVm); } try { @@ -496,8 +494,8 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating disks for " + persistedVm); + if (logger.isDebugEnabled()) { + logger.debug("Allocating disks for " + persistedVm); } allocateRootVolume(persistedVm, template, rootDiskOfferingInfo, owner, rootDiskSizeFinal); @@ -527,8 +525,8 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t CallContext.unregister(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocation completed for VM: " + persistedVm); + if (logger.isDebugEnabled()) { + logger.debug("Allocation completed for VM: " + persistedVm); } } catch (InsufficientCapacityException | CloudRuntimeException e) { // Failed VM will be in Stopped. 
Transition it to Error, so it can be expunged by ExpungeTask or similar @@ -537,7 +535,7 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t stateTransitTo(persistedVm, VirtualMachine.Event.OperationFailedToError, null); } } catch (NoTransitionException nte) { - s_logger.error(String.format("Failed to transition %s in %s state to Error state", persistedVm, persistedVm.getState().toString())); + logger.error(String.format("Failed to transition %s in %s state to Error state", persistedVm, persistedVm.getState().toString())); } throw e; } @@ -552,7 +550,7 @@ private void allocateRootVolume(VMInstanceVO vm, VirtualMachineTemplate template volumeMgr.allocateRawVolume(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vm, template, owner, null); } else if (template.getFormat() == ImageFormat.BAREMETAL) { - s_logger.debug(String.format("%s has format [%s]. Skipping ROOT volume [%s] allocation.", template.toString(), ImageFormat.BAREMETAL, rootVolumeName)); + logger.debug(String.format("%s has format [%s]. 
Skipping ROOT volume [%s] allocation.", template.toString(), ImageFormat.BAREMETAL, rootVolumeName)); } else { volumeMgr.allocateTemplatedVolumes(Type.ROOT, rootVolumeName, rootDiskOfferingInfo.getDiskOffering(), rootDiskSizeFinal, rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vm, owner); @@ -601,8 +599,8 @@ private boolean isValidSystemVMType(VirtualMachine vm) { protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException { if (vm == null || vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find vm or vm is expunged: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find vm or vm is expunged: " + vm); } return; } @@ -612,17 +610,17 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti try { if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) { - s_logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm); + logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm); throw new CloudRuntimeException("Unable to expunge " + vm); } } catch (final NoTransitionException e) { - s_logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm); + logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm); throw new CloudRuntimeException("Unable to expunge " + vm, e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunging vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Expunging vm " + vm); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); @@ -630,11 +628,11 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType()); List vmNics = profile.getNics(); - s_logger.debug(String.format("Cleaning up NICS 
[%s] of %s.", vmNics.stream().map(nic -> nic.toString()).collect(Collectors.joining(", ")),vm.toString())); + logger.debug(String.format("Cleaning up NICS [%s] of %s.", vmNics.stream().map(nic -> nic.toString()).collect(Collectors.joining(", ")),vm.toString())); final List nicExpungeCommands = hvGuru.finalizeExpungeNics(vm, profile.getNics()); _networkMgr.cleanupNics(profile); - s_logger.debug(String.format("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage. Data from %s.", vm.toString())); + logger.debug(String.format("Cleaning up hypervisor data structures (ex. SRs in XenServer) for managed storage. Data from %s.", vm.toString())); final List volumeExpungeCommands = hvGuru.finalizeExpungeVolumes(vm); @@ -683,7 +681,7 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti if (!cmds.isSuccessful()) { for (final Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - s_logger.warn("Failed to expunge vm due to: " + answer.getDetails()); + logger.warn("Failed to expunge vm due to: " + answer.getDetails()); throw new CloudRuntimeException("Unable to expunge " + vm + " due to " + answer.getDetails()); } } @@ -691,8 +689,8 @@ protected void advanceExpunge(VMInstanceVO vm) throws ResourceUnavailableExcepti } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunged " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Expunged " + vm); } } @@ -701,15 +699,15 @@ protected void handleUnsuccessfulCommands(Commands cmds, VMInstanceVO vm) throws String vmToString = vm.toString(); if (cmds.isSuccessful()) { - s_logger.debug(String.format("The commands [%s] to %s were successful.", cmdsStr, vmToString)); + logger.debug(String.format("The commands [%s] to %s were successful.", cmdsStr, vmToString)); return; } - s_logger.info(String.format("The commands [%s] to %s were unsuccessful. Handling answers.", cmdsStr, vmToString)); + logger.info(String.format("The commands [%s] to %s were unsuccessful. 
Handling answers.", cmdsStr, vmToString)); Answer[] answers = cmds.getAnswers(); if (answers == null) { - s_logger.debug(String.format("There are no answers to commands [%s] to %s.", cmdsStr, vmToString)); + logger.debug(String.format("There are no answers to commands [%s] to %s.", cmdsStr, vmToString)); return; } @@ -717,11 +715,11 @@ protected void handleUnsuccessfulCommands(Commands cmds, VMInstanceVO vm) throws String details = answer.getDetails(); if (!answer.getResult()) { String message = String.format("Unable to expunge %s due to [%s].", vmToString, details); - s_logger.error(message); + logger.error(message); throw new CloudRuntimeException(message); } - s_logger.debug(String.format("Commands [%s] to %s got answer [%s].", cmdsStr, vmToString, details)); + logger.debug(String.format("Commands [%s] to %s got answer [%s].", cmdsStr, vmToString, details)); } } @@ -731,8 +729,8 @@ private void addAllExpungeCommandsFromList(List cmdList, Commands cmds, } for (final Command command : cmdList) { command.setBypassHostMaintenance(isValidSystemVMType(vm)); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Adding expunge command [%s] for VM [%s]", command.toString(), vm.toString())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Adding expunge command [%s] for VM [%s]", command.toString(), vm.toString())); } cmds.addCommand(command); } @@ -785,12 +783,12 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null) { - s_logger.warn(String.format("Unable to get an answer to the modify targets command. Targets [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")))); + logger.warn(String.format("Unable to get an answer to the modify targets command. 
Targets [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")))); return; } if (!answer.getResult()) { - s_logger.warn(String.format("Unable to modify targets [%s] on the host [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), hostId)); + logger.warn(String.format("Unable to modify targets [%s] on the host [%s].", cmd.getTargets().stream().map(target -> target.toString()).collect(Collectors.joining(", ")), hostId)); } } @@ -853,39 +851,39 @@ protected boolean checkWorkItems(final VMInstanceVO vm, final State state) throw while (true) { final ItWorkVO vo = _workDao.findByOutstandingWork(vm.getId(), state); if (vo == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find work for VM: " + vm + " and state: " + state); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find work for VM: " + vm + " and state: " + state); } return true; } if (vo.getStep() == Step.Done) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work for " + vm + " is " + vo.getStep()); + if (logger.isDebugEnabled()) { + logger.debug("Work for " + vm + " is " + vo.getStep()); } return true; } final VMInstanceVO instance = _vmDao.findById(vm.getId()); if (instance != null && instance.getState() == State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is already started in DB: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is already started in DB: " + vm); } return true; } if (vo.getSecondsTaskIsInactive() > VmOpCancelInterval.value()) { - s_logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive()); + logger.warn("The task item for vm " + vm + " has been inactive for " + vo.getSecondsTaskIsInactive()); return false; } try { Thread.sleep(VmOpWaitInterval.value()*1000); } catch (final InterruptedException e) { - s_logger.info("Waiting for " + vm + " but is interrupted"); + logger.info("Waiting for 
" + vm + " but is interrupted"); throw new ConcurrentOperationException("Waiting for " + vm + " but is interrupted"); } - s_logger.debug("Waiting some more to make sure there's no activity on " + vm); + logger.debug("Waiting some more to make sure there's no activity on " + vm); } } @@ -904,13 +902,13 @@ protected Ternary changeToStartState Transaction.execute(new TransactionCallbackWithException, NoTransitionException>() { @Override public Ternary doInTransaction(final TransactionStatus status) throws NoTransitionException { - final Journal journal = new Journal.LogJournal("Creating " + vm, s_logger); + final Journal journal = new Journal.LogJournal("Creating " + vm, logger); final ItWorkVO work = _workDao.persist(workFinal); final ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account); if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); } return new Ternary<>(vm, context, work); } @@ -924,8 +922,8 @@ public Ternary doInTransaction(final return result; } } catch (final NoTransitionException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to transition into Starting state due to " + e.getMessage()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to transition into Starting state due to " + e.getMessage()); } } @@ -934,14 +932,14 @@ public Ternary doInTransaction(final throw new ConcurrentOperationException("Unable to acquire lock on " + vm); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". 
Retry=" + retry); + if (logger.isDebugEnabled()) { + logger.debug("Determining why we're unable to update the state to Starting for " + instance + ". Retry=" + retry); } final State state = instance.getState(); if (state == State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is already started: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is already started: " + vm); } return null; } @@ -956,7 +954,7 @@ public Ternary doInTransaction(final if (state != State.Stopped) { String msg = String.format("Cannot start %s in %s state", vm, state); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } } @@ -1000,8 +998,8 @@ public void advanceStart(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); } @@ -1016,8 +1014,8 @@ public void advanceStart(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); } @@ -1039,10 +1037,10 @@ private void setupAgentSecurity(final Host vmHost, final Map ssh new ArrayList<>(ipAddressDetails.values()), CAManager.CertValidityPeriod.value(), null); final boolean result = caManager.deployCertificate(vmHost, certificate, false, sshAccessDetails); if (!result) { - s_logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName()); + logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName()); } } else { - s_logger.error("Failed to setup keystore and generate CSR for system vm: " + vm.getInstanceName()); + logger.error("Failed to setup keystore and generate CSR for system vm: " + vm.getInstanceName()); } } @@ -1060,13 +1058,13 @@ protected void checkIfTemplateNeededForCreatingVmVolumes(VMInstanceVO vm) { final VMTemplateVO template = _templateDao.findById(vm.getTemplateId()); if (template == null) { String msg = "Template for the VM instance can not be found, VM instance configuration needs to be updated"; - s_logger.error(String.format("%s. 
Template ID: %d seems to be removed", msg, vm.getTemplateId())); + logger.error(String.format("%s. Template ID: %d seems to be removed", msg, vm.getTemplateId())); throw new CloudRuntimeException(msg); } final VMTemplateZoneVO templateZoneVO = templateZoneDao.findByZoneTemplate(vm.getDataCenterId(), template.getId()); if (templateZoneVO == null) { String msg = "Template for the VM instance can not be found in the zone ID: %s, VM instance configuration needs to be updated"; - s_logger.error(String.format("%s. %s", msg, template)); + logger.error(String.format("%s. %s", msg, template)); throw new CloudRuntimeException(msg); } } @@ -1084,7 +1082,7 @@ protected void checkAndAttemptMigrateVmAcrossCluster(final VMInstanceVO vm, fina } Answer[] answer = attemptHypervisorMigration(vm, volumePoolMap, lastHost.getId()); if (answer == null) { - s_logger.warn("Hypervisor inter-cluster migration during VM start failed"); + logger.warn("Hypervisor inter-cluster migration during VM start failed"); return; } // Other network related updates will be done using caller @@ -1118,8 +1116,8 @@ public void orchestrateStart(final String vmUuid, final Map vols = _volsDao.findReadyRootVolumesByInstance(vm.getId()); for (final VolumeVO vol : vols) { final Long volTemplateId = vol.getTemplateId(); if (volTemplateId != null && volTemplateId != template.getId()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool"); + if (logger.isDebugEnabled()) { + logger.debug(vol + " of " + vm + " is READY, but template ids don't match, let the planner reassign a new pool"); } continue; } final StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); if (!pool.isInMaintenance()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Root volume is ready, need to place VM in volume's cluster"); + if (logger.isDebugEnabled()) { + logger.debug("Root volume is ready, need to place VM in 
volume's cluster"); } final long rootVolDcId = pool.getDataCenterId(); final Long rootVolPodId = pool.getPodId(); @@ -1183,8 +1181,8 @@ public void orchestrateStart(final String vmUuid, final Map(ipAddressDetails.values()), CAManager.CertValidityPeriod.value(), null); final boolean result = caManager.deployCertificate(vmHost, certificate, false, sshAccessDetails); if (!result) { - s_logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName()); + logger.error("Failed to setup certificate for system vm: " + vm.getInstanceName()); } return; } catch (final Exception e) { - s_logger.error("Retrying after catching exception while trying to secure agent for systemvm id=" + vm.getId(), e); + logger.error("Retrying after catching exception while trying to secure agent for systemvm id=" + vm.getId(), e); } } throw new CloudRuntimeException("Failed to setup and secure agent for systemvm id=" + vm.getId()); } return; } else { - if (s_logger.isDebugEnabled()) { - s_logger.info("The guru did not like the answers so stopping " + vm); + if (logger.isDebugEnabled()) { + logger.info("The guru did not like the answers so stopping " + vm); } StopCommand stopCmd = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), false); stopCmd.setControlIp(getControlNicIpForVM(vm)); @@ -1381,49 +1379,49 @@ public void orchestrateStart(final String vmUuid, final Map para log = true; } if (log) { - s_logger.info(msgBuf.toString()); + logger.info(msgBuf.toString()); } } @@ -1685,7 +1683,7 @@ public boolean unmanage(String vmUuid) { final List pendingWorkJobs = _workJobDao.listPendingWorkJobs(VirtualMachine.Type.Instance, vm.getId()); if (CollectionUtils.isNotEmpty(pendingWorkJobs) || _haMgr.hasPendingHaWork(vm.getId())) { String msg = "There are pending jobs or HA tasks working on the VM with id: " + vm.getId() + ", can't unmanage the VM."; - s_logger.info(msg); + logger.info(msg); throw new ConcurrentOperationException(msg); } @@ -1693,8 +1691,8 @@ public boolean 
unmanage(String vmUuid) { @Override public Boolean doInTransaction(TransactionStatus status) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unmanaging vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unmanaging vm " + vm); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); @@ -1707,7 +1705,7 @@ public Boolean doInTransaction(TransactionStatus status) { guru.finalizeUnmanage(vm); } catch (Exception e) { - s_logger.error("Error while unmanaging VM " + vm, e); + logger.error("Error while unmanaging VM " + vm, e); return false; } @@ -1747,10 +1745,10 @@ private void unmanageVMVolumes(VMInstanceVO vm) { * - If 'unmanage.vm.preserve.nics' = false: then the NICs are removed while unmanaging */ private void unmanageVMNics(VirtualMachineProfile profile, VMInstanceVO vm) { - s_logger.debug(String.format("Cleaning up NICs of %s.", vm.toString())); + logger.debug(String.format("Cleaning up NICs of %s.", vm.toString())); Boolean preserveNics = UnmanagedVMsManager.UnmanageVMPreserveNic.valueIn(vm.getDataCenterId()); if (BooleanUtils.isTrue(preserveNics)) { - s_logger.debug("Preserve NICs configuration enabled"); + logger.debug("Preserve NICs configuration enabled"); profile.setParameter(VirtualMachineProfile.Param.PreserveNics, true); } _networkMgr.unmanageNics(profile); @@ -1816,7 +1814,7 @@ protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachinePr } if (!answer.getResult()) { final String details = answer.getDetails(); - s_logger.debug("Unable to stop VM due to " + details); + logger.debug("Unable to stop VM due to " + details); return false; } @@ -1830,12 +1828,12 @@ protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachinePr } } } else { - s_logger.error("Invalid answer received in response to a StopCommand for " + vm.getInstanceName()); + logger.error("Invalid answer received in response to a StopCommand for " + vm.getInstanceName()); return false; } } catch (final 
AgentUnavailableException | OperationTimedoutException e) { - s_logger.warn(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e); + logger.warn(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e); if (!force) { return false; } @@ -1847,33 +1845,33 @@ protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachinePr protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachineProfile profile, final ItWorkVO work, final Event event, final boolean cleanUpEvenIfUnableToStop) { final VirtualMachine vm = profile.getVirtualMachine(); final State state = vm.getState(); - s_logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state"); + logger.debug("Cleaning up resources for the vm " + vm + " in " + state + " state"); try { if (state == State.Starting) { if (work != null) { final Step step = work.getStep(); if (step == Step.Starting && !cleanUpEvenIfUnableToStop) { - s_logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step); + logger.warn("Unable to cleanup vm " + vm + "; work state is incorrect: " + step); return false; } if (step == Step.Started || step == Step.Starting || step == Step.Release) { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); return false; } } } if (step != Step.Release && step != Step.Prepare && step != Step.Started && step != Step.Starting) { - s_logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step); + logger.debug("Cleanup is not needed for vm " + vm + "; work state is incorrect: " + step); return true; } } else { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - 
s_logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Starting + " state as a part of cleanup process"); return false; } } @@ -1882,26 +1880,26 @@ protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachinePro } else if (state == State.Stopping) { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Stopping + " state as a part of cleanup process"); return false; } } } else if (state == State.Migrating) { if (vm.getHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); return false; } } if (vm.getLastHostId() != null) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Migrating + " state as a part of cleanup process"); return false; } } } else if (state == State.Running) { if (!sendStop(guru, profile, cleanUpEvenIfUnableToStop, false)) { - s_logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process"); + logger.warn("Failed to stop vm " + vm + " in " + State.Running + " state as a part of cleanup process"); return false; } } @@ -1917,21 +1915,21 @@ protected void releaseVmResources(final VirtualMachineProfile profile, final boo final State state = vm.getState(); try { _networkMgr.release(profile, forced); - s_logger.debug(String.format("Successfully released 
network resources for the VM %s in %s state", vm, state)); + logger.debug(String.format("Successfully released network resources for the VM %s in %s state", vm, state)); } catch (final Exception e) { - s_logger.warn(String.format("Unable to release some network resources for the VM %s in %s state", vm, state), e); + logger.warn(String.format("Unable to release some network resources for the VM %s in %s state", vm, state), e); } try { if (vm.getHypervisorType() != HypervisorType.BareMetal) { volumeMgr.release(profile); - s_logger.debug(String.format("Successfully released storage resources for the VM %s in %s state", vm, state)); + logger.debug(String.format("Successfully released storage resources for the VM %s in %s state", vm, state)); } } catch (final Exception e) { - s_logger.warn(String.format("Unable to release storage resources for the VM %s in %s state", vm, state), e); + logger.warn(String.format("Unable to release storage resources for the VM %s in %s state", vm, state), e); } - s_logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); + logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); } @Override @@ -2033,42 +2031,42 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl ConcurrentOperationException { final State state = vm.getState(); if (state == State.Stopped) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is already stopped: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is already stopped: " + vm); } return; } if (state == State.Destroyed || state == State.Expunging || state == State.Error) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stopped called on " + vm + " but the state is " + state); + if (logger.isDebugEnabled()) { + logger.debug("Stopped called on " + vm + " but the state is " + state); } return; } final ItWorkVO work = _workDao.findByOutstandingWork(vm.getId(), vm.getState()); 
if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found an outstanding work item for this vm " + vm + " with state:" + vm.getState() + ", work id:" + work.getId()); } } final Long hostId = vm.getHostId(); if (hostId == null) { if (!cleanUpEvenIfUnableToStop) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState()); + if (logger.isDebugEnabled()) { + logger.debug("HostId is null but this is not a forced stop, cannot stop vm " + vm + " with state:" + vm.getState()); } throw new CloudRuntimeException("Unable to stop " + vm); } try { stateTransitTo(vm, Event.AgentReportStopped, null, null); } catch (final NoTransitionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Updating work item to Done, id:" + work.getId()); } work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -2077,7 +2075,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } else { HostVO host = _hostDao.findById(hostId); if (!cleanUpEvenIfUnableToStop && vm.getState() == State.Running && host.getResourceState() == ResourceState.PrepareForMaintenance) { - s_logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: " + vm.getId() + " is not allowed"); + logger.debug("Host is in PrepareForMaintenance state - Stop VM operation on the VM id: " + vm.getId() + " is not allowed"); throw new CloudRuntimeException("Stop VM operation on the VM id: " + vm.getId() + " is not allowed as host is preparing for maintenance mode"); } } @@ -2094,27 +2092,27 @@ private 
void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl throw new CloudRuntimeException("We cannot stop " + vm + " when it is in state " + vm.getState()); } final boolean doCleanup = true; - if (s_logger.isDebugEnabled()) { - s_logger.warn("Unable to transition the state but we're moving on because it's forced stop", e1); + if (logger.isDebugEnabled()) { + logger.warn("Unable to transition the state but we're moving on because it's forced stop", e1); } if (doCleanup) { if (cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.StopRequested, cleanUpEvenIfUnableToStop)) { try { - if (s_logger.isDebugEnabled() && work != null) { - s_logger.debug("Updating work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled() && work != null) { + logger.debug("Updating work item to Done, id:" + work.getId()); } if (!changeState(vm, Event.AgentReportStopped, null, work, Step.Done)) { throw new CloudRuntimeException("Unable to stop " + vm); } } catch (final NoTransitionException e) { - s_logger.warn("Unable to cleanup " + vm); + logger.warn("Unable to cleanup " + vm); throw new CloudRuntimeException("Unable to stop " + vm, e); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to cleanup VM: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Failed to cleanup VM: " + vm); } throw new CloudRuntimeException("Failed to cleanup " + vm + " , current state " + vm.getState()); } @@ -2165,19 +2163,19 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } } catch (AgentUnavailableException | OperationTimedoutException e) { - s_logger.warn(String.format("Unable to stop %s due to [%s].", profile.toString(), e.toString()), e); + logger.warn(String.format("Unable to stop %s due to [%s].", profile.toString(), e.toString()), e); } finally { if (!stopped) { if (!cleanUpEvenIfUnableToStop) { - s_logger.warn("Unable to stop vm " + vm); + logger.warn("Unable to stop vm " + vm); try { stateTransitTo(vm, 
Event.OperationFailed, vm.getHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unable to transition the state " + vm, e); + logger.warn("Unable to transition the state " + vm, e); } throw new CloudRuntimeException("Unable to stop " + vm); } else { - s_logger.warn("Unable to actually stop " + vm + " but continue with release because it's a force stop"); + logger.warn("Unable to actually stop " + vm + " but continue with release because it's a force stop"); vmGuru.finalizeStop(profile, answer); } } else { @@ -2191,16 +2189,16 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is stopped on the host. Proceeding to release resource held."); + if (logger.isDebugEnabled()) { + logger.debug(vm + " is stopped on the host. Proceeding to release resource held."); } releaseVmResources(profile, cleanUpEvenIfUnableToStop); try { if (work != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating the outstanding work item to Done, id:" + work.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Updating the outstanding work item to Done, id:" + work.getId()); } work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -2217,7 +2215,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl } } catch (final NoTransitionException e) { String message = String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()); - s_logger.warn(message, e); + logger.warn(message, e); throw new CloudRuntimeException(message, e); } } @@ -2252,14 +2250,14 @@ public boolean stateTransitTo(final VirtualMachine vm1, final VirtualMachine.Eve public void destroy(final String vmUuid, final boolean expunge) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null || vm.getState() == State.Destroyed || vm.getState() == 
State.Expunging || vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find vm or vm is destroyed: " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find vm or vm is destroyed: " + vm); } return; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Destroying vm " + vm + ", expunge flag " + (expunge ? "on" : "off")); + if (logger.isDebugEnabled()) { + logger.debug("Destroying vm " + vm + ", expunge flag " + (expunge ? "on" : "off")); } advanceStop(vmUuid, VmDestroyForcestop.value()); @@ -2272,19 +2270,19 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws VMInstanceVO vm = _vmDao.findByUuid(vmUuid); try { if (!stateTransitTo(vm, VirtualMachine.Event.DestroyRequested, vm.getHostId())) { - s_logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); + logger.debug("Unable to destroy the vm because it is not in the correct state: " + vm); throw new CloudRuntimeException("Unable to destroy " + vm); } else { if (expunge) { if (!stateTransitTo(vm, VirtualMachine.Event.ExpungeOperation, vm.getHostId())) { - s_logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm); + logger.debug("Unable to expunge the vm because it is not in the correct state: " + vm); throw new CloudRuntimeException("Unable to expunge " + vm); } } } } catch (final NoTransitionException e) { String message = String.format("Unable to destroy %s due to [%s].", vm.toString(), e.getMessage()); - s_logger.debug(message, e); + logger.debug(message, e); throw new CloudRuntimeException(message, e); } } @@ -2301,7 +2299,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws private void deleteVMSnapshots(VMInstanceVO vm, boolean expunge) { if (! 
vm.getHypervisorType().equals(HypervisorType.VMware)) { if (!_vmSnapshotMgr.deleteAllVMSnapshots(vm.getId(), null)) { - s_logger.debug("Unable to delete all snapshots for " + vm); + logger.debug("Unable to delete all snapshots for " + vm); throw new CloudRuntimeException("Unable to delete vm snapshots for " + vm); } } @@ -2331,7 +2329,7 @@ protected boolean checkVmOnHost(final VirtualMachine vm, final long hostId) thro if (command != null) { RestoreVMSnapshotAnswer restoreVMSnapshotAnswer = (RestoreVMSnapshotAnswer) _agentMgr.send(hostId, command); if (restoreVMSnapshotAnswer == null || !restoreVMSnapshotAnswer.getResult()) { - s_logger.warn("Unable to restore the vm snapshot from image file after live migration of vm with vmsnapshots: " + restoreVMSnapshotAnswer == null ? "null answer" : restoreVMSnapshotAnswer.getDetails()); + logger.warn("Unable to restore the vm snapshot from image file after live migration of vm with vmsnapshots: " + (restoreVMSnapshotAnswer == null ? "null answer" : restoreVMSnapshotAnswer.getDetails())); } } } @@ -2372,8 +2370,8 @@ private void orchestrateStorageMigration(final String vmUuid, final Map volumeToPoolMap = prepareVmStorageMigration(vm, volumeToPool); try { - if(s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Offline migration of %s vm %s with volumes", + if(logger.isDebugEnabled()) { + logger.debug(String.format("Offline migration of %s vm %s with volumes", vm.getHypervisorType().toString(), vm.getInstanceName())); } @@ -2384,14 +2382,14 @@ private void orchestrateStorageMigration(final String vmUuid, final Map volumeToPool, Long sourceClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { - boolean isDebugEnabled = s_logger.isDebugEnabled(); + boolean isDebugEnabled = logger.isDebugEnabled(); if(isDebugEnabled) { String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); - s_logger.debug(msg); + 
logger.debug(msg); } StoragePool rootVolumePool = null; @@ -2439,7 +2437,7 @@ private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map(); } List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); - if(s_logger.isDebugEnabled()) { + if(logger.isDebugEnabled()) { String msg = String.format("Found %d volumes for VM %s(uuid:%s, id:%d)", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); - s_logger.debug(msg); + logger.debug(msg); } for (VolumeObjectTO result : results ) { - if(s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Updating volume (%d) with path '%s' on pool '%s'", result.getId(), result.getPath(), result.getDataStoreUuid())); + if(logger.isDebugEnabled()) { + logger.debug(String.format("Updating volume (%d) with path '%s' on pool '%s'", result.getId(), result.getPath(), result.getDataStoreUuid())); } VolumeVO volume = _volsDao.findById(result.getId()); StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid()); @@ -2503,7 +2501,7 @@ private void migrateThroughHypervisorOrStorage(VMInstanceVO vm, Map prepareVmStorageMigration(VMInstanceVO vm, Map< } if (dataCenterId == null) { String msg = "Unable to migrate vm: failed to create deployment destination with given volume to pool map"; - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg); } final DataCenterDeployment destination = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); @@ -2545,7 +2543,7 @@ private Map prepareVmStorageMigration(VMInstanceVO vm, Map< stateTransitTo(vm, Event.StorageMigrationRequested, null); } catch (final NoTransitionException e) { String msg = String.format("Unable to migrate vm: %s", vm.getUuid()); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } return volumeToPoolMap; @@ -2607,9 +2605,9 @@ private void postStorageMigrationCleanup(VMInstanceVO vm, Map volumes = _volsDao.findCreatedByInstance(vm.getId()); for (final VolumeVO 
volume : volumes) { if (!_storagePoolDao.findById(volume.getPoolId()).getScope().equals(ScopeType.ZONE)) { - s_logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " + logger.info("Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " + dest.getHost().getId()); throw new CloudRuntimeException( "Source and destination host are not in same cluster and all volumes are not on zone wide primary store, unable to migrate to host: " @@ -2705,8 +2703,8 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy final VirtualMachineGuru vmGuru = getVmGuru(vm); if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not Running, unable to migrate the vm " + vm); } throw new CloudRuntimeException("VM is not Running, unable to migrate the vm currently " + vm + " , current state: " + vm.getState().toString()); } @@ -2768,24 +2766,24 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy volumeMgr.release(vm.getId(), dstHostId); } - s_logger.info("Migration cancelled because state has changed: " + vm); + logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); - s_logger.info("Migration cancelled because " + e1.getMessage()); + logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } catch (final CloudRuntimeException e2) { _networkMgr.rollbackNicForMigration(vmSrc, profile); 
volumeMgr.release(vm.getId(), dstHostId); - s_logger.info("Migration cancelled because " + e2.getMessage()); + logger.info("Migration cancelled because " + e2.getMessage()); work.setStep(Step.Done); _workDao.update(work.getId(), work); try { stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final NoTransitionException e3) { - s_logger.warn(e3.getMessage()); + logger.warn(e3.getMessage()); } throw new CloudRuntimeException("Migration cancelled because " + e2.getMessage()); } @@ -2803,7 +2801,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } } catch (final OperationTimedoutException e) { if (e.isActive()) { - s_logger.warn("Active migration command so scheduling a restart for " + vm, e); + logger.warn("Active migration command so scheduling a restart for " + vm, e); _haMgr.scheduleRestart(vm, true); } throw new AgentUnavailableException("Operation timed out on migrating " + vm, dstHostId); @@ -2819,22 +2817,22 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy try { if (!checkVmOnHost(vm, dstHostId)) { - s_logger.error("Unable to complete migration for " + vm); + logger.error("Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException e) { - s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - s_logger.warn("Error while checking the vm " + vm + " on host " + dstHostId, e); + logger.warn("Error while checking the vm " + vm + " on host " + dstHostId, e); } migrated = true; } finally { if (!migrated) { - s_logger.info("Migration was 
unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. Cleaning up: " + vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); @@ -2844,13 +2842,13 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy try { _agentMgr.send(dstHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException ae) { - s_logger.warn("Looks like the destination Host is unavailable for cleanup", ae); + logger.warn("Looks like the destination Host is unavailable for cleanup", ae); } _networkMgr.setHypervisorHostname(profile, dest, false); try { stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final NoTransitionException e) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } } else { _networkMgr.commitNicForMigration(vmSrc, profile); @@ -2876,7 +2874,7 @@ protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMac Map vlanToPersistenceMap = getVlanToPersistenceMapForVM(vmInstance.getId()); if (MapUtils.isNotEmpty(vlanToPersistenceMap)) { - s_logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO)); + logger.debug(String.format("Setting VLAN persistence to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), virtualMachineTO)); migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap); } @@ -2887,7 +2885,7 @@ protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMac Map answerDpdkInterfaceMapping = prepareForMigrationAnswer.getDpdkInterfaceMapping(); if (MapUtils.isNotEmpty(answerDpdkInterfaceMapping) && dpdkInterfaceMapping != null) { - s_logger.debug(String.format("Setting DPDK interface mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(vlanToPersistenceMap), + logger.debug(String.format("Setting DPDK interface 
mapping to [%s] as part of migrate command for VM [%s].", new Gson().toJson(answerDpdkInterfaceMapping), virtualMachineTO)); dpdkInterfaceMapping.putAll(answerDpdkInterfaceMapping); migrateCommand.setDpdkInterfaceMapping(dpdkInterfaceMapping); @@ -2895,7 +2893,7 @@ protected MigrateCommand buildMigrateCommand(VMInstanceVO vmInstance, VirtualMac Integer newVmCpuShares = prepareForMigrationAnswer.getNewVmCpuShares(); if (newVmCpuShares != null) { - s_logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO)); + logger.debug(String.format("Setting CPU shares to [%d] as part of migrate command for VM [%s].", newVmCpuShares, virtualMachineTO)); migrateCommand.setNewVmCpuShares(newVmCpuShares); } @@ -2969,7 +2967,7 @@ protected Map buildMapUsingUserInformation(VirtualMachinePr volume.getUuid(), targetPool.getUuid(), profile.getUuid(), targetHost.getUuid())); } if (currentPool.getId() == targetPool.getId()) { - s_logger.info(String.format("The volume [%s] is already allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid())); + logger.info(String.format("The volume [%s] is already allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid())); volumeToPoolObjectMap.put(volume, targetPool); } @@ -3128,11 +3126,11 @@ protected List getCandidateStoragePoolsToMigrateLocalVolume(Virtual private void moveVmToMigratingState(final T vm, final Long hostId, final ItWorkVO work) throws ConcurrentOperationException { try { if (!changeState(vm, Event.MigrationRequested, hostId, work, Step.Migrating)) { - s_logger.error("Migration cancelled because state has changed: " + vm); + logger.error("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e) { - s_logger.error("Migration cancelled 
because " + e.getMessage(), e); throw new ConcurrentOperationException("Migration cancelled because " + e.getMessage()); } } @@ -3140,11 +3138,11 @@ private void moveVmToMigratingState(final T vm, final L private void moveVmOutofMigratingStateOnSuccess(final T vm, final Long hostId, final ItWorkVO work) throws ConcurrentOperationException { try { if (!changeState(vm, Event.OperationSucceeded, hostId, work, Step.Started)) { - s_logger.error("Unable to change the state for " + vm); + logger.error("Unable to change the state for " + vm); throw new ConcurrentOperationException("Unable to change the state for " + vm); } } catch (final NoTransitionException e) { - s_logger.error("Unable to change state due to " + e.getMessage(), e); + logger.error("Unable to change state due to " + e.getMessage(), e); throw new ConcurrentOperationException("Unable to change state due to " + e.getMessage()); } } @@ -3256,9 +3254,9 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo AttachOrDettachConfigDriveCommand dettachCommand = new AttachOrDettachConfigDriveCommand(vm.getInstanceName(), vmData, VmConfigDriveLabel.value(), false); try { _agentMgr.send(srcHost.getId(), dettachCommand); - s_logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost); + logger.debug("Deleted config drive ISO for vm " + vm.getInstanceName() + " In host " + srcHost); } catch (OperationTimedoutException e) { - s_logger.error("TIme out occurred while exeuting command AttachOrDettachConfigDrive " + e.getMessage(), e); + logger.error("TIme out occurred while exeuting command AttachOrDettachConfigDrive " + e.getMessage(), e); } } @@ -3270,22 +3268,22 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo try { if (!checkVmOnHost(vm, destHostId)) { - s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm); + logger.error("Vm not found on destination host. 
Unable to complete migration for " + vm); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (final AgentUnavailableException e) { - s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e); + logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId, e); } cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true); throw new CloudRuntimeException("VM not found on destination host. Unable to complete migration for " + vm); } } catch (final OperationTimedoutException e) { - s_logger.error("Error while checking the vm " + vm + " is on host " + destHost, e); + logger.error("Error while checking the vm " + vm + " is on host " + destHost, e); } migrated = true; } finally { if (!migrated) { - s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); + logger.info("Migration was unsuccessful. Cleaning up: " + vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), destHostId); @@ -3297,9 +3295,9 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo vm.setPodIdToDeployIn(srcHost.getPodId()); stateTransitTo(vm, Event.OperationFailed, srcHostId); } catch (final AgentUnavailableException e) { - s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e); + logger.warn("Looks like the destination Host is unavailable for cleanup.", e); } catch (final NoTransitionException e) { - s_logger.error("Error while transitioning vm from migrating to running state.", e); + logger.error("Error while transitioning vm from migrating to running state.", e); } _networkMgr.setHypervisorHostname(profile, destination, false); } else { @@ -3328,7 +3326,7 @@ protected void cancelWorkItems(final long nodeId) { try { final List works = _workDao.listWorkInProgressFor(nodeId); for (final ItWorkVO work : works) { - s_logger.info("Handling unfinished work item: " + work); + 
logger.info("Handling unfinished work item: " + work); try { final VMInstanceVO vm = _vmDao.findById(work.getInstanceId()); if (vm != null) { @@ -3349,7 +3347,7 @@ protected void cancelWorkItems(final long nodeId) { } } } catch (final Exception e) { - s_logger.error("Error while handling " + work, e); + logger.error("Error while handling " + work, e); } } } finally { @@ -3371,7 +3369,7 @@ public void migrateAway(final String vmUuid, final long srcHostId) throws Insuff try { orchestrateMigrateAway(vmUuid, srcHostId, null); } catch (final InsufficientServerCapacityException e) { - s_logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm " + vmUuid + " with original planner, sending HAPlanner"); orchestrateMigrateAway(vmUuid, srcHostId, _haMgr.getHAPlanner()); } } finally { @@ -3394,7 +3392,7 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { String message = String.format("Unable to find VM with uuid [%s].", vmUuid); - s_logger.warn(message); + logger.warn(message); throw new CloudRuntimeException(message); } @@ -3404,7 +3402,7 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f final Long hostId = vm.getHostId(); if (hostId == null) { String message = String.format("Unable to migrate %s due to it does not have a host id.", vm.toString()); - s_logger.warn(message); + logger.warn(message); throw new CloudRuntimeException(message); } @@ -3430,15 +3428,15 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f dest = _dpMgr.planDeployment(profile, plan, excludes, planner); } catch (final AffinityConflictException e2) { String message = String.format("Unable to create deployment, affinity rules associated to the %s conflict.", vm.toString()); - s_logger.warn(message, e2); + logger.warn(message, e2); throw new CloudRuntimeException(message, 
e2); } if (dest == null) { - s_logger.warn("Unable to find destination for migrating the vm " + profile); + logger.warn("Unable to find destination for migrating the vm " + profile); throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", DataCenter.class, host.getDataCenterId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found destination " + dest + " for migrating to."); + if (logger.isDebugEnabled()) { + logger.debug("Found destination " + dest + " for migrating to."); } excludes.addHost(dest.getHost().getId()); @@ -3446,14 +3444,14 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f migrate(vm, srcHostId, dest); return; } catch (ResourceUnavailableException | ConcurrentOperationException e) { - s_logger.warn(String.format("Unable to migrate %s to %s due to [%s]", vm.toString(), dest.getHost().toString(), e.getMessage()), e); + logger.warn(String.format("Unable to migrate %s to %s due to [%s]", vm.toString(), dest.getHost().toString(), e.getMessage()), e); } try { advanceStop(vmUuid, true); throw new CloudRuntimeException("Unable to migrate " + vm); } catch (final ResourceUnavailableException | ConcurrentOperationException | OperationTimedoutException e) { - s_logger.error(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e); + logger.error(String.format("Unable to stop %s due to [%s].", vm.toString(), e.getMessage()), e); throw new CloudRuntimeException("Unable to migrate " + vm); } } @@ -3477,7 +3475,7 @@ public boolean checkIfVmHasClusterWideVolumes(Long vmId) { public DataCenterDeployment getMigrationDeployment(final VirtualMachine vm, final Host host, final Long poolId, final ExcludeList excludes) { if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId()) && (HypervisorType.VMware.equals(host.getHypervisorType()) || !checkIfVmHasClusterWideVolumes(vm.getId()))) { - s_logger.info("Searching for hosts in the zone for vm migration"); + 
logger.info("Searching for hosts in the zone for vm migration"); List clustersToExclude = _clusterDao.listAllClusters(host.getDataCenterId()); List clusterList = _clusterDao.listByDcHyType(host.getDataCenterId(), host.getHypervisorType().toString()); for (ClusterVO cluster : clusterList) { @@ -3497,13 +3495,13 @@ public DataCenterDeployment getMigrationDeployment(final VirtualMachine vm, fina protected class CleanupTask extends ManagedContextRunnable { @Override protected void runInContext() { - s_logger.debug("VM Operation Thread Running"); + logger.debug("VM Operation Thread Running"); try { _workDao.cleanup(VmOpCleanupWait.value()); final Date cutDate = new Date(DateUtil.currentGMTTime().getTime() - VmOpCleanupInterval.value() * 1000); _workJobDao.expungeCompletedWorkJobs(cutDate); } catch (final Exception e) { - s_logger.error("VM Operations failed due to ", e); + logger.error("VM Operations failed due to ", e); } } } @@ -3539,8 +3537,8 @@ public void advanceReboot(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); } orchestrateReboot(vmUuid, params); @@ -3550,8 +3548,8 @@ public void advanceReboot(final String vmUuid, final Map":params.get(VirtualMachineProfile.Param.BootIntoSetup)))); } final Outcome outcome = rebootVmThroughJobQueue(vmUuid, params); @@ -3566,7 +3564,7 @@ private void orchestrateReboot(final String vmUuid, final Map nicDetails = nic.getDetails() == null ? 
new HashMap<>() : nic.getDetails(); - s_logger.debug("Found PVLAN type: " + pvlanTypeDetail.getValue() + " on network details, adding it as part of the PlugNicCommand"); + logger.debug("Found PVLAN type: " + pvlanTypeDetail.getValue() + " on network details, adding it as part of the PlugNicCommand"); nicDetails.putIfAbsent(NetworkOffering.Detail.pvlanType, pvlanTypeDetail.getValue()); nic.setDetails(nicDetails); } @@ -4549,7 +4547,7 @@ public boolean plugNic(final Network network, final NicTO nic, final VirtualMach _agentMgr.send(dest.getHost().getId(), cmds); final PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class); if (plugNicAnswer == null || !plugNicAnswer.getResult()) { - s_logger.warn("Unable to plug nic for vm " + vm.getName()); + logger.warn("Unable to plug nic for vm " + vm.getName()); result = false; } } catch (final OperationTimedoutException e) { @@ -4557,7 +4555,7 @@ public boolean plugNic(final Network network, final NicTO nic, final VirtualMach } } else { String message = String.format("Unable to apply PlugNic, VM [%s] is not in the right state (\"Running\"). 
VM state [%s].", router.toString(), router.getState()); - s_logger.warn(message); + logger.warn(message); throw new ResourceUnavailableException(message, DataCenter.class, router.getDataCenterId()); @@ -4589,17 +4587,17 @@ public boolean unplugNic(final Network network, final NicTO nic, final VirtualMa final UnPlugNicAnswer unplugNicAnswer = cmds.getAnswer(UnPlugNicAnswer.class); if (unplugNicAnswer == null || !unplugNicAnswer.getResult()) { - s_logger.warn("Unable to unplug nic from router " + router); + logger.warn("Unable to unplug nic from router " + router); result = false; } } catch (final OperationTimedoutException e) { throw new AgentUnavailableException("Unable to unplug nic from rotuer " + router + " from network " + network, dest.getHost().getId(), e); } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Vm " + router.getInstanceName() + " is in " + router.getState() + ", so not sending unplug nic command to the backend"); + logger.debug("Vm " + router.getInstanceName() + " is in " + router.getState() + ", so not sending unplug nic command to the backend"); } else { String message = String.format("Unable to apply unplug nic, VM [%s] is not in the right state (\"Running\"). VM state [%s].", router.toString(), router.getState()); - s_logger.warn(message); + logger.warn(message); throw new ResourceUnavailableException(message, DataCenter.class, router.getDataCenterId()); } @@ -4685,7 +4683,7 @@ private VMInstanceVO orchestrateReConfigureVm(String vmUuid, ServiceOffering old Answer reconfigureAnswer = _agentMgr.send(vm.getHostId(), scaleVmCommand); if (reconfigureAnswer == null || !reconfigureAnswer.getResult()) { - s_logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); + logger.error("Unable to scale vm due to " + (reconfigureAnswer == null ? 
"" : reconfigureAnswer.getDetails())); throw new CloudRuntimeException("Unable to scale vm due to " + (reconfigureAnswer == null ? "" : reconfigureAnswer.getDetails())); } @@ -4791,10 +4789,10 @@ protected void HandlePowerStateReport(final String subject, final String senderA break; } } else { - s_logger.warn("VM " + vmId + " no longer exists when processing VM state report"); + logger.warn("VM " + vmId + " no longer exists when processing VM state report"); } } else { - s_logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters"); + logger.info("There is pending job or HA tasks working on the VM. vm id: " + vmId + ", postpone power-change report by resetting power-change counters"); _vmDao.resetVmPowerStateTracking(vmId); } } @@ -4805,15 +4803,15 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { switch (vm.getState()) { case Starting: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + 
vm.getInstanceName() @@ -4823,23 +4821,23 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { case Running: try { if (vm.getHostId() != null && !vm.getHostId().equals(vm.getPowerHostId())) { - s_logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); + logger.info("Detected out of band VM migration from host " + vm.getHostId() + " to host " + vm.getPowerHostId()); } stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } break; case Stopping: case Stopped: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState() @@ -4847,28 +4845,28 @@ private void handlePowerOnReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, vm.getDomainId(), EventTypes.EVENT_VM_START, "Out of band VM power on", vm.getId(), ApiCommandResourceType.VirtualMachine.toString()); - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running 
state according to power-on report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); break; case Destroyed: case Expunging: - s_logger.info("Receive power on report when VM is in destroyed or expunging state. vm: " + logger.info("Receive power on report when VM is in destroyed or expunging state. vm: " + vm.getId() + ", state: " + vm.getState()); break; case Migrating: - s_logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); + logger.info("VM " + vm.getInstanceName() + " is at " + vm.getState() + " and we received a power-on report while there is no pending jobs on it"); try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOnReport, vm.getPowerHostId()); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor"); break; case Error: default: - s_logger.info("Receive power on report when VM is in error or unexpected state. vm: " + logger.info("Receive power on report when VM is in error or unexpected state. 
vm: " + vm.getId() + ", state: " + vm.getState()); break; } @@ -4883,8 +4881,8 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM,vm.getDomainId(), EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), ApiCommandResourceType.VirtualMachine.toString()); case Migrating: - if (s_logger.isInfoEnabled()) { - s_logger.info( + if (logger.isInfoEnabled()) { + logger.info( String.format("VM %s is at %s and we received a %s report while there is no pending jobs on it" , vm.getInstanceName(), vm.getState(), vm.getPowerState())); } @@ -4892,11 +4890,11 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { - s_logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); + logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { - s_logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); + logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); } return; } @@ -4919,14 +4917,14 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { try { stateTransitTo(vm, VirtualMachine.Event.FollowAgentPowerOffReport, null); } catch (final NoTransitionException e) { - s_logger.warn("Unexpected VM state transition exception, race-condition?", e); + logger.warn("Unexpected VM state transition exception, race-condition?", e); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_SYNC, vm.getDataCenterId(), vm.getPodIdToDeployIn(), VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed 
(" + vm.getState() + " -> Stopped) from out-of-context transition."); - s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor"); + logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Stopped state according to power-off report from hypervisor"); break; @@ -4996,7 +4994,7 @@ private List listStalledVMInTransitionStateOnUpHost(final long hostId, fin l.add(rs.getLong(1)); } } catch (SQLException e) { - s_logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\"} due to [%s].", sql, hostId, cutTimeStr, e.getMessage()), e); + logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\"} due to [%s].", sql, hostId, cutTimeStr, e.getMessage()), e); } } return l; @@ -5025,7 +5023,7 @@ private List listVMInTransitionStateWithRecentReportOnUpHost(final long ho l.add(rs.getLong(1)); } } catch (final SQLException e) { - s_logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage()), e); + logger.error(String.format("Unable to execute SQL [%s] with params {\"h.id\": %s, \"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage()), e); } return l; } @@ -5053,7 +5051,7 @@ private List listStalledVMInTransitionStateOnDisconnectedHosts(final Date l.add(rs.getLong(1)); } } catch (final SQLException e) { - s_logger.error(String.format("Unable to execute SQL [%s] with params {\"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, cutTimeStr, jobStatusInProgress, e.getMessage()), e); + logger.error(String.format("Unable to execute SQL [%s] with params {\"i.power_state_update_time\": \"%s\", \"j.job_status\": %s} due to [%s].", sql, 
cutTimeStr, jobStatusInProgress, e.getMessage()), e); } return l; } @@ -5331,8 +5329,8 @@ public Outcome addVmToNetworkThroughJobQueue( } workJob = pendingWorkJobs.get(0); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("no jobs to add network %s for vm %s yet", network, vm)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("no jobs to add network %s for vm %s yet", network, vm)); } workJob = createVmWorkJobToAddNetwork(vm, network, requested, context, user, account); @@ -5372,7 +5370,7 @@ private VmWorkJobVO createVmWorkJobToAddNetwork( } catch (CloudRuntimeException e) { if (e.getCause() instanceof EntityExistsException) { String msg = String.format("A job to add a nic for network %s to vm %s already exists", network.getUuid(), vm.getUuid()); - s_logger.warn(msg, e); + logger.warn(msg, e); } throw e; } @@ -5449,15 +5447,15 @@ private Pair orchestrateStart(final VmWorkStart work) th VMInstanceVO vm = findVmById(work.getVmId()); Boolean enterSetup = (Boolean)work.getParams().get(VirtualMachineProfile.Param.BootIntoSetup); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("orchestrating VM start for '%s' %s set to %s", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("orchestrating VM start for '%s' %s set to %s", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup)); } try { orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner())); } catch (CloudRuntimeException e){ String message = String.format("Unable to orchestrate start %s due to [%s].", vm.toString(), e.getMessage()); - s_logger.warn(message, e); + logger.warn(message, e); CloudRuntimeException ex = new CloudRuntimeException(message); return new Pair<>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex)); } @@ -5469,7 +5467,7 @@ private Pair 
orchestrateStop(final VmWorkStop work) thro final VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, work.getVmId()); if (vm == null) { String message = String.format("Unable to find VM [%s].", work.getVmId()); - s_logger.warn(message); + logger.warn(message); throw new CloudRuntimeException(message); } @@ -5492,7 +5490,7 @@ private Pair orchestrateMigrateAway(final VmWorkMigrateA try { orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), null); } catch (final InsufficientServerCapacityException e) { - s_logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner", e); + logger.warn("Failed to deploy vm " + vm.getId() + " with original planner, sending HAPlanner", e); orchestrateMigrateAway(vm.getUuid(), work.getSrcHostId(), _haMgr.getHAPlanner()); } @@ -5651,7 +5649,7 @@ public UserVm restoreVirtualMachine(final long vmId, final Long newTemplateId) t } private UserVm orchestrateRestoreVirtualMachine(final long vmId, final Long newTemplateId) throws ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("Restoring vm " + vmId + " with new templateId " + newTemplateId); + logger.debug("Restoring vm " + vmId + " with new templateId " + newTemplateId); final CallContext context = CallContext.current(); final Account account = context.getCallingAccount(); return _userVmService.restoreVirtualMachine(account, vmId, newTemplateId); @@ -5719,7 +5717,7 @@ public Boolean updateDefaultNicForVM(final VirtualMachine vm, final Nic nic, fin private Boolean orchestrateUpdateDefaultNicForVM(final VirtualMachine vm, final Nic nic, final Nic defaultNic) { - s_logger.debug("Updating default nic of vm " + vm + " from nic " + defaultNic.getUuid() + " to nic " + nic.getUuid()); + logger.debug("Updating default nic of vm " + vm + " from nic " + defaultNic.getUuid() + " to nic " + nic.getUuid()); Integer chosenID = nic.getDeviceId(); Integer existingID = defaultNic.getDeviceId(); NicVO nicVO = 
_nicsDao.findById(nic.getId()); @@ -5802,8 +5800,8 @@ public Pair findClusterAndHostIdForVm(VirtualMachine vm, boolean ski Long clusterId = null; if(hostId == null) { hostId = vm.getLastHostId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("host id is null, using last host id %d", hostId) ); + if (logger.isDebugEnabled()) { + logger.debug(String.format("host id is null, using last host id %d", hostId) ); } } if (hostId == null) { @@ -5889,7 +5887,7 @@ protected Pair retrievePendingWorkJob(Long vmId, String vmUui if (vm == null) { String message = String.format("Could not find a VM with the uuid [%s]. Unable to continue validations with command [%s] through job queue.", vmUuid, commandName); - s_logger.error(message); + logger.error(message); throw new RuntimeException(message); } @@ -5946,7 +5944,7 @@ protected VMInstanceVO findVmById(Long vmId) { VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, vmId); if (vm == null) { - s_logger.warn(String.format("Could not find VM [%s].", vmId)); + logger.warn(String.format("Could not find VM [%s].", vmId)); } assert vm != null; @@ -5978,12 +5976,12 @@ protected VMInstanceVO findVmById(Long vmId) { } Answer answer = _agentMgr.easySend(hostId, new GetVmStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to obtain VM statistics."); + logger.warn("Unable to obtain VM statistics."); return vmStatsById; } else { HashMap vmStatsByName = ((GetVmStatsAnswer)answer).getVmStatsMap(); if (vmStatsByName == null) { - s_logger.warn("Unable to obtain VM statistics."); + logger.warn("Unable to obtain VM statistics."); return vmStatsById; } for (Map.Entry entry : vmStatsByName.entrySet()) { @@ -6005,12 +6003,12 @@ public HashMap> getVmDiskStatistics(long hostI } Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), 
hostName)); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to obtain VM disk statistics."); + logger.warn("Unable to obtain VM disk statistics."); return vmDiskStatsById; } else { HashMap> vmDiskStatsByName = ((GetVmDiskStatsAnswer)answer).getVmDiskStatsMap(); if (vmDiskStatsByName == null) { - s_logger.warn("Unable to obtain VM disk statistics."); + logger.warn("Unable to obtain VM disk statistics."); return vmDiskStatsById; } for (Map.Entry> entry: vmDiskStatsByName.entrySet()) { @@ -6032,12 +6030,12 @@ public HashMap> getVmNetworkStatistics(long } Answer answer = _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to obtain VM network statistics."); + logger.warn("Unable to obtain VM network statistics."); return vmNetworkStatsById; } else { HashMap> vmNetworkStatsByName = ((GetVmNetworkStatsAnswer)answer).getVmNetworkStatsMap(); if (vmNetworkStatsByName == null) { - s_logger.warn("Unable to obtain VM network statistics."); + logger.warn("Unable to obtain VM network statistics."); return vmNetworkStatsById; } for (Map.Entry> entry: vmNetworkStatsByName.entrySet()) { diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 3eb3569cab0a..bbd4510f6f59 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -26,7 +26,8 @@ import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.HostVmStateReportEntry; import 
com.cloud.configuration.ManagementServiceConfiguration; @@ -35,7 +36,7 @@ import com.cloud.vm.dao.VMInstanceDao; public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStateSync { - private static final Logger s_logger = Logger.getLogger(VirtualMachinePowerStateSyncImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject MessageBus _messageBus; @Inject VMInstanceDao _instanceDao; @@ -46,13 +47,13 @@ public VirtualMachinePowerStateSyncImpl() { @Override public void resetHostSyncState(long hostId) { - s_logger.info("Reset VM power state sync for host: " + hostId); + logger.info("Reset VM power state sync for host: " + hostId); _instanceDao.resetHostPowerStateTracking(hostId); } @Override public void processHostVmStateReport(long hostId, Map report) { - s_logger.debug("Process host VM state report. host: " + hostId); + logger.debug("Process host VM state report. host: " + hostId); Map translatedInfo = convertVmStateReport(report); processReport(hostId, translatedInfo, false); @@ -60,8 +61,8 @@ public void processHostVmStateReport(long hostId, Map report, boolean force) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Process host VM state report from ping process. host: " + hostId); + if (logger.isDebugEnabled()) + logger.debug("Process host VM state report from ping process. host: " + hostId); Map translatedInfo = convertVmStateReport(report); processReport(hostId, translatedInfo, force); @@ -69,24 +70,24 @@ public void processHostVmStatePingReport(long hostId, Map translatedInfo, boolean force) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size()); + if (logger.isDebugEnabled()) { + logger.debug("Process VM state report. 
host: " + hostId + ", number of records in report: " + translatedInfo.size()); } for (Map.Entry entry : translatedInfo.entrySet()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); + if (logger.isDebugEnabled()) + logger.debug("VM state report. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - if (s_logger.isInfoEnabled()) { - s_logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); + if (logger.isInfoEnabled()) { + logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + entry.getKey() + ", power state: " + entry.getValue()); } _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("VM power state does not change, skip DB writing. vm id: " + entry.getKey()); + if (logger.isTraceEnabled()) { + logger.trace("VM power state does not change, skip DB writing. vm id: " + entry.getKey()); } } } @@ -106,8 +107,8 @@ private void processReport(long hostId, Map tra // here we need to be wary of out of band migration as opposed to other, more unexpected state changes if (vmsThatAreMissingReport.size() > 0) { Date currentTime = DateUtil.currentGMTTime(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run missing VM report. current time: " + currentTime.getTime()); + if (logger.isDebugEnabled()) { + logger.debug("Run missing VM report. 
current time: " + currentTime.getTime()); } // 2 times of sync-update interval for graceful period @@ -118,28 +119,28 @@ private void processReport(long hostId, Map tra // Make sure powerState is up to date for missing VMs try { if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - s_logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId()); + logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId()); _instanceDao.resetVmPowerStateTracking(instance.getId()); continue; } } catch (CloudRuntimeException e) { - s_logger.warn("Checked for missing powerstate of a none existing vm", e); + logger.warn("Checked for missing powerstate of a none existing vm", e); continue; } Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); if (vmStateUpdateTime == null) { - s_logger.warn("VM power state update time is null, falling back to update time for vm id: " + instance.getId()); + logger.warn("VM power state update time is null, falling back to update time for vm id: " + instance.getId()); vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - s_logger.warn("VM update time is null, falling back to creation time for vm id: " + instance.getId()); + logger.warn("VM update time is null, falling back to creation time for vm id: " + instance.getId()); vmStateUpdateTime = instance.getCreated(); } } - if (s_logger.isInfoEnabled()) { + if (logger.isInfoEnabled()) { String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - s_logger.debug( + logger.debug( String.format("Detected missing VM. 
host: %d, vm id: %d(%s), power state: %s, last state update: %s" , hostId , instance.getId() @@ -151,30 +152,30 @@ private void processReport(long hostId, Map tra long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - s_logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period"); + logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period"); // this is were a race condition might have happened if we don't re-fetch the instance; // between the startime of this job and the currentTime of this missing-branch // an update might have occurred that we should not override in case of out of band migration if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing "); + if (logger.isDebugEnabled()) { + logger.debug("VM state report is updated. host: " + hostId + ", vm id: " + instance.getId() + ", power state: PowerReportMissing "); } _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM power state does not change, skip DB writing. vm id: " + instance.getId()); + if (logger.isDebugEnabled()) { + logger.debug("VM power state does not change, skip DB writing. 
vm id: " + instance.getId()); } } } else { - s_logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has not passed graceful period yet"); + logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has not passed graceful period yet"); } } } - if (s_logger.isDebugEnabled()) - s_logger.debug("Done with process of VM state report. host: " + hostId); + if (logger.isDebugEnabled()) + logger.debug("Done with process of VM state report. host: " + hostId); } @Override @@ -189,7 +190,7 @@ public Map convertVmStateReport(Map joinRecords = _joinMapDao.listJoinRecords(job.getId()); if (joinRecords.size() != 1) { - s_logger.warn("AsyncJob-" + job.getId() + logger.warn("AsyncJob-" + job.getId() + " received wakeup call with un-supported joining job number: " + joinRecords.size()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any @@ -84,7 +82,7 @@ public void runJob(AsyncJob job) { try { workClz = Class.forName(job.getCmd()); } catch (ClassNotFoundException e) { - s_logger.error("VM work class " + job.getCmd() + " is not found", e); + logger.error("VM work class " + job.getCmd() + " is not found", e); return; } @@ -105,14 +103,14 @@ public void runJob(AsyncJob job) { handler.invoke(_vmMgr); } else { assert (false); - s_logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + + logger.error("Unable to find wakeup handler " + joinRecord.getWakeupHandler() + " when waking up job-" + job.getId()); } } finally { CallContext.unregister(); } } catch (Throwable e) { - s_logger.warn("Unexpected exception in waking up job-" + job.getId()); + logger.warn("Unexpected exception in waking up job-" + job.getId()); // if we fail wakeup-execution for any reason, avoid release sync-source if there is any job.setSyncSource(null); @@ -132,11 +130,11 @@ private Method getHandler(String wakeupHandler) { 
method.setAccessible(true); } catch (SecurityException e) { assert (false); - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); return null; } catch (NoSuchMethodException e) { assert (false); - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); return null; } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStart.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStart.java index 5a7acdd9edb5..132bc9e19bef 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStart.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStart.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.jobs.impl.JobSerializerHelper; -import org.apache.log4j.Logger; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeploymentPlan; @@ -35,7 +34,6 @@ public class VmWorkStart extends VmWork { private static final long serialVersionUID = 9038937399817468894L; - private static final Logger s_logger = Logger.getLogger(VmWorkStart.class); long dcId; Long podId; @@ -67,7 +65,7 @@ public DeploymentPlan getPlan() { // this has to be refactored together with migrating legacy code into the new way ReservationContext context = null; if (reservationId != null) { - Journal journal = new Journal.LogJournal("VmWorkStart", s_logger); + Journal journal = new Journal.LogJournal("VmWorkStart", logger); context = new ReservationContextImpl(reservationId, journal, CallContext.current().getCallingUser(), CallContext.current().getCallingAccount()); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java index 896b55734d7a..8d4fa21754cb 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java @@ -30,7 +30,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; @@ -68,7 +69,7 @@ @Component public class VMEntityManagerImpl implements VMEntityManager { - private static final Logger s_logger = Logger.getLogger(VMEntityManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected VMInstanceDao _vmDao; @@ -213,8 +214,8 @@ public String reserveVirtualMachine(VMEntityVO vmEntityVO, DeploymentPlanner pla if (reservationId != null) { return reservationId; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot finalize the VM reservation for this destination found, retrying"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot finalize the VM reservation for this destination found, retrying"); } exclude.addHost(dest.getHost().getId()); continue; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java index 10c75d56db0b..cc33f9eb3355 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java @@ -27,7 +27,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; 
import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; @@ -51,7 +50,6 @@ @Component(value = "EngineClusterDao") public class EngineClusterDaoImpl extends GenericDaoBase implements EngineClusterDao { - private static final Logger s_logger = Logger.getLogger(EngineClusterDaoImpl.class); protected final SearchBuilder PodSearch; protected final SearchBuilder HyTypeWithoutGuidSearch; @@ -272,7 +270,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat int rows = update(vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { EngineClusterVO dbCluster = findByIdIncludingRemoved(vo.getId()); if (dbCluster != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -299,7 +297,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java index f4b2362d0557..03b4bd9eaaf4 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import 
org.springframework.stereotype.Component; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; @@ -50,7 +49,6 @@ **/ @Component(value = "EngineDataCenterDao") public class EngineDataCenterDaoImpl extends GenericDaoBase implements EngineDataCenterDao { - private static final Logger s_logger = Logger.getLogger(EngineDataCenterDaoImpl.class); protected SearchBuilder NameSearch; protected SearchBuilder ListZonesByDomainIdSearch; @@ -242,7 +240,7 @@ public EngineDataCenterVO findByTokenOrIdOrName(String tokenOrIdOrName) { Long dcId = Long.parseLong(tokenOrIdOrName); return findById(dcId); } catch (NumberFormatException nfe) { - s_logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe); + logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe); } } } @@ -280,7 +278,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat int rows = update(vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { EngineDataCenterVO dbDC = findByIdIncludingRemoved(vo.getId()); if (dbDC != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -302,7 +300,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java index 819bd32146ed..2099ebadb9f7 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import javax.persistence.TableGenerator; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; @@ -54,7 +53,6 @@ @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class EngineHostDaoImpl extends GenericDaoBase implements EngineHostDao { - private static final Logger s_logger = Logger.getLogger(EngineHostDaoImpl.class); private final SearchBuilder TypePodDcStatusSearch; @@ -431,7 +429,7 @@ public boolean updateState(State currentState, DataCenterResourceEntity.State.Ev int rows = update(vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { EngineHostVO dbHost = findByIdIncludingRemoved(vo.getId()); if (dbHost != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -453,7 +451,7 @@ public boolean updateState(State currentState, DataCenterResourceEntity.State.Ev .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); } } return rows > 0; diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java index 1eb0857a61d6..535e396a376c 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java @@ -25,7 +25,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity; @@ -44,7 +43,6 @@ @Component(value = "EngineHostPodDao") public class EngineHostPodDaoImpl extends GenericDaoBase implements EngineHostPodDao { - private static final Logger s_logger = Logger.getLogger(EngineHostPodDaoImpl.class); protected SearchBuilder DataCenterAndNameSearch; protected SearchBuilder DataCenterIdSearch; @@ -111,7 +109,7 @@ public HashMap> getCurrentPodCidrSubnets(long zoneId, long po currentPodCidrSubnets.put(podId, cidrPair); } } catch (SQLException ex) { - s_logger.warn("DB exception " + ex.getMessage(), ex); + logger.warn("DB exception " + ex.getMessage(), ex); return null; } @@ -163,7 +161,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat int rows = update(vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { EngineHostPodVO dbDC = findByIdIncludingRemoved(vo.getId()); if (dbDC != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -185,7 +183,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); + logger.debug("Unable to update dataCenter: id=" + vo.getId() + ", as there is no such dataCenter exists in the database anymore"); } } return rows > 0; diff --git 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java index 0a761cb7fbb1..31230442f170 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -46,7 +46,6 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; -import org.apache.log4j.Logger; import com.cloud.host.HostVO; import com.cloud.host.Status; @@ -62,9 +61,11 @@ import com.cloud.vm.SecondaryStorageVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.SecondaryStorageVmDao; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class DataMigrationUtility { - private static Logger LOGGER = Logger.getLogger(DataMigrationUtility.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject SecondaryStorageVmDao secStorageVmDao; @@ -96,15 +97,15 @@ public boolean filesReadyToMigrate(Long srcDataStoreId, List setupNetwork(final Account owner, final NetworkOf .getBroadcastDomainType() == BroadcastDomainType.Vxlan)) { final List configs = _networksDao.listBy(owner.getId(), offering.getId(), plan.getDataCenterId()); if (configs.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); + if (logger.isDebugEnabled()) { + logger.debug("Found existing network configuration for offering " + offering + ": " + configs.get(0)); } if (errorIfAlreadySetup) { @@ -821,7 +819,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { return networks; } finally { - 
s_logger.debug("Releasing lock for " + locked); + logger.debug("Releasing lock for " + locked); _accountDao.releaseFromLockTable(locked.getId()); } } @@ -831,8 +829,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { public void allocate(final VirtualMachineProfile vm, final LinkedHashMap> networks, final Map> extraDhcpOptions) throws InsufficientCapacityException, ConcurrentOperationException { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("allocating networks for %s(template %s); %d networks", vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("allocating networks for %s(template %s); %d networks", vm.getInstanceName(), vm.getTemplate().getUuid(), networks.size())); } int deviceId = 0; int size; @@ -977,7 +975,7 @@ private int determineNumberOfNicsRequired(final VirtualMachineProfile vm, final */ private void createExtraNics(final VirtualMachineProfile vm, int size, List nics, Network finalNetwork) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { if (nics.size() != size) { - s_logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); + logger.warn("Number of nics " + nics.size() + " doesn't match number of requested nics " + size); if (nics.size() > size) { throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + size); } else { @@ -1016,7 +1014,7 @@ public Pair allocateNic(final NicProfile requested, final N throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { final NetworkVO ntwkVO = _networksDao.findById(network.getId()); - s_logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in network " + network + " with requested profile " + requested); + logger.debug("Allocating nic for vm " + vm.getVirtualMachine() + " in 
network " + network + " with requested profile " + requested); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, ntwkVO.getGuruName()); if (requested != null && requested.getMode() == null) { @@ -1370,21 +1368,21 @@ private void setupPersistentNetwork(NetworkVO network, NetworkOfferingVO offerin final SetupPersistentNetworkAnswer answer = (SetupPersistentNetworkAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - s_logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent:" + host.getId()); + logger.warn("Unable to get an answer to the SetupPersistentNetworkCommand from agent:" + host.getId()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); continue; } if (!answer.getResult()) { - s_logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails()); + logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails()); clusterToHostsMap.get(host.getClusterId()).remove(host.getId()); } } catch (Exception e) { - s_logger.warn("Failed to connect to host: " + host.getName()); + logger.warn("Failed to connect to host: " + host.getName()); } } if (clusterToHostsMap.keySet().size() != clusterVOs.size()) { - s_logger.warn("Hosts on all clusters may not have been configured with network devices."); + logger.warn("Hosts on all clusters may not have been configured with network devices."); } } @@ -1407,7 +1405,7 @@ public Pair implementNetwork(final long networkId, final NetworkVO network = _networksDao.findById(networkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); if (isNetworkImplemented(network)) { - s_logger.debug("Network id=" + networkId + " is already implemented"); + logger.debug("Network id=" + networkId + " is already implemented"); implemented.set(guru, network); return implemented; } @@ -1421,19 +1419,19 @@ public Pair implementNetwork(final long networkId, final throw ex; } - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for network id " + networkId + " as a part of network implement"); } try { if (isNetworkImplemented(network)) { - s_logger.debug("Network id=" + networkId + " is already implemented"); + logger.debug("Network id=" + networkId + " is already implemented"); implemented.set(guru, network); return implemented; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + guru.getName() + " to implement " + network); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + guru.getName() + " to implement " + network); } final NetworkOfferingVO offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -1471,14 +1469,14 @@ public Pair implementNetwork(final long networkId, final implemented.set(guru, network); return implemented; } catch (final NoTransitionException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); return new Pair(null, null); } catch (final CloudRuntimeException | OperationTimedoutException e) { - s_logger.error("Caught exception: " + e.getMessage()); + logger.error("Caught exception: " + e.getMessage()); return new Pair(null, null); } finally { if (implemented.first() == null) { - s_logger.debug("Cleaning up because we're unable to implement the network " + network); + logger.debug("Cleaning up because we're unable to implement the network " + network); try { if (isSharedNetworkWithServices(network)) { network.setState(Network.State.Shutdown); @@ -1487,20 +1485,20 @@ public Pair implementNetwork(final long networkId, final stateTransitTo(network, Event.OperationFailed); } } catch (final NoTransitionException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } try { shutdownNetwork(networkId, context, false); } catch (final Exception e) { // Don't throw this exception as it would hide the 
original thrown exception, just log - s_logger.error("Exception caught while shutting down a network as part of a failed implementation", e); + logger.error("Exception caught while shutting down a network as part of a failed implementation", e); } } _networksDao.releaseFromLockTable(networkId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for network id " + networkId + " as a part of network implement"); } } } @@ -1526,13 +1524,13 @@ public void implementNetworkElementsAndResources(final DeployDestination dest, f ips = _ipAddressDao.listByAssociatedVpc(network.getVpcId(), true); if (ips.isEmpty()) { final Vpc vpc = _vpcMgr.getActiveVpc(network.getVpcId()); - s_logger.debug("Creating a source nat ip for vpc " + vpc); + logger.debug("Creating a source nat ip for vpc " + vpc); _vpcMgr.assignSourceNatIpAddressToVpc(owner, vpc); } } else { ips = _ipAddressDao.listByAssociatedNetwork(network.getId(), true); if (ips.isEmpty()) { - s_logger.debug("Creating a source nat ip for network " + network); + logger.debug("Creating a source nat ip for network " + network); _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); } } @@ -1557,9 +1555,9 @@ public void implementNetworkElementsAndResources(final DeployDestination dest, f try { // reapply all the firewall/staticNat/lb rules - s_logger.debug("Reprogramming network " + network + " as a part of network implement"); + logger.debug("Reprogramming network " + network + " as a part of network implement"); if (!reprogramNetworkRules(network.getId(), CallContext.current().getCallingAccount(), network)) { - s_logger.warn("Failed to re-program the network as a part of network " + network + " implement"); + logger.warn("Failed to re-program the network as a part of network " + network + " implement"); // see DataCenterVO.java final ResourceUnavailableException ex = 
new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class, network.getDataCenterId()); @@ -1569,7 +1567,7 @@ public void implementNetworkElementsAndResources(final DeployDestination dest, f for (final NetworkElement element : networkElements) { if (element instanceof AggregatedCommandExecutor && providersToImplement.contains(element.getProvider())) { if (!((AggregatedCommandExecutor) element).completeAggregatedExecution(network, dest)) { - s_logger.warn("Failed to re-program the network as a part of network " + network + " implement due to aggregated commands execution failure!"); + logger.warn("Failed to re-program the network as a part of network " + network + " implement due to aggregated commands execution failure!"); // see DataCenterVO.java final ResourceUnavailableException ex = new ResourceUnavailableException("Unable to apply network rules as a part of network " + network + " implement", DataCenter.class, network.getDataCenterId()); @@ -1600,8 +1598,8 @@ private void implementNetworkElements(final DeployDestination dest, final Reserv + network.getPhysicalNetworkId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to implement " + network); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to implement " + network); } if (!element.implement(network, offering, dest, context)) { @@ -1627,50 +1625,50 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call _firewallMgr.applyDefaultEgressFirewallRule(network.getId(), offering.isEgressDefaultPolicy(), true); } if (!_firewallMgr.applyFirewallRules(firewallEgressRulesToApply, false, caller)) { - s_logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply firewall Egress rule(s) as a part of network id=" + networkId + " restart"); success = false; } // associate 
all ip addresses if (!_ipAddrMgr.applyIpAssociations(network, false)) { - s_logger.warn("Failed to apply ip addresses as a part of network id" + networkId + " restart"); + logger.warn("Failed to apply ip addresses as a part of network id" + networkId + " restart"); success = false; } // apply static nat if (!_rulesMgr.applyStaticNatsForNetwork(networkId, false, caller)) { - s_logger.warn("Failed to apply static nats a part of network id" + networkId + " restart"); + logger.warn("Failed to apply static nats as a part of network id=" + networkId + " restart"); success = false; } // apply firewall rules final List firewallIngressRulesToApply = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); if (!_firewallMgr.applyFirewallRules(firewallIngressRulesToApply, false, caller)) { - s_logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply Ingress firewall rule(s) as a part of network id=" + networkId + " restart"); success = false; } // apply port forwarding rules if (!_rulesMgr.applyPortForwardingRulesForNetwork(networkId, false, caller)) { - s_logger.warn("Failed to reapply port forwarding rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply port forwarding rule(s) as a part of network id=" + networkId + " restart"); success = false; } // apply static nat rules if (!_rulesMgr.applyStaticNatRulesForNetwork(networkId, false, caller)) { - s_logger.warn("Failed to reapply static nat rule(s) as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply static nat rule(s) as a part of network id=" + networkId + " restart"); success = false; } // apply public load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Public)) { - s_logger.warn("Failed to reapply Public load balancer rules as a part of network id=" + networkId + " restart"); + 
logger.warn("Failed to reapply Public load balancer rules as a part of network id=" + networkId + " restart"); success = false; } // apply internal load balancer rules if (!_lbMgr.applyLoadBalancersForNetwork(networkId, Scheme.Internal)) { - s_logger.warn("Failed to reapply internal load balancer rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply internal load balancer rules as a part of network id=" + networkId + " restart"); success = false; } @@ -1680,7 +1678,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call for (final RemoteAccessVpn vpn : vpnsToReapply) { // Start remote access vpn per ip if (_vpnMgr.startRemoteAccessVpn(vpn.getServerAddressId(), false) == null) { - s_logger.warn("Failed to reapply vpn rules as a part of network id=" + networkId + " restart"); + logger.warn("Failed to reapply vpn rules as a part of network id=" + networkId + " restart"); success = false; } } @@ -1688,7 +1686,7 @@ protected boolean reprogramNetworkRules(final long networkId, final Account call //apply network ACLs if (!_networkACLMgr.applyACLToNetwork(networkId)) { - s_logger.warn("Failed to reapply network ACLs as a part of of network id=" + networkId + " restart"); + logger.warn("Failed to reapply network ACLs as a part of network id=" + networkId + " restart"); success = false; } @@ -1785,14 +1783,14 @@ public void cleanupConfigForServicesInNetwork(List services, final Netwo Account caller = _accountDao.findById(Account.ACCOUNT_ID_SYSTEM); long userId = User.UID_SYSTEM; //remove all PF/Static Nat rules for the network - s_logger.info("Services:" + services + " are no longer supported in network:" + network.getUuid() + + logger.info("Services:" + services + " are no longer supported in network:" + network.getUuid() + " after applying new network offering:" + network.getNetworkOfferingId() + " removing the related configuration"); if (services.contains(Service.StaticNat.getName()) ||
services.contains(Service.PortForwarding.getName())) { try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { - s_logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); } else { - s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); } if (services.contains(Service.StaticNat.getName())) { //removing static nat configured on ips. @@ -1811,7 +1809,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } } if (services.contains(Service.SourceNat.getName())) { @@ -1830,9 +1828,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (services.contains(Service.Lb.getName())) { //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) { - s_logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); + logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); } else { - s_logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); } } @@ -1840,12 +1838,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { //revoke all firewall rules for the network try { if 
(_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { - s_logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); + logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); } else { - s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } } @@ -1855,7 +1853,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true); } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup remote access vpn resources of network:" + network.getUuid() + " due to Exception: ", ex); + logger.warn("Failed to cleanup remote access vpn resources of network:" + network.getUuid() + " due to Exception: ", ex); } } } @@ -1942,10 +1940,10 @@ private void setHypervisorHostnameInNetwork(VirtualMachineProfile vm, DeployDest try { final UserDataServiceProvider sp = (UserDataServiceProvider) element; if (!sp.saveHypervisorHostname(profile, network, vm, dest)) { - s_logger.error(errorMsg); + logger.error(errorMsg); } } catch (ResourceUnavailableException e) { - s_logger.error(String.format("%s, error states %s", errorMsg, e)); + logger.error(String.format("%s, error states %s", errorMsg, e)); } } } @@ -1959,7 +1957,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { _nicDao.update(nic.getId(), nic); if (nic.getVmType() == VirtualMachine.Type.User) { - s_logger.debug("Changing active number of nics for network id=" + 
networkId + " on " + count); + logger.debug("Changing active number of nics for network id=" + networkId + " on " + count); _networksDao.changeActiveNicsBy(networkId, count); } @@ -1992,7 +1990,7 @@ public int compare(final NicVO nic1, final NicVO nic2) { for (final NicVO nic : nics) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - s_logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); } @@ -2067,8 +2065,8 @@ public NicProfile prepareNic(final VirtualMachineProfile vmProfile, final Deploy throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to prepare for " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to prepare for " + nic); } if (!prepareElement(element, network, profile, vmProfile, dest, context)) { throw new InsufficientAddressCapacityException("unable to configure the dhcp service, due to insufficiant address capacity", Network.class, network.getId()); @@ -2109,7 +2107,7 @@ public void prepareNicForMigration(final VirtualMachineProfile vm, final DeployD _networkModel.getNetworkTag(vm.getHypervisorType(), network)); if (guru instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)) { - s_logger.error("NetworkGuru " + guru + " prepareForMigration 
failed."); // XXX: Transaction error + logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error } } @@ -2126,7 +2124,7 @@ public void prepareNicForMigration(final VirtualMachineProfile vm, final DeployD } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { - s_logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error } } } @@ -2158,7 +2156,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vm.getHypervisorType(), network)); if (guru instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) guru).prepareMigration(profile, network, vm, dest, context)) { - s_logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkGuru " + guru + " prepareForMigration failed."); // XXX: Transaction error } } final List providersToImplement = getNetworkProviders(network.getId()); @@ -2169,7 +2167,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep } if (element instanceof NetworkMigrationResponder) { if (!((NetworkMigrationResponder) element).prepareMigration(profile, network, vm, dest, context)) { - s_logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error + logger.error("NetworkElement " + element + " prepareForMigration failed."); // XXX: Transaction error } } } @@ -2190,7 +2188,7 @@ public void prepareAllNicsForMigration(final VirtualMachineProfile vm, final Dep if (nic == null && !addedURIs.contains(broadcastUri.toString())) { //Nic details are not available in DB //Create nic profile for migration - s_logger.debug("Creating 
nic profile for migration. BroadcastUri: " + broadcastUri.toString() + " NetworkId: " + ntwkId + " Vm: " + vm.getId()); + logger.debug("Creating nic profile for migration. BroadcastUri: " + broadcastUri.toString() + " NetworkId: " + ntwkId + " Vm: " + vm.getId()); final NetworkVO network = _networksDao.findById(ntwkId); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NicProfile profile = new NicProfile(); @@ -2367,8 +2365,8 @@ public Pair doInTransaction(final TransactionStatus status) final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to release " + profile); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to release " + profile); } //NOTE: Context appear to never be used in release method //implementations. 
Consider removing it from interface Element @@ -2380,8 +2378,8 @@ public Pair doInTransaction(final TransactionStatus status) @Override public void cleanupNics(final VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cleaning network for vm: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cleaning network for vm: " + vm.getId()); } final List nics = _nicDao.listByVmId(vm.getId()); @@ -2403,7 +2401,7 @@ protected void removeNic(final VirtualMachineProfile vm, final NicVO nic) { try { releaseNic(vm, nic.getId()); } catch (final Exception ex) { - s_logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex); + logger.warn("Failed to release nic: " + nic.toString() + " as part of remove operation due to", ex); } } @@ -2434,15 +2432,15 @@ protected void removeNic(final VirtualMachineProfile vm, final NicVO nic) { final List providersToImplement = getNetworkProviders(network.getId()); for (final NetworkElement element : networkElements) { if (providersToImplement.contains(element.getProvider())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to release " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Asking " + element.getName() + " to release " + nic); } try { element.release(network, profile, vm, null); } catch (final ConcurrentOperationException ex) { - s_logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); + logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); } catch (final ResourceUnavailableException ex) { - s_logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); + logger.warn("release failed during the nic " + nic.toString() + " removeNic due to ", ex); } } } @@ -2465,11 +2463,11 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { if (dnsServiceProvider != null) { try { if 
(!dnsServiceProvider.removeDnsSupportForSubnet(network)) { - s_logger.warn("Failed to remove the ip alias on the dns server"); + logger.warn("Failed to remove the ip alias on the dns server"); } } catch (final ResourceUnavailableException e) { //failed to remove the dnsconfig. - s_logger.info("Unable to delete the ip alias due to unable to contact the dns server."); + logger.info("Unable to delete the ip alias due to unable to contact the dns server."); } } } @@ -2481,7 +2479,7 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { _nicDao.remove(nic.getId()); } - s_logger.debug("Removed nic id=" + nic.getId()); + logger.debug("Removed nic id=" + nic.getId()); // release assigned IPv6 for Isolated Network VR NIC if (Type.User.equals(vm.getType()) && GuestType.Isolated.equals(network.getGuestType()) @@ -2494,7 +2492,7 @@ && isDhcpAccrossMultipleSubnetsSupported(dhcpServiceProvider)) { //remove the secondary ip addresses corresponding to this nic if (!removeVmSecondaryIpsOfNic(nic.getId())) { - s_logger.debug("Removing nic " + nic.getId() + " secondary ip addresses failed"); + logger.debug("Removing nic " + nic.getId() + " secondary ip addresses failed"); } } @@ -2533,12 +2531,12 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) { - s_logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address()); + logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address()); } } } catch (final ResourceUnavailableException e) { //failed to remove the dhcpconfig on the router. 
- s_logger.info("Unable to delete the ip alias due to unable to contact the virtualrouter."); + logger.info("Unable to delete the ip alias due to unable to contact the virtualrouter."); } } @@ -2586,7 +2584,7 @@ private Network createGuestNetwork(final long networkOfferingId, final String na final DataCenterVO zone = _dcDao.findById(zoneId); // this method supports only guest network creation if (ntwkOff.getTrafficType() != TrafficType.Guest) { - s_logger.warn("Only guest networks can be created using this method"); + logger.warn("Only guest networks can be created using this method"); return null; } @@ -3019,12 +3017,12 @@ protected void checkL2OfferingServices(NetworkOfferingVO ntwkOff) { public boolean shutdownNetwork(final long networkId, final ReservationContext context, final boolean cleanupElements) { NetworkVO network = _networksDao.findById(networkId); if (network.getState() == Network.State.Allocated) { - s_logger.debug("Network is already shutdown: " + network); + logger.debug("Network is already shutdown: " + network); return true; } if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) { - s_logger.debug("Network is not implemented: " + network); + logger.debug("Network is not implemented: " + network); return false; } @@ -3032,20 +3030,20 @@ public boolean shutdownNetwork(final long networkId, final ReservationContext co //do global lock for the network network = _networksDao.acquireInLockTable(networkId, NetworkLockTimeout.value()); if (network == null) { - s_logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown"); + logger.warn("Unable to acquire lock for the network " + network + " as a part of network shutdown"); return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for network " + network + " as a part of network shutdown"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for network " + network + " as a 
part of network shutdown"); } if (network.getState() == Network.State.Allocated) { - s_logger.debug("Network is already shutdown: " + network); + logger.debug("Network is already shutdown: " + network); return true; } if (network.getState() != Network.State.Implemented && network.getState() != Network.State.Shutdown) { - s_logger.debug("Network is not implemented: " + network); + logger.debug("Network is not implemented: " + network); return false; } @@ -3070,8 +3068,8 @@ public Boolean doInTransaction(final TransactionStatus status) { boolean result = false; if (success) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); + if (logger.isDebugEnabled()) { + logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); } final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, networkFinal.getGuruName()); final NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); @@ -3110,8 +3108,8 @@ public Boolean doInTransaction(final TransactionStatus status) { } finally { if (network != null) { _networksDao.releaseFromLockTable(network.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for network " + network + " as a part of network shutdown"); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for network " + network + " as a part of network shutdown"); } } } @@ -3138,11 +3136,11 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont cleanupResult = shutdownNetworkResources(network.getId(), context.getAccount(), context.getCaller().getId()); } } catch (final Exception ex) { - s_logger.warn("shutdownNetworkRules failed during the network " + network + " shutdown due to ", ex); + logger.warn("shutdownNetworkRules failed during the network " + network + " shutdown due to ", ex); } finally { // just warn the administrator 
that the network elements failed to shutdown if (!cleanupResult) { - s_logger.warn("Failed to cleanup network id=" + network.getId() + " resources as a part of shutdownNetwork"); + logger.warn("Failed to cleanup network id=" + network.getId() + " resources as a part of shutdownNetwork"); } } @@ -3151,21 +3149,21 @@ public boolean shutdownNetworkElementsAndResources(final ReservationContext cont for (final NetworkElement element : networkElements) { if (providersToShutdown.contains(element.getProvider())) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending network shutdown to " + element.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Sending network shutdown to " + element.getName()); } if (!element.shutdown(network, context, cleanupElements)) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName()); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName()); success = false; } } catch (final ResourceUnavailableException e) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); success = false; } catch (final ConcurrentOperationException e) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); success = false; } catch (final Exception e) { - s_logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); + logger.warn("Unable to complete shutdown of the network elements due to element: " + element.getName(), e); success = false; } } @@ -3186,15 +3184,15 @@ private void cleanupPersistentnNetworkResources(NetworkVO network) { 
CleanupPersistentNetworkResourceCommand cmd = new CleanupPersistentNetworkResourceCommand(to); CleanupPersistentNetworkResourceAnswer answer = (CleanupPersistentNetworkResourceAnswer) _agentMgr.send(host.getId(), cmd); if (answer == null) { - s_logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent:" + host.getId()); + logger.warn("Unable to get an answer to the CleanupPersistentNetworkResourceCommand from agent:" + host.getId()); continue; } if (!answer.getResult()) { - s_logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails()); + logger.warn("Unable to setup agent " + host.getId() + " due to " + answer.getDetails()); } } catch (Exception e) { - s_logger.warn("Failed to cleanup network resources on host: " + host.getName()); + logger.warn("Failed to cleanup network resources on host: " + host.getName()); } } } @@ -3208,7 +3206,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con NetworkVO network = _networksDao.findById(networkId); if (network == null) { - s_logger.debug("Unable to find network with id: " + networkId); + logger.debug("Unable to find network with id: " + networkId); return false; } // Make sure that there are no user vms in the network that are not Expunged/Error @@ -3216,7 +3214,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con for (final UserVmVO vm : userVms) { if (!(vm.getState() == VirtualMachine.State.Expunging && vm.getRemoved() != null)) { - s_logger.warn("Can't delete the network, not all user vms are expunged. Vm " + vm + " is in " + vm.getState() + " state"); + logger.warn("Can't delete the network, not all user vms are expunged. 
Vm " + vm + " is in " + vm.getState() + " state"); return false; } } @@ -3224,7 +3222,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con // Don't allow to delete network via api call when it has vms assigned to it final int nicCount = getActiveNicsInNetwork(networkId); if (nicCount > 0) { - s_logger.debug("The network id=" + networkId + " has active Nics, but shouldn't."); + logger.debug("The network id=" + networkId + " has active Nics, but shouldn't."); // at this point we have already determined that there are no active user vms in network // if the op_networks table shows active nics, it's a bug in releasing nics updating op_networks _networksDao.changeActiveNicsBy(networkId, -1 * nicCount); @@ -3235,7 +3233,7 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con if (zone.getNetworkType() == NetworkType.Basic) { final List systemVms = _vmDao.listNonRemovedVmsByTypeAndNetwork(network.getId(), Type.ConsoleProxy, Type.SecondaryStorageVm); if (systemVms != null && !systemVms.isEmpty()) { - s_logger.warn("Can't delete the network, not all consoleProxy/secondaryStorage vms are expunged"); + logger.warn("Can't delete the network, not all consoleProxy/secondaryStorage vms are expunged"); return false; } } @@ -3248,13 +3246,13 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con // get updated state for the network network = _networksDao.findById(networkId); if (network.getState() != Network.State.Allocated && network.getState() != Network.State.Setup && !forced) { - s_logger.debug("Network is not in the correct state to be destroyed: " + network.getState()); + logger.debug("Network is not in the correct state to be destroyed: " + network.getState()); return false; } boolean success = true; if (!cleanupNetworkResources(networkId, callerAccount, context.getCaller().getId())) { - s_logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network 
resources"); + logger.warn("Unable to delete network id=" + networkId + ": failed to cleanup network resources"); return false; } @@ -3263,30 +3261,30 @@ public boolean destroyNetwork(final long networkId, final ReservationContext con for (final NetworkElement element : networkElements) { if (providersToDestroy.contains(element.getProvider())) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending destroy to " + element); + if (logger.isDebugEnabled()) { + logger.debug("Sending destroy to " + element); } if (!element.destroy(network, context)) { success = false; - s_logger.warn("Unable to complete destroy of the network: failed to destroy network element " + element.getName()); + logger.warn("Unable to complete destroy of the network: failed to destroy network element " + element.getName()); } } catch (final ResourceUnavailableException e) { - s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); success = false; } catch (final ConcurrentOperationException e) { - s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); success = false; } catch (final Exception e) { - s_logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); + logger.warn("Unable to complete destroy of the network due to element: " + element.getName(), e); success = false; } } } if (success) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); + if (logger.isDebugEnabled()) { + logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); } final NetworkVO networkFinal = network; @@ -3301,7 +3299,7 @@ public 
void doInTransactionWithoutResult(final TransactionStatus status) { } if (!deleteVlansInNetwork(networkFinal, context.getCaller().getId(), callerAccount)) { - s_logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); + logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); } else { // commit transaction only when ips and vlans for the network are released successfully @@ -3311,7 +3309,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { try { stateTransitTo(networkFinal, Event.DestroyNetwork); } catch (final NoTransitionException e) { - s_logger.debug(e.getMessage()); + logger.debug(e.getMessage()); } if (_networksDao.remove(networkFinal.getId())) { final NetworkDomainVO networkDomain = _networkDomainDao.getDomainNetworkMapByNetworkId(networkFinal.getId()); @@ -3343,7 +3341,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } return true; } catch (final CloudRuntimeException e) { - s_logger.error("Failed to delete network", e); + logger.error("Failed to delete network", e); return false; } } @@ -3365,7 +3363,7 @@ protected boolean deleteVlansInNetwork(final NetworkVO network, final long userI boolean result = true; for (final VlanVO vlan : publicVlans) { if (!_configMgr.deleteVlanAndPublicIpRange(userId, vlan.getId(), callerAccount)) { - s_logger.warn("Failed to delete vlan " + vlan.getId() + ");"); + logger.warn("Failed to delete vlan " + vlan.getId() + ");"); result = false; } } @@ -3373,16 +3371,16 @@ protected boolean deleteVlansInNetwork(final NetworkVO network, final long userI //cleanup private vlans final int privateIpAllocCount = _privateIpDao.countAllocatedByNetworkId(networkId); if (privateIpAllocCount > 0) { - s_logger.warn("Can't delete Private ip range for network 
" + networkId + " as it has allocated ip addresses"); + logger.warn("Can't delete Private ip range for network " + networkId + " as it has allocated ip addresses"); result = false; } else { _privateIpDao.deleteByNetworkId(networkId); - s_logger.debug("Deleted ip range for private network id=" + networkId); + logger.debug("Deleted ip range for private network id=" + networkId); } // release vlans of user-shared networks without specifyvlan if (isSharedNetworkWithoutSpecifyVlan(_networkOfferingDao.findById(network.getNetworkOfferingId()))) { - s_logger.debug("Releasing vnet for the network id=" + network.getId()); + logger.debug("Releasing vnet for the network id=" + network.getId()); _dcDao.releaseVnet(BroadcastDomainType.getValue(network.getBroadcastUri()), network.getDataCenterId(), network.getPhysicalNetworkId(), network.getAccountId(), network.getReservationId()); } @@ -3414,7 +3412,7 @@ public void reallyRun() { final List networkIds = _networksDao.findNetworksToGarbageCollect(); final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60); - s_logger.info("NetworkGarbageCollector uses '" + netGcWait + "' seconds for GC interval."); + logger.info("NetworkGarbageCollector uses '" + netGcWait + "' seconds for GC interval."); for (final Long networkId : networkIds) { if (!_networkModel.isNetworkReadyForGc(networkId)) { @@ -3422,19 +3420,19 @@ public void reallyRun() { } if (!networkDetailsDao.findDetails(Network.AssociatedNetworkId, String.valueOf(networkId), null).isEmpty()) { - s_logger.debug(String.format("Network %s is associated to a shared network, skipping", networkId)); + logger.debug(String.format("Network %s is associated to a shared network, skipping", networkId)); continue; } final Long time = _lastNetworkIdsToFree.remove(networkId); if (time == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("We found network " + networkId + " to be free for the first time. 
Adding it to the list: " + currentTime); + if (logger.isDebugEnabled()) { + logger.debug("We found network " + networkId + " to be free for the first time. Adding it to the list: " + currentTime); } stillFree.put(networkId, currentTime); } else if (time > currentTime - netGcWait) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); + if (logger.isDebugEnabled()) { + logger.debug("Network " + networkId + " is still free but it's not time to shutdown yet: " + time); } stillFree.put(networkId, time); } else { @@ -3450,7 +3448,7 @@ public void reallyRun() { // If network is removed, unset gc flag for it if (_networksDao.findById(networkId) == null) { - s_logger.debug("Network id=" + networkId + " is removed, so clearing up corresponding gc check"); + logger.debug("Network id=" + networkId + " is removed, so clearing up corresponding gc check"); _networksDao.clearCheckForGc(networkId); } else { try { @@ -3462,12 +3460,12 @@ public void reallyRun() { shutdownNetwork(networkId, context, false); } catch (final Exception e) { - s_logger.warn("Unable to shutdown network: " + networkId); + logger.warn("Unable to shutdown network: " + networkId); } } } } catch (final Exception e) { - s_logger.warn("Caught exception while running network gc: ", e); + logger.warn("Caught exception while running network gc: ", e); } } } @@ -3485,10 +3483,10 @@ public boolean startNetwork(final long networkId, final DeployDestination dest, } // implement the network - s_logger.debug("Starting network " + network + "..."); + logger.debug("Starting network " + network + "..."); final Pair implementedNetwork = implementNetwork(networkId, dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - s_logger.warn("Failed to start the network " + network); + logger.warn("Failed to start the network " + network); return false; } else { return true; @@ -3502,7 +3500,7 @@ public boolean 
restartNetwork(final Long networkId, final Account callerAccount, boolean restartRequired = false; final NetworkVO network = _networksDao.findById(networkId); - s_logger.debug("Restarting network " + networkId + "..."); + logger.debug("Restarting network " + networkId + "..."); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); final NetworkOffering offering = _networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()); @@ -3526,12 +3524,12 @@ public boolean restartNetwork(final Long networkId, final Account callerAccount, try { VMInstanceVO instanceVO = _vmDao.findById(router.getId()); if (instanceVO == null) { - s_logger.info("Did not find a virtual router instance for the network"); + logger.info("Did not find a virtual router instance for the network"); continue; } Pair patched = mgr.updateSystemVM(instanceVO, true); if (patched.first()) { - s_logger.info(String.format("Successfully patched router %s", router)); + logger.info(String.format("Successfully patched router %s", router)); } } catch (CloudRuntimeException e) { throw new CloudRuntimeException(String.format("Failed to live patch router: %s", router), e); @@ -3540,13 +3538,13 @@ public boolean restartNetwork(final Long networkId, final Account callerAccount, } } - s_logger.debug("Implementing the network " + network + " elements and resources as a part of network restart without cleanup"); + logger.debug("Implementing the network " + network + " elements and resources as a part of network restart without cleanup"); try { implementNetworkElementsAndResources(dest, context, network, offering); setRestartRequired(network, false); return true; } catch (final Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network restart due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part of network restart due to ", ex); return false; } } 
@@ -3559,7 +3557,7 @@ public void destroyExpendableRouters(final List routers router.getState() == VirtualMachine.State.Error || router.getState() == VirtualMachine.State.Shutdown || router.getState() == VirtualMachine.State.Unknown) { - s_logger.debug("Destroying old router " + router); + logger.debug("Destroying old router " + router); _routerService.destroyRouter(router.getId(), context.getAccount(), context.getCaller().getId()); } else { remainingRouters.add(router); @@ -3588,7 +3586,7 @@ public void destroyExpendableRouters(final List routers public boolean areRoutersRunning(final List routers) { for (final VirtualRouter router : routers) { if (router.getState() != VirtualMachine.State.Running) { - s_logger.debug("Found new router " + router.getInstanceName() + " to be in non-Running state: " + router.getState() + ". Please try restarting network again."); + logger.debug("Found new router " + router.getInstanceName() + " to be in non-Running state: " + router.getState() + ". Please try restarting network again."); return false; } } @@ -3615,7 +3613,7 @@ public void cleanupNicDhcpDnsEntry(Network network, VirtualMachineProfile vmProf try { sp.removeDhcpEntry(network, nicProfile, vmProfile); } catch (ResourceUnavailableException e) { - s_logger.error("Failed to remove dhcp-dns entry due to: ", e); + logger.error("Failed to remove dhcp-dns entry due to: ", e); } } } @@ -3643,10 +3641,10 @@ private boolean rollingRestartRouters(final NetworkVO network, final NetworkOffe implementNetworkElementsAndResources(dest, context, network, offering); return true; } - s_logger.debug("Failed to shutdown the network elements and resources as a part of network restart: " + network.getState()); + logger.debug("Failed to shutdown the network elements and resources as a part of network restart: " + network.getState()); return false; } - s_logger.debug("Performing rolling restart of routers of network " + network); + logger.debug("Performing rolling restart of routers of network " 
+ network); destroyExpendableRouters(routerDao.findByNetwork(network.getId()), context); final List providersToImplement = getNetworkProviders(network.getId()); @@ -3687,7 +3685,7 @@ private boolean rollingRestartRouters(final NetworkVO network, final NetworkOffe } private void setRestartRequired(final NetworkVO network, final boolean restartRequired) { - s_logger.debug("Marking network " + network + " with restartRequired=" + restartRequired); + logger.debug("Marking network " + network + " with restartRequired=" + restartRequired); network.setRestartRequired(restartRequired); _networksDao.update(network.getId(), network); } @@ -3711,7 +3709,7 @@ public UserDataServiceProvider getPasswordResetProvider(final Network network) { final String passwordProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (passwordProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); return null; } @@ -3723,7 +3721,7 @@ public UserDataServiceProvider getSSHKeyResetProvider(final Network network) { final String SSHKeyProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (SSHKeyProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); return null; } @@ -3735,7 +3733,7 @@ public DhcpServiceProvider getDhcpServiceProvider(final Network network) { final String DhcpProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dhcp); if (DhcpProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); return null; } @@ -3752,7 +3750,7 @@ 
public DnsServiceProvider getDnsServiceProvider(final Network network) { final String dnsProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.Dns); if (dnsProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.Dhcp.getName()); return null; } @@ -3799,7 +3797,7 @@ public List listVmNics(final long vmId, final Long nicId, final L for (final NicVO nic : result) { if (_networkModel.isProviderForNetwork(Provider.NiciraNvp, nic.getNetworkId())) { //For NSX Based networks, add nsxlogicalswitch, nsxlogicalswitchport to each result - s_logger.info("Listing NSX logical switch and logical switch por for each nic"); + logger.info("Listing NSX logical switch and logical switch por for each nic"); final NetworkVO network = _networksDao.findById(nic.getNetworkId()); final NetworkGuru guru = AdapterBase.getAdapterByName(networkGurus, network.getGuruName()); final NetworkGuruAdditionalFunctions guruFunctions = (NetworkGuruAdditionalFunctions) guru; @@ -3848,51 +3846,51 @@ private boolean cleanupNetworkResources(final long networkId, final Account call //remove all PF/Static Nat rules for the network try { if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, callerUserId, caller)) { - s_logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); + logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); } else { success = false; - s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - 
s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } //remove all LB rules for the network if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, callerUserId)) { - s_logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); + logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); } else { // shouldn't even come here as network is being cleaned up after all network elements are shutdown success = false; - s_logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); } //revoke all firewall rules for the network try { if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, callerUserId, caller)) { - s_logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); + logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); } else { success = false; - s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); } } catch (final ResourceUnavailableException ex) { success = false; // shouldn't even come here as network is being cleaned up after all network elements are shutdown - s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } //revoke all network ACLs for network try { if 
(_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - s_logger.debug("Successfully cleaned up NetworkACLs for network id=" + networkId); + logger.debug("Successfully cleaned up NetworkACLs for network id=" + networkId); } else { success = false; - s_logger.warn("Failed to cleanup NetworkACLs as a part of network id=" + networkId + " cleanup"); + logger.warn("Failed to cleanup NetworkACLs as a part of network id=" + networkId + " cleanup"); } } catch (final ResourceUnavailableException ex) { success = false; - s_logger.warn("Failed to cleanup Network ACLs as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + logger.warn("Failed to cleanup Network ACLs as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); } //release all ip addresses @@ -3907,7 +3905,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call // so as part of network clean up just break IP association with guest network ipToRelease.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipToRelease.getId(), ipToRelease); - s_logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network"); + logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any network"); } } else { _vpcMgr.unassignIPFromVpcNetwork(ipToRelease.getId(), network.getId()); @@ -3916,7 +3914,7 @@ private boolean cleanupNetworkResources(final long networkId, final Account call try { if (!_ipAddrMgr.applyIpAssociations(network, true)) { - s_logger.warn("Unable to apply ip address associations for " + network); + logger.warn("Unable to apply ip address associations for " + network); success = false; } } catch (final ResourceUnavailableException e) { @@ -3935,34 +3933,34 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal // Mark all PF rules as revoked and apply them on the backend (not in the DB) final List pfRules = 
_portForwardingRulesDao.listByNetwork(networkId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } for (final PortForwardingRuleVO pfRule : pfRules) { - s_logger.trace("Marking pf rule " + pfRule + " with Revoke state"); + logger.trace("Marking pf rule " + pfRule + " with Revoke state"); pfRule.setState(FirewallRule.State.Revoke); } try { if (!_firewallMgr.applyRules(pfRules, true, false)) { - s_logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup pf rules as a part of shutdownNetworkRules due to ", ex); success = false; } // Mark all static rules as revoked and apply them on the backend (not in the DB) final List firewallStaticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); final List staticNatRules = new ArrayList(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + firewallStaticNatRules.size() + " static nat rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } for (final FirewallRuleVO firewallStaticNatRule : firewallStaticNatRules) { - s_logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state"); + logger.trace("Marking static nat rule " + firewallStaticNatRule + " with Revoke state"); 
final IpAddress ip = _ipAddressDao.findById(firewallStaticNatRule.getSourceIpAddressId()); final FirewallRuleVO ruleVO = _firewallDao.findById(firewallStaticNatRule.getId()); @@ -3977,58 +3975,58 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal try { if (!_firewallMgr.applyRules(staticNatRules, true, false)) { - s_logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup static nat rules as a part of shutdownNetworkRules due to ", ex); success = false; } try { if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Public)) { - s_logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); success = false; } try { if (!_lbMgr.revokeLoadBalancersForNetwork(networkId, Scheme.Internal)) { - s_logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup internal lb rules as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup public lb rules as a part of shutdownNetworkRules due to ", ex); success = false; } // revoke all firewall rules for the network w/o applying them on the DB final List firewallRules = 
_firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Ingress); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + firewallRules.size() + " firewall ingress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } for (final FirewallRuleVO firewallRule : firewallRules) { - s_logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state"); + logger.trace("Marking firewall ingress rule " + firewallRule + " with Revoke state"); firewallRule.setState(FirewallRule.State.Revoke); } try { if (!_firewallMgr.applyRules(firewallRules, true, false)) { - s_logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup firewall ingress rules as a part of shutdownNetworkRules due to ", ex); success = false; } final List firewallEgressRules = _firewallDao.listByNetworkPurposeTrafficType(networkId, Purpose.Firewall, FirewallRule.TrafficType.Egress); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + firewallEgressRules.size() + " firewall egress rules for network id=" + networkId + " as a part of shutdownNetworkRules"); } try { @@ -4041,38 +4039,38 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal } } catch (final ResourceUnavailableException ex) 
{ - s_logger.warn("Failed to cleanup firewall default egress rule as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup firewall default egress rule as a part of shutdownNetworkRules due to ", ex); success = false; } for (final FirewallRuleVO firewallRule : firewallEgressRules) { - s_logger.trace("Marking firewall egress rule " + firewallRule + " with Revoke state"); + logger.trace("Marking firewall egress rule " + firewallRule + " with Revoke state"); firewallRule.setState(FirewallRule.State.Revoke); } try { if (!_firewallMgr.applyRules(firewallEgressRules, true, false)) { - s_logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup firewall egress rules as a part of shutdownNetworkRules due to ", ex); success = false; } if (network.getVpcId() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing Network ACL Items for network id=" + networkId + " as a part of shutdownNetworkRules"); } try { //revoke all Network ACLs for the network w/o applying them in the DB if (!_networkACLMgr.revokeACLItemsForNetwork(networkId)) { - s_logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); + logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules due to ", ex); + logger.warn("Failed to cleanup network ACLs as a part of shutdownNetworkRules due to ", ex); success 
= false; } @@ -4080,7 +4078,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal //release all static nats for the network if (!_rulesMgr.applyStaticNatForNetwork(networkId, false, caller, true)) { - s_logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id " + networkId); + logger.warn("Failed to disable static nats as part of shutdownNetworkRules for network id " + networkId); success = false; } @@ -4097,7 +4095,7 @@ private boolean shutdownNetworkResources(final long networkId, final Account cal try { if (!_ipAddrMgr.applyIpAssociations(network, true, true, publicIpsToRelease)) { - s_logger.warn("Unable to apply ip address associations for " + network + " as a part of shutdownNetworkRules"); + logger.warn("Unable to apply ip address associations for " + network + " as a part of shutdownNetworkRules"); success = false; } } catch (final ResourceUnavailableException e) { @@ -4150,8 +4148,8 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool dcId = dc.getId(); final HypervisorType hypervisorType = startup.getHypervisorType(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host's hypervisorType is: " + hypervisorType); + if (logger.isDebugEnabled()) { + logger.debug("Host's hypervisorType is: " + hypervisorType); } final List networkInfoList = new ArrayList(); @@ -4179,20 +4177,20 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool } // send the names to the agent - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent"); + if (logger.isDebugEnabled()) { + logger.debug("Sending CheckNetworkCommand to check the Network is setup correctly on Agent"); } final CheckNetworkCommand nwCmd = new CheckNetworkCommand(networkInfoList); final CheckNetworkAnswer answer = (CheckNetworkAnswer) _agentMgr.easySend(hostId, nwCmd); if (answer == null) { - s_logger.warn("Unable 
to get an answer to the CheckNetworkCommand from agent:" + host.getId()); + logger.warn("Unable to get an answer to the CheckNetworkCommand from agent:" + host.getId()); throw new ConnectionException(true, "Unable to get an answer to the CheckNetworkCommand from agent: " + host.getId()); } if (!answer.getResult()) { - s_logger.warn("Unable to setup agent " + hostId + " due to " + answer.getDetails()); + logger.warn("Unable to setup agent " + hostId + " due to " + answer.getDetails()); final String msg = "Incorrect Network setup on agent, Reinitialize agent after network names are setup, details : " + answer.getDetails(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, host.getPodId(), msg, msg); throw new ConnectionException(true, msg); @@ -4200,8 +4198,8 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool if (answer.needReconnect()) { throw new ConnectionException(false, "Reinitialize agent after network setup."); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network setup is correct on Agent"); + if (logger.isDebugEnabled()) { + logger.debug("Network setup is correct on Agent"); } return; } @@ -4343,18 +4341,18 @@ public NicProfile createNicForVm(final Network network, final NicProfile request final VMNetworkMapVO vno = new VMNetworkMapVO(vm.getId(), network.getId()); _vmNetworkMapDao.persist(vno); } - s_logger.debug("Nic is allocated successfully for vm " + vm + " in network " + network); + logger.debug("Nic is allocated successfully for vm " + vm + " in network " + network); } //2) prepare nic if (prepare) { final Pair implemented = implementNetwork(nic.getNetworkId(), dest, context, vmProfile.getVirtualMachine().getType() == Type.DomainRouter); if (implemented == null || implemented.first() == null) { - s_logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a part of preparing nic id=" + nic.getId()); + logger.warn("Failed to implement network id=" + nic.getNetworkId() + " as a 
part of preparing nic id=" + nic.getId()); throw new CloudRuntimeException("Failed to implement network id=" + nic.getNetworkId() + " as a part preparing nic id=" + nic.getId()); } nic = prepareNic(vmProfile, dest, context, nic.getId(), implemented.second()); - s_logger.debug("Nic is prepared successfully for vm " + vm + " in network " + network); + logger.debug("Nic is prepared successfully for vm " + vm + " in network " + network); } return nic; @@ -4362,11 +4360,11 @@ public NicProfile createNicForVm(final Network network, final NicProfile request private boolean getNicProfileDefaultNic(NicProfile nicProfile) { if (nicProfile != null) { - s_logger.debug(String.format("Using requested nic profile isDefaultNic value [%s].", nicProfile.isDefaultNic())); + logger.debug(String.format("Using requested nic profile isDefaultNic value [%s].", nicProfile.isDefaultNic())); return nicProfile.isDefaultNic(); } - s_logger.debug("Using isDefaultNic default value [false] as requested nic profile is null."); + logger.debug("Using isDefaultNic default value [false] as requested nic profile is null."); return false; } @@ -4462,18 +4460,18 @@ protected List getElementForServiceInNetwork(final Network netwo final List providers = getProvidersForServiceInNetwork(network, service); //Only support one provider now if (providers == null) { - s_logger.error("Cannot find " + service.getName() + " provider for network " + network.getId()); + logger.error("Cannot find " + service.getName() + " provider for network " + network.getId()); return null; } if (providers.size() != 1 && service != Service.Lb) { //support more than one LB providers only - s_logger.error("Found " + providers.size() + " " + service.getName() + " providers for network!" + network.getId()); + logger.error("Found " + providers.size() + " " + service.getName() + " providers for network!" 
+ network.getId()); return null; } for (final Provider provider : providers) { final NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - s_logger.info("Let " + element.getName() + " handle " + service.getName() + " in network " + network.getId()); + logger.info("Let " + element.getName() + " handle " + service.getName() + " in network " + network.getId()); elements.add(element); } return elements; @@ -4534,7 +4532,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { for (final NicSecondaryIpVO ip : ipList) { _nicSecondaryIpDao.remove(ip.getId()); } - s_logger.debug("Revoving nic secondary ip entry ..."); + logger.debug("Revoving nic secondary ip entry ..."); } } }); @@ -4567,7 +4565,7 @@ public NicVO savePlaceholderNic(final Network network, final String ip4Address, @Override public Pair importNic(final String macAddress, int deviceId, final Network network, final Boolean isDefaultNic, final VirtualMachine vm, final Network.IpAddresses ipAddresses, final DataCenter dataCenter, final boolean forced) throws ConcurrentOperationException, InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException { - s_logger.debug("Allocating nic for vm " + vm.getUuid() + " in network " + network + " during import"); + logger.debug("Allocating nic for vm " + vm.getUuid() + " in network " + network + " during import"); String selectedIp = null; if (ipAddresses != null && StringUtils.isNotEmpty(ipAddresses.getIp4Address())) { if (ipAddresses.getIp4Address().equals("auto")) { @@ -4611,7 +4609,7 @@ public NicVO doInTransaction(TransactionStatus status) { int count = 1; if (vo.getVmType() == VirtualMachine.Type.User) { - s_logger.debug("Changing active number of nics for network id=" + network.getUuid() + " on " + count); + logger.debug("Changing active number of nics for network id=" + network.getUuid() + " on " + count); _networksDao.changeActiveNicsBy(network.getId(), count); } if (vo.getVmType() 
== VirtualMachine.Type.User @@ -4649,7 +4647,7 @@ protected String getSelectedIpForNicImportOnBasicZone(String requestedIp, Networ _ipAddressDao.findByIp(requestedIp); if (ipAddressVO == null || ipAddressVO.getState() != IpAddress.State.Free) { String msg = String.format("Cannot find a free IP to assign to VM NIC on network %s", network.getName()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } return ipAddressVO.getAddress() != null ? ipAddressVO.getAddress().addr() : null; @@ -4679,21 +4677,21 @@ private String generateNewMacAddressIfForced(Network network, String macAddress, " and forced flag is disabled"); } try { - s_logger.debug(String.format("Generating a new mac address on network %s as the mac address %s already exists", network.getName(), macAddress)); + logger.debug(String.format("Generating a new mac address on network %s as the mac address %s already exists", network.getName(), macAddress)); String newMacAddress = _networkModel.getNextAvailableMacAddressInNetwork(network.getId()); - s_logger.debug(String.format("Successfully generated the mac address %s, using it instead of the conflicting address %s", newMacAddress, macAddress)); + logger.debug(String.format("Successfully generated the mac address %s, using it instead of the conflicting address %s", newMacAddress, macAddress)); return newMacAddress; } catch (InsufficientAddressCapacityException e) { String msg = String.format("Could not generate a new mac address on network %s", network.getName()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @Override public void unmanageNics(VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unmanaging NICs for VM: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Unmanaging NICs for VM: " + vm.getId()); } VirtualMachine virtualMachine = vm.getVirtualMachine(); diff --git 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 873ddb5d80bf..1faf46361c64 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -59,7 +59,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.math3.stat.descriptive.moment.Mean; import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation; -import org.apache.log4j.Logger; import com.cloud.capacity.CapacityManager; import com.cloud.server.StatsCollector; @@ -75,7 +74,6 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestrationService, Configurable { - private static final Logger s_logger = Logger.getLogger(StorageOrchestrator.class); @Inject SnapshotDataStoreDao snapshotDataStoreDao; @Inject @@ -161,7 +159,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); if (migrationPolicy == MigrationPolicy.COMPLETE) { - s_logger.debug(String.format("Setting source image store: %s to read-only", srcDatastore.getId())); + logger.debug(String.format("Setting source image store: %s to read-only", srcDatastore.getId())); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -173,7 +171,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM)); Date start = new Date(); if (meanstddev < threshold && migrationPolicy == MigrationPolicy.BALANCE) { - s_logger.debug("mean std deviation of the image stores is below threshold, no migration required"); + logger.debug("mean std deviation of the image stores is below threshold, no 
migration required"); response = new MigrationResponse("Migration not required as system seems balanced", migrationPolicy.toString(), true); return response; } @@ -202,7 +200,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destDatastoreId).first()) { - s_logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId)); + logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name() , chosenFileForMigration.getUuid(), destDatastoreId)); skipped += 1; continue; } @@ -269,7 +267,7 @@ public MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreI } if (chosenFileForMigration.getPhysicalSize() > storageCapacities.get(destImgStoreId).first()) { - s_logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId)); + logger.debug(String.format("%s: %s too large to be migrated to %s", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid(), destImgStoreId)); continue; } @@ -304,7 +302,7 @@ protected Pair migrateCompleted(Long destDatastoreId, DataStore boolean success = true; if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) { if (migrationPolicy == MigrationPolicy.BALANCE) { - s_logger.debug("Migration completed : data stores have been balanced "); + logger.debug("Migration completed : data stores have been balanced "); if (destDatastoreId == srcDatastore.getId()) { message = "Seems like source datastore has more free capacity than the destination(s)"; } @@ -355,7 +353,7 @@ protected Map> migrateAway( task.setTemplateChain(templateChains); } futures.add((executor.submit(task))); - s_logger.debug(String.format("Migration of %s: %s is initiated. 
", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid())); + logger.debug(String.format("Migration of %s: %s is initiated. ", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid())); return storageCapacities; } @@ -370,7 +368,7 @@ private MigrationResponse handleResponse(List meanStdDevCurrent) { - s_logger.debug("migrating the file doesn't prove to be beneficial, skipping migration"); + logger.debug("migrating the file doesn't prove to be beneficial, skipping migration"); return false; } @@ -512,10 +510,10 @@ private boolean storageCapacityBelowThreshold(Map> storag Pair imageStoreCapacity = storageCapacities.get(destStoreId); long usedCapacity = imageStoreCapacity.second() - imageStoreCapacity.first(); if (imageStoreCapacity != null && (usedCapacity / (imageStoreCapacity.second() * 1.0)) <= CapacityManager.SecondaryStorageCapacityThreshold.value()) { - s_logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file"); + logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file"); return true; } - s_logger.debug("Image store capacity threshold exceeded, migration not possible"); + logger.debug("Image store capacity threshold exceeded, migration not possible"); return false; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index e95085d53f6e..52fd962ba0a9 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -86,7 +86,6 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import 
org.jetbrains.annotations.Nullable; import com.cloud.agent.api.to.DataTO; @@ -179,7 +178,6 @@ public enum UserVmCloneType { full, linked } - private static final Logger s_logger = Logger.getLogger(VolumeOrchestrator.class); @Inject EntityManager _entityMgr; @@ -348,12 +346,12 @@ private Optional getPreferredStoragePool(List poolList if (storagePool.isPresent()) { String storagePoolToString = getReflectOnlySelectedFields(storagePool.get()); - s_logger.debug(String.format("The storage pool [%s] was specified for this account [%s] and will be used for allocation.", storagePoolToString, vm.getAccountId())); + logger.debug(String.format("The storage pool [%s] was specified for this account [%s] and will be used for allocation.", storagePoolToString, vm.getAccountId())); } else { String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); - storagePool.ifPresent(pool -> s_logger.debug(String.format("The storage pool [%s] was specified in the Global Settings and will be used for allocation.", + storagePool.ifPresent(pool -> logger.debug(String.format("The storage pool [%s] was specified in the Global Settings and will be used for allocation.", getReflectOnlySelectedFields(pool)))); } return storagePool; @@ -374,28 +372,28 @@ public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Lo final List poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, StoragePoolAllocator.RETURN_UPTO_ALL); if (poolList != null && !poolList.isEmpty()) { - StorageUtil.traceLogStoragePools(poolList, s_logger, "pools to choose from: "); + StorageUtil.traceLogStoragePools(poolList, logger, "pools to choose from: "); // Check if the preferred storage pool can be used. If yes, use it. 
Optional storagePool = getPreferredStoragePool(poolList, vm); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent())); } StoragePool storage; if (storagePool.isPresent()) { storage = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary); - s_logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will" + logger.debug(String.format("VM [%s] has a preferred storage pool [%s]. Volume Orchestrator found this storage using Storage Pool Allocator [%s] and will" + " use it.", vm, storage, allocator.getClass().getSimpleName())); } else { storage = (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); - s_logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. Volume Orchestrator will use the available Storage Pool" + logger.debug(String.format("VM [%s] does not have a preferred storage pool or it cannot be used. 
Volume Orchestrator will use the available Storage Pool" + " [%s], which was discovered using Storage Pool Allocator [%s].", vm, storage, allocator.getClass().getSimpleName())); } return storage; } - s_logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator.getClass().getSimpleName())); + logger.debug(String.format("Could not find any available Storage Pool using Storage Pool Allocator [%s].", allocator.getClass().getSimpleName())); } - s_logger.info("Volume Orchestrator could not find any available Storage Pool."); + logger.info("Volume Orchestrator could not find any available Storage Pool."); return null; } @@ -524,7 +522,7 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use String logMsg = String.format("Could not find a storage pool in the pod/cluster of the provided VM [%s] to create the volume [%s] in.", vm, volumeToString); //pool could not be found in the VM's pod/cluster. - s_logger.error(logMsg); + logger.error(logMsg); StringBuilder addDetails = new StringBuilder(msg); addDetails.append(logMsg); @@ -542,8 +540,8 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use if (pool != null) { String poolToString = getReflectOnlySelectedFields(pool); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Found a suitable pool [%s] to create the volume [%s] in.", poolToString, volumeToString)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found a suitable pool [%s] to create the volume [%s] in.", poolToString, volumeToString)); } break; } @@ -551,7 +549,7 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use } if (pool == null) { - s_logger.info(msg); + logger.info(msg); throw new StorageUnavailableException(msg, -1); } @@ -579,7 +577,7 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use _snapshotSrv.syncVolumeSnapshotsToRegionStore(snapVolId, 
snapStore); } catch (Exception ex) { // log but ignore the sync error to avoid any potential S3 down issue, it should be sync next time - s_logger.warn(ex.getMessage(), ex); + logger.warn(ex.getMessage(), ex); } } @@ -591,14 +589,14 @@ public VolumeInfo createVolumeFromSnapshot(Volume volume, Snapshot snapshot, Use VolumeApiResult result = future.get(); if (result.isFailed()) { String logMsg = String.format("Failed to create volume from snapshot [%s] due to [%s].", snapshotToString, result.getResult()); - s_logger.error(logMsg); + logger.error(logMsg); throw new CloudRuntimeException(logMsg); } return result.getVolume(); } catch (InterruptedException | ExecutionException e) { String message = String.format("Failed to create volume from snapshot [%s] due to [%s].", snapshotToString, e.getMessage()); - s_logger.error(message); - s_logger.debug("Exception: ", e); + logger.error(message); + logger.debug("Exception: ", e); throw new CloudRuntimeException(message, e); } finally { snapshotHelper.expungeTemporarySnapshot(kvmSnapshotOnlyInPrimaryStorage, snapInfo); @@ -647,14 +645,14 @@ public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volumeInfo, VirtualMachi VolumeApiResult result = future.get(); if (result.isFailed()) { String msg = String.format("Copy of the volume [%s] failed due to [%s].", volumeToString, result.getResult()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } return result.getVolume(); } catch (InterruptedException | ExecutionException e) { String msg = String.format("Failed to copy the volume [%s] due to [%s].", volumeToString, e.getMessage()); - s_logger.error(msg); - s_logger.debug("Exception: ", e); + logger.error(msg); + logger.debug("Exception: ", e); throw new CloudRuntimeException(msg, e); } } @@ -699,14 +697,14 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual pool = findStoragePool(dskCh, dc, pod, clusterId, hostId, vm, avoidPools); if (pool == null) { String msg = 
String.format("Unable to find suitable primary storage when creating volume [%s].", volumeToString); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } String poolToString = getReflectOnlySelectedFields(pool); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Trying to create volume [%s] on storage pool [%s].", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Trying to create volume [%s] on storage pool [%s].", volumeToString, poolToString)); } DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); @@ -724,19 +722,19 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual VolumeApiResult result = future.get(); if (result.isFailed()) { if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) { - s_logger.debug(String.format("Retrying to deploy template [%s] for VMware, attempt 2/2. ", templateToString)); + logger.debug(String.format("Retrying to deploy template [%s] for VMware, attempt 2/2. 
", templateToString)); continue; } else { String msg = String.format("Failed to create volume [%s] due to [%s].", volumeToString, result.getResult()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } return result.getVolume(); } catch (InterruptedException | ExecutionException e) { String msg = String.format("Failed to create volume [%s] due to [%s].", volumeToString, e.getMessage()); - s_logger.error(msg); - s_logger.debug("Exception: ", e); + logger.error(msg); + logger.debug("Exception: ", e); throw new CloudRuntimeException(msg, e); } } @@ -903,10 +901,10 @@ private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering } else { rootDisksize = rootDisksize * 1024 * 1024 * 1024; if (rootDisksize > size) { - s_logger.debug(String.format("Using root disk size of [%s] bytes for the volume [%s].", toHumanReadableSize(rootDisksize), name)); + logger.debug(String.format("Using root disk size of [%s] bytes for the volume [%s].", toHumanReadableSize(rootDisksize), name)); size = rootDisksize; } else { - s_logger.debug(String.format("The specified root disk size of [%s] bytes is smaller than the template. Using root disk size of [%s] bytes for the volume [%s].", + logger.debug(String.format("The specified root disk size of [%s] bytes is smaller than the template. Using root disk size of [%s] bytes for the volume [%s].", toHumanReadableSize(rootDisksize), size, name)); } } @@ -983,7 +981,7 @@ public List allocateTemplatedVolumes(Type type, String name, DiskOf if (template.isDeployAsIs() && vm.getType() != VirtualMachine.Type.SecondaryStorageVm) { List runningSSVMs = secondaryStorageVmDao.getSecStorageVmListInStates(null, vm.getDataCenterId(), State.Running); if (CollectionUtils.isEmpty(runningSSVMs)) { - s_logger.info(String.format("Could not find a running SSVM in datacenter [%s] for deploying VM as is. 
Not deploying VM [%s] as is.", + logger.info(String.format("Could not find a running SSVM in datacenter [%s] for deploying VM as is. Not deploying VM [%s] as is.", vm.getDataCenterId(), vm)); } else { UserVmDetailVO configurationDetail = userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.DEPLOY_AS_IS_CONFIGURATION); @@ -1018,7 +1016,7 @@ public List allocateTemplatedVolumes(Type type, String name, DiskOf volumeSize = templateAsIsDisks.get(number).getVirtualSize(); deviceId = templateAsIsDisks.get(number).getDiskNumber(); } - s_logger.info(String.format("Adding disk object [%s] to VM [%s]", volumeName, vm)); + logger.info(String.format("Adding disk object [%s] to VM [%s]", volumeName, vm)); DiskProfile diskProfile = allocateTemplatedVolume(type, volumeName, offering, volumeSize, minIops, maxIops, template, vm, owner, deviceId, configurationId); profiles.add(diskProfile); @@ -1115,8 +1113,8 @@ public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo vol VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId()); DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId()); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId())); } Long podId = storagePool.getPodId() != null ? 
storagePool.getPodId() : vm.getPodIdToDeployIn(); Pod pod = _entityMgr.findById(Pod.class, podId); @@ -1124,8 +1122,8 @@ public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo vol ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId()); DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volumeInfo.getDiskOfferingId()); Long clusterId = storagePool.getClusterId(); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId)); } Long hostId = vm.getHostId(); if (hostId == null && storagePool.isLocal()) { @@ -1166,8 +1164,8 @@ protected VolumeVO switchVolume(final VolumeVO existingVolume, final VirtualMach Long volTemplateId = existingVolume.getTemplateId(); long vmTemplateId = vm.getTemplateId(); if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("switchVolume: Old volume's templateId [%s] does not match the VM's templateId [%s]. Updating templateId in the new volume.", volTemplateId, vmTemplateId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("switchVolume: Old volume's templateId [%s] does not match the VM's templateId [%s]. 
Updating templateId in the new volume.", volTemplateId, vmTemplateId)); } templateIdToUse = vmTemplateId; } @@ -1180,17 +1178,17 @@ public VolumeVO doInTransaction(TransactionStatus status) { try { stateTransitTo(existingVolume, Volume.Event.DestroyRequested); } catch (NoTransitionException e) { - s_logger.error(String.format("Unable to destroy existing volume [%s] due to [%s].", volumeToString, e.getMessage())); + logger.error(String.format("Unable to destroy existing volume [%s] due to [%s].", volumeToString, e.getMessage())); } // In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk if (vm.getHypervisorType() == HypervisorType.VMware) { - s_logger.info(String.format("Trying to expunge volume [%s] from primary data storage.", volumeToString)); + logger.info(String.format("Trying to expunge volume [%s] from primary data storage.", volumeToString)); AsyncCallFuture future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId())); try { future.get(); } catch (Exception e) { - s_logger.error(String.format("Failed to expunge volume [%s] from primary data storage due to [%s].", volumeToString, e.getMessage())); - s_logger.debug("Exception: ", e); + logger.error(String.format("Failed to expunge volume [%s] from primary data storage due to [%s].", volumeToString, e.getMessage())); + logger.debug("Exception: ", e); } } @@ -1216,8 +1214,8 @@ public void release(long vmId, long hostId) { HostVO host = _hostDao.findById(hostId); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Releasing [%s] volumes for VM [%s] from host [%s].", volumesForVm.size(), _userVmDao.findById(vmId), host)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Releasing [%s] volumes for VM [%s] from host [%s].", volumesForVm.size(), _userVmDao.findById(vmId), host)); } for (VolumeVO volumeForVm : volumesForVm) { @@ -1241,8 +1239,8 @@ public void release(long vmId, long hostId) { public void 
cleanupVolumes(long vmId) throws ConcurrentOperationException { VMInstanceVO vm = _userVmDao.findById(vmId); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Cleaning storage for VM [%s].", vm)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Cleaning storage for VM [%s].", vm)); } final List volumesForVm = _volsDao.findByInstance(vmId); final List toBeExpunged = new ArrayList(); @@ -1259,12 +1257,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (!volumeAlreadyDestroyed) { destroyVolumeInContext(vol); } else { - s_logger.debug(String.format("Skipping destroy for the volume [%s] as it is in [%s] state.", volumeToString, vol.getState().toString())); + logger.debug(String.format("Skipping destroy for the volume [%s] as it is in [%s] state.", volumeToString, vol.getState().toString())); } toBeExpunged.add(vol); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Detaching volume [%s].", volumeToString)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Detaching volume [%s].", volumeToString)); } if (vm.getHypervisorType().equals(HypervisorType.VMware)) { _volumeApiService.detachVolumeViaDestroyVM(vmId, vol.getId()); @@ -1283,8 +1281,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { future.get(); } catch (InterruptedException | ExecutionException e) { - s_logger.error(String.format("Failed to expunge volume [%s] due to [%s].", expungeToString, e.getMessage())); - s_logger.debug("Exception: ", e); + logger.error(String.format("Failed to expunge volume [%s] due to [%s].", expungeToString, e.getMessage())); + logger.debug("Exception: ", e); } } } @@ -1367,7 +1365,7 @@ public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageU String volToString = getReflectOnlySelectedFields(vol.getVolume()); String msg = String.format("Volume [%s] migration failed due to [%s].", volToString, result.getResult()); - s_logger.error(msg); + 
logger.error(msg); if (result.getResult() != null && result.getResult().contains("[UNSUPPORTED]")) { throw new CloudRuntimeException(msg); @@ -1383,8 +1381,8 @@ public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageU return result.getVolume(); } catch (InterruptedException | ExecutionException e) { String msg = String.format("Volume [%s] migration failed due to [%s].", volumeToString, e.getMessage()); - s_logger.error(msg); - s_logger.debug("Exception: ", e); + logger.error(msg); + logger.debug("Exception: ", e); throw new CloudRuntimeException(msg, e); } } @@ -1401,13 +1399,13 @@ public Volume liveMigrateVolume(Volume volume, StoragePool destPool) { try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, result.getResult())); + logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, result.getResult())); return null; } return result.getVolume(); } catch (InterruptedException | ExecutionException e) { - s_logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, e.getMessage())); - s_logger.debug("Exception: ", e); + logger.error(String.format("Volume [%s] migration failed due to [%s].", volToString, e.getMessage())); + logger.debug("Exception: ", e); return null; } } @@ -1443,12 +1441,12 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos CommandResult result = future.get(); if (result.isFailed()) { String msg = String.format("Failed to migrate VM [%s] along with its volumes due to [%s].", vm, result.getResult()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } catch (InterruptedException | ExecutionException e) { - s_logger.error(String.format("Failed to migrate VM [%s] along with its volumes due to [%s].", vm, e.getMessage())); - s_logger.debug("Exception: ", e); + logger.error(String.format("Failed to migrate VM 
[%s] along with its volumes due to [%s].", vm, e.getMessage())); + logger.debug("Exception: ", e); } } @@ -1464,23 +1462,23 @@ public boolean storageMigration(VirtualMachineProfile vm, Map entry : volumeStoragePoolMap.entrySet()) { Volume result = migrateVolume(entry.getKey(), entry.getValue()); @@ -1494,8 +1492,8 @@ public boolean storageMigration(VirtualMachineProfile vm, Map vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Preparing to migrate [%s] volumes for VM [%s].", vols.size(), vm.getVirtualMachine())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Preparing to migrate [%s] volumes for VM [%s].", vols.size(), vm.getVirtualMachine())); } for (VolumeVO vol : vols) { @@ -1618,15 +1616,15 @@ private List getTasks(List vols, Map tasks.add(task); } else { if (vol.isRecreatable()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Volume [%s] will be recreated on storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Volume [%s] will be recreated on storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString)); } VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); tasks.add(task); } else { if (assignedPool.getId() != vol.getPoolId()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Mismatch with the storage pool [%s] assigned by deploymentPlanner and the one associated with the volume [%s].", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Mismatch with the storage pool [%s] assigned by deploymentPlanner and the one associated with the volume [%s].", assignedPoolToString, volToString)); } DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId()); @@ -1634,7 +1632,7 @@ private List getTasks(List vols, Map // Currently migration of local volume is 
not supported so bail out String msg = String.format("Local volume [%s] cannot be recreated on storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } else { @@ -1647,8 +1645,8 @@ private List getTasks(List vols, Map storageMigrationEnabled = StorageMigrationEnabled.value(); } if (storageMigrationEnabled) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Shared volume [%s] will be migrated to the storage pool [%s], assigned by deploymentPlanner.", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Shared volume [%s] will be migrated to the storage pool [%s], assigned by deploymentPlanner.", volToString, assignedPoolToString)); } VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); @@ -1673,8 +1671,8 @@ private List getTasks(List vols, Map StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("No need to recreate the volume [%s] since it already has an assigned pool: [%s]. Adding disk to the VM.", + if (logger.isDebugEnabled()) { + logger.debug(String.format("No need to recreate the volume [%s] since it already has an assigned pool: [%s]. Adding disk to the VM.", volToString, pool.getUuid())); } VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool); @@ -1689,7 +1687,7 @@ protected void checkAndUpdateVolumeAccountResourceCount(VolumeVO originalEntry, if (Objects.equals(originalEntry.getSize(), updateEntry.getSize())) { return; } - s_logger.debug(String.format("Size mismatch found for %s after creation, old size: %d, new size: %d. Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize())); + logger.debug(String.format("Size mismatch found for %s after creation, old size: %d, new size: %d. 
Updating resource count", updateEntry, originalEntry.getSize(), updateEntry.getSize())); if (ObjectUtils.anyNull(originalEntry.getSize(), updateEntry.getSize())) { _resourceLimitMgr.recalculateResourceCount(updateEntry.getAccountId(), updateEntry.getDomainId(), ResourceType.primary_storage.getOrdinal()); @@ -1711,7 +1709,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) { destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); String destPoolToString = getReflectOnlySelectedFields(destPool); - s_logger.debug(String.format("Existing pool: [%s].", destPoolToString)); + logger.debug(String.format("Existing pool: [%s].", destPoolToString)); } else { StoragePool pool = dest.getStorageForDisks().get(vol); destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); @@ -1733,8 +1731,8 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro dest.getStorageForDisks().put(newVol, poolWithOldVol); dest.getStorageForDisks().remove(vol); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Created new volume [%s] from old volume [%s].", newVolToString, volToString)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Created new volume [%s] from old volume [%s].", newVolToString, volToString)); } } VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool); @@ -1756,7 +1754,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro } else { final VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId); if (template == null) { - s_logger.error(String.format("Failed to find template: %d for %s", templateId, volume)); + logger.error(String.format("Failed to find template: %d for %s", templateId, volume)); throw new CloudRuntimeException(String.format("Failed to find template for volume ID: %s", volume.getUuid())); } TemplateInfo templ = 
tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId()); @@ -1768,19 +1766,19 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro if (!primaryDataStore.isManaged()) { templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId()); } else { - s_logger.debug(String.format("Directly downloading template [%s] on host [%s] and copying it to the managed storage pool [%s].", + logger.debug(String.format("Directly downloading template [%s] on host [%s] and copying it to the managed storage pool [%s].", templateId, dest.getHost().getUuid(), destPool.getUuid())); templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId()); } if (templ == null) { String msg = String.format("Failed to spool direct download template [%s] to the data center [%s].", templateId, dest.getDataCenter().getUuid()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } else { String msg = String.format("Could not find template [%s] ready for the data center [%s].", templateId, dest.getDataCenter().getUuid()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -1806,11 +1804,11 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro result = future.get(); if (result.isFailed()) { if (result.getResult().contains(REQUEST_TEMPLATE_RELOAD) && (i == 0)) { - s_logger.debug("Retrying template deploy for VMware."); + logger.debug("Retrying template deploy for VMware."); continue; } else { String msg = String.format("Unable to create volume [%s] due to [%s].", newVolToString, result.getResult()); - s_logger.error(msg); + logger.error(msg); throw new StorageUnavailableException(msg, destPool.getId()); } } @@ -1834,8 +1832,8 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro throw e; } catch (InterruptedException | ExecutionException e) { String msg = String.format("Unable to create volume [%s] due 
to [%s].", newVolToString, e.toString()); - s_logger.error(msg); - s_logger.debug("Exception: ", e); + logger.error(msg); + logger.debug("Exception: ", e); throw new StorageUnavailableException(msg, destPool.getId()); } } @@ -1847,12 +1845,12 @@ private VolumeVO setPassphraseForVolumeEncryption(VolumeVO volume) { if (volume.getPassphraseId() != null) { return volume; } - s_logger.debug("Creating passphrase for the volume: " + volume.getName()); + logger.debug("Creating passphrase for the volume: " + volume.getName()); long startTime = System.currentTimeMillis(); PassphraseVO passphrase = passphraseDao.persist(new PassphraseVO(true)); volume.setPassphraseId(passphrase.getId()); long finishTime = System.currentTimeMillis(); - s_logger.debug("Creating and persisting passphrase took: " + (finishTime - startTime) + " ms for the volume: " + volume.toString()); + logger.debug("Creating and persisting passphrase took: " + (finishTime - startTime) + " ms for the volume: " + volume.toString()); return _volsDao.persist(volume); } @@ -1860,7 +1858,7 @@ private VolumeVO setPassphraseForVolumeEncryption(VolumeVO volume) { public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException { if (dest == null) { String msg = String.format("Unable to prepare volumes for the VM [%s] because DeployDestination is null.", vm.getVirtualMachine()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -2009,12 +2007,12 @@ private void cleanupVolumeDuringAttachFailure(Long volumeId, Long vmId) { String volumeToString = getReflectOnlySelectedFields(volume); if (volume.getState().equals(Volume.State.Creating)) { - s_logger.debug(String.format("Removing volume [%s], as it was leftover from the last management server stop.", volumeToString)); + logger.debug(String.format("Removing volume [%s], as it was leftover from the last 
management server stop.", volumeToString)); _volsDao.remove(volume.getId()); } if (volume.getState().equals(Volume.State.Attaching)) { - s_logger.warn(String.format("Volume [%s] failed to attach to the VM [%s] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId))); + logger.warn(String.format("Volume [%s] failed to attach to the VM [%s] on the last management server stop, changing state back to Ready.", volumeToString, _userVmDao.findById(vmId))); volume.setState(Volume.State.Ready); _volsDao.update(volumeId, volume); } @@ -2037,11 +2035,11 @@ private void cleanupVolumeDuringMigrationFailure(Long volumeId, Long destPoolId) if (duplicateVol != null) { String duplicateVolToString = getReflectOnlySelectedFields(duplicateVol); - s_logger.debug(String.format("Removing volume [%s] from storage pool [%s] because it's duplicated.", duplicateVolToString, destPoolToString)); + logger.debug(String.format("Removing volume [%s] from storage pool [%s] because it's duplicated.", duplicateVolToString, destPoolToString)); _volsDao.remove(duplicateVol.getId()); } - s_logger.debug(String.format("Changing volume [%s] state from Migrating to Ready in case of migration failure.", volumeToString)); + logger.debug(String.format("Changing volume [%s] state from Migrating to Ready in case of migration failure.", volumeToString)); volume.setState(Volume.State.Ready); _volsDao.update(volumeId, volume); } @@ -2055,7 +2053,7 @@ private void cleanupVolumeDuringSnapshotFailure(Long volumeId, Long snapshotId) String volumeToString = getReflectOnlySelectedFields(volume); if (volume.getState() == Volume.State.Snapshotting) { - s_logger.debug(String.format("Changing volume [%s] state back to Ready.", volumeToString)); + logger.debug(String.format("Changing volume [%s] state back to Ready.", volumeToString)); volume.setState(Volume.State.Ready); _volsDao.update(volume.getId(), volume); } @@ -2079,8 +2077,8 @@ public void cleanupStorageJobs() { 
cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId()); } } catch (Exception e) { - s_logger.error(String.format("Clean up job failed due to [%s]. Will continue with other clean up jobs.", e.getMessage())); - s_logger.debug("Exception: ", e); + logger.error(String.format("Clean up job failed due to [%s]. Will continue with other clean up jobs.", e.getMessage())); + logger.debug("Exception: ", e); } } } @@ -2115,8 +2113,8 @@ public void destroyVolume(Volume volume) { volume.getUuid(), volume.isDisplayVolume()); } catch (Exception e) { String msg = String.format("Failed to destroy volume [%s] due to [%s].", volumeToString, e.getMessage()); - s_logger.error(msg); - s_logger.debug("Exception: ", e); + logger.error(msg); + logger.debug("Exception: ", e); throw new CloudRuntimeException(msg, e); } } @@ -2157,7 +2155,7 @@ public void updateVolumeDiskChain(long volumeId, String path, String chainInfo, } if (needUpdate) { - s_logger.info(String.format("Updating volume's disk chain info. Volume: [%s]. Path: [%s] -> [%s], Disk Chain Info: [%s] -> [%s].", + logger.info(String.format("Updating volume's disk chain info. Volume: [%s]. 
Path: [%s] -> [%s], Disk Chain Info: [%s] -> [%s].", volToString, vol.getPath(), path, vol.getChainInfo(), chainInfo)); vol.setPath(path); vol.setChainInfo(chainInfo); @@ -2269,8 +2267,8 @@ public DiskProfile updateImportedVolume(Type type, DiskOffering offering, Virtua @Override public void unmanageVolumes(long vmId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Unmanaging storage for VM [%s].", _userVmDao.findById(vmId))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Unmanaging storage for VM [%s].", _userVmDao.findById(vmId))); } final List volumesForVm = _volsDao.findByInstance(vmId); @@ -2283,7 +2281,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged || vol.getState() == Volume.State.Expunging); if (volumeAlreadyDestroyed) { - s_logger.debug(String.format("Skipping Destroy for the volume [%s] as it is in [%s] state.", volToString, vol.getState().toString())); + logger.debug(String.format("Skipping Destroy for the volume [%s] as it is in [%s] state.", volToString, vol.getState().toString())); } else { volService.unmanageVolume(vol.getId()); } diff --git a/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java b/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java index d3d8cab2989b..d1532cdbef14 100644 --- a/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java +++ b/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java @@ -32,7 +32,6 @@ import com.cloud.dc.DataCenter; import com.cloud.network.IpAddressManager; import com.cloud.utils.Pair; -import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -93,7 +92,6 @@ */ 
@RunWith(JUnit4.class) public class NetworkOrchestratorTest extends TestCase { - static final Logger s_logger = Logger.getLogger(NetworkOrchestratorTest.class); NetworkOrchestrator testOrchastrator = Mockito.spy(new NetworkOrchestrator()); diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java index 302ffd8e760a..67d037319d5b 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.commons.lang3.StringUtils; import org.springframework.stereotype.Component; @@ -51,7 +50,6 @@ @Component public class CapacityDaoImpl extends GenericDaoBase implements CapacityDao { - private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class); private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? 
AND capacity_type = ?"; private static final String SUBTRACT_ALLOCATED_SQL = @@ -612,7 +610,7 @@ public void updateAllocated(Long hostId, long allocatedAmount, short capacityTyp txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Exception updating capacity for host: " + hostId, e); + logger.warn("Exception updating capacity for host: " + hostId, e); } } @@ -1126,7 +1124,7 @@ public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long host pstmt.executeUpdate(); } catch (Exception e) { - s_logger.warn("Error updating CapacityVO", e); + logger.warn("Error updating CapacityVO", e); } } @@ -1146,7 +1144,7 @@ public float findClusterConsumption(Long clusterId, short capacityType, long com return rs.getFloat(1); } } catch (Exception e) { - s_logger.warn("Error checking cluster threshold", e); + logger.warn("Error checking cluster threshold", e); } return 0; } diff --git a/engine/schema/src/main/java/com/cloud/certificate/dao/CertificateDaoImpl.java b/engine/schema/src/main/java/com/cloud/certificate/dao/CertificateDaoImpl.java index 9544804284e9..99ba36f22b91 100644 --- a/engine/schema/src/main/java/com/cloud/certificate/dao/CertificateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/certificate/dao/CertificateDaoImpl.java @@ -17,7 +17,6 @@ package com.cloud.certificate.dao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.certificate.CertificateVO; @@ -28,7 +27,6 @@ @DB public class CertificateDaoImpl extends GenericDaoBase implements CertificateDao { - private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class); public CertificateDaoImpl() { @@ -42,7 +40,7 @@ public Long persistCustomCertToDb(String certStr, CertificateVO cert, Long manag update(cert.getId(), cert); return cert.getId(); } catch (Exception e) { - s_logger.warn("Unable to read the certificate: " + e); + logger.warn("Unable to read the certificate: " + e); return new Long(0); } } diff --git 
a/engine/schema/src/main/java/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java index e1c0dbd9e472..861dbeb1df4a 100644 --- a/engine/schema/src/main/java/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java @@ -21,7 +21,6 @@ import javax.annotation.PostConstruct; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.cluster.agentlb.HostTransferMapVO; @@ -34,7 +33,6 @@ @Component @DB public class HostTransferMapDaoImpl extends GenericDaoBase implements HostTransferMapDao { - private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class); protected SearchBuilder AllFieldsSearch; protected SearchBuilder IntermediateStateSearch; diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java index 491919bbca73..2776b09c2a1c 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDaoImpl.java @@ -26,7 +26,6 @@ import javax.naming.ConfigurationException; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; @@ -55,7 +54,6 @@ **/ @Component public class DataCenterDaoImpl extends GenericDaoBase implements DataCenterDao { - private static final Logger s_logger = Logger.getLogger(DataCenterDaoImpl.class); protected SearchBuilder NameSearch; protected SearchBuilder ListZonesByDomainIdSearch; @@ -405,7 +403,7 @@ public DataCenterVO findByTokenOrIdOrName(String tokenOrIdOrName) { Long dcId = Long.parseLong(tokenOrIdOrName); return findById(dcId); } catch (NumberFormatException nfe) { - s_logger.debug("Cannot parse " + tokenOrIdOrName + " 
into long. " + nfe); + logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe); } } } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index e58b08da4c18..c23137095e6f 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterIpAddressVO; @@ -40,7 +39,6 @@ @Component @DB public class DataCenterIpAddressDaoImpl extends GenericDaoBase implements DataCenterIpAddressDao, Configurable { - private static final Logger s_logger = Logger.getLogger(DataCenterIpAddressDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final GenericSearchBuilder AllIpCount; @@ -169,8 +167,8 @@ public void addIpRange(long dcId, long podId, String start, String end, boolean @Override public void releaseIpAddress(String ipAddress, long dcId, Long instanceId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId); } SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); @@ -187,8 +185,8 @@ public void releaseIpAddress(String ipAddress, long dcId, Long instanceId) { @Override public void releaseIpAddress(long nicId, String reservationId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip address for reservationId=" + reservationId + ", instance=" + nicId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip address for reservationId=" + reservationId + ", instance=" + 
nicId); } SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instance", nicId); @@ -203,8 +201,8 @@ public void releaseIpAddress(long nicId, String reservationId) { @Override public void releasePodIpAddress(long id) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip address for ID=" + id); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip address for ID=" + id); } DataCenterIpAddressVO vo = this.findById(id); @@ -216,8 +214,8 @@ public void releasePodIpAddress(long id) { @Override public void releaseIpAddress(long nicId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip address for instance=" + nicId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip address for instance=" + nicId); } SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instance", nicId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java index 4fa3ad75ab50..517f02edde78 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java @@ -24,7 +24,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterLinkLocalIpAddressVO; @@ -41,7 +40,6 @@ @Component @DB public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase implements DataCenterLinkLocalIpAddressDao { - private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final GenericSearchBuilder AllIpCount; @@ -105,8 +103,8 @@ public void addIpRange(long dcId, long podId, String start, String end) { @Override public void releaseIpAddress(String ipAddress, long dcId, long instanceId) { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId); } SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java index 3fbeb58c56ff..f1835067380f 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/HostPodDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.HostPodVO; @@ -38,7 +37,6 @@ @Component public class HostPodDaoImpl extends GenericDaoBase implements HostPodDao { - private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class); protected SearchBuilder DataCenterAndNameSearch; protected SearchBuilder DataCenterIdSearch; @@ -100,7 +98,7 @@ public HashMap> getCurrentPodCidrSubnets(long zoneId, long po currentPodCidrSubnets.put(podId, cidrPair); } } catch (SQLException ex) { - s_logger.warn("DB exception " + ex.getMessage(), ex); + logger.warn("DB exception " + ex.getMessage(), ex); return null; } diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java index 0cdb6ad74220..c99fec5a17a5 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/VsphereStoragePolicyDaoImpl.java @@ -20,7 +20,6 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.util.List; @@ -28,7 +27,6 @@ 
@Component public class VsphereStoragePolicyDaoImpl extends GenericDaoBase implements VsphereStoragePolicyDao { - protected static final Logger LOGGER = Logger.getLogger(VsphereStoragePolicyDaoImpl.class); private final SearchBuilder zoneSearch; private final SearchBuilder policySearch; diff --git a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java index 05cd4cc492e6..4c36a3401ca6 100644 --- a/engine/schema/src/main/java/com/cloud/domain/DomainVO.java +++ b/engine/schema/src/main/java/com/cloud/domain/DomainVO.java @@ -26,14 +26,15 @@ import javax.persistence.Id; import javax.persistence.Table; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.db.GenericDao; @Entity @Table(name = "domain") public class DomainVO implements Domain { - public static final Logger s_logger = Logger.getLogger(DomainVO.class.getName()); + protected transient Logger logger = LogManager.getLogger(getClass()); @Id @GeneratedValue(strategy = GenerationType.IDENTITY) diff --git a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDaoImpl.java b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDaoImpl.java index c020493d8f9c..74f2932ca433 100644 --- a/engine/schema/src/main/java/com/cloud/domain/dao/DomainDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/domain/dao/DomainDaoImpl.java @@ -25,7 +25,6 @@ import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.domain.Domain; @@ -40,7 +39,6 @@ @Component public class DomainDaoImpl extends GenericDaoBase implements DomainDao { - private static final Logger s_logger = Logger.getLogger(DomainDaoImpl.class); protected SearchBuilder DomainNameLikeSearch; protected SearchBuilder ParentDomainNameLikeSearch; @@ -112,7 +110,7 @@ public synchronized DomainVO create(DomainVO domain) { DomainVO 
parentDomain = findById(parent); if (parentDomain == null) { - s_logger.error("Unable to load parent domain: " + parent); + logger.error("Unable to load parent domain: " + parent); return null; } @@ -122,7 +120,7 @@ public synchronized DomainVO create(DomainVO domain) { parentDomain = this.lockRow(parent, true); if (parentDomain == null) { - s_logger.error("Unable to lock parent domain: " + parent); + logger.error("Unable to lock parent domain: " + parent); return null; } @@ -137,7 +135,7 @@ public synchronized DomainVO create(DomainVO domain) { txn.commit(); return domain; } catch (Exception e) { - s_logger.error("Unable to create domain due to " + e.getMessage(), e); + logger.error("Unable to create domain due to " + e.getMessage(), e); txn.rollback(); return null; } @@ -148,23 +146,23 @@ public synchronized DomainVO create(DomainVO domain) { public boolean remove(Long id) { // check for any active users / domains assigned to the given domain id and don't remove the domain if there are any if (id != null && id.longValue() == Domain.ROOT_DOMAIN) { - s_logger.error("Can not remove domain " + id + " as it is ROOT domain"); + logger.error("Can not remove domain " + id + " as it is ROOT domain"); return false; } else { if(id == null) { - s_logger.error("Can not remove domain without id."); + logger.error("Can not remove domain without id."); return false; } } DomainVO domain = findById(id); if (domain == null) { - s_logger.info("Unable to remove domain as domain " + id + " no longer exists"); + logger.info("Unable to remove domain as domain " + id + " no longer exists"); return true; } if (domain.getParent() == null) { - s_logger.error("Invalid domain " + id + ", orphan?"); + logger.error("Invalid domain " + id + ", orphan?"); return false; } @@ -177,7 +175,7 @@ public boolean remove(Long id) { txn.start(); DomainVO parentDomain = super.lockRow(domain.getParent(), true); if (parentDomain == null) { - s_logger.error("Unable to load parent domain: " + 
domain.getParent()); + logger.error("Unable to load parent domain: " + domain.getParent()); return false; } @@ -198,7 +196,7 @@ public boolean remove(Long id) { txn.commit(); } catch (SQLException ex) { success = false; - s_logger.error("error removing domain: " + id, ex); + logger.error("error removing domain: " + id, ex); txn.rollback(); } return success; @@ -310,7 +308,7 @@ public boolean domainIdListContainsAccessibleDomain(String domainIdList, Account return true; } } catch (NumberFormatException nfe) { - s_logger.debug(String.format("Unable to parse %s as domain ID from the list of domain IDs: %s", domainIdList.trim(), domainIdList), nfe); + logger.debug(String.format("Unable to parse %s as domain ID from the list of domain IDs: %s", domainIdList.trim(), domainIdList), nfe); } } return false; diff --git a/engine/schema/src/main/java/com/cloud/event/dao/EventDaoImpl.java b/engine/schema/src/main/java/com/cloud/event/dao/EventDaoImpl.java index d4627904f0b4..e748e98900eb 100644 --- a/engine/schema/src/main/java/com/cloud/event/dao/EventDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/event/dao/EventDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.event.Event.State; @@ -34,7 +33,6 @@ @Component public class EventDaoImpl extends GenericDaoBase implements EventDao { - public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName()); protected final SearchBuilder CompletedEventSearch; protected final SearchBuilder ToArchiveOrDeleteEventSearch; diff --git a/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java b/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java index 519b2ecfe736..fdef509da5bd 100644 --- a/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java @@ -25,7 +25,6 @@ import javax.inject.Inject; 
-import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.Vlan; @@ -42,7 +41,6 @@ @Component public class UsageEventDaoImpl extends GenericDaoBase implements UsageEventDao { - public static final Logger s_logger = Logger.getLogger(UsageEventDaoImpl.class.getName()); private final SearchBuilder latestEventsSearch; private final SearchBuilder IpeventsSearch; @@ -101,8 +99,8 @@ public synchronized List getRecentEvents(Date endDate) { // Copy events from cloud db to usage db String sql = COPY_EVENTS; if (recentEventId == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("no recent event date, copying all events"); + if (logger.isDebugEnabled()) { + logger.debug("no recent event date, copying all events"); } sql = COPY_ALL_EVENTS; } @@ -120,7 +118,7 @@ public synchronized List getRecentEvents(Date endDate) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error copying events from cloud db to usage db", ex); + logger.error("error copying events from cloud db to usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -129,8 +127,8 @@ public synchronized List getRecentEvents(Date endDate) { // Copy event details from cloud db to usage db sql = COPY_EVENT_DETAILS; if (recentEventId == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("no recent event date, copying all event detailss"); + if (logger.isDebugEnabled()) { + logger.debug("no recent event date, copying all event detailss"); } sql = COPY_ALL_EVENT_DETAILS; } @@ -148,7 +146,7 @@ public synchronized List getRecentEvents(Date endDate) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error copying event details from cloud db to usage db", ex); + logger.error("error copying event details from cloud db to usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -171,7 +169,7 @@ private long getMostRecentEventId() { } return 0; } catch 
(Exception ex) { - s_logger.error("error getting most recent event id", ex); + logger.error("error getting most recent event id", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -183,7 +181,7 @@ private List findRecentEvents(Date endDate) { try { return listLatestEvents(endDate); } catch (Exception ex) { - s_logger.error("error getting most recent event date", ex); + logger.error("error getting most recent event date", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); @@ -203,7 +201,7 @@ private long getMaxEventId(Date endDate) { } return 0; } catch (Exception ex) { - s_logger.error("error getting max event id", ex); + logger.error("error getting max event id", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); diff --git a/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDetailsDaoImpl.java index 43e00efc1ad3..37b203b2e88f 100644 --- a/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDetailsDaoImpl.java @@ -20,7 +20,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.event.UsageEventDetailsVO; @@ -31,7 +30,6 @@ @Component public class UsageEventDetailsDaoImpl extends GenericDaoBase implements UsageEventDetailsDao { - public static final Logger s_logger = Logger.getLogger(UsageEventDetailsDaoImpl.class.getName()); protected final SearchBuilder EventDetailsSearch; protected final SearchBuilder DetailSearch; diff --git a/engine/schema/src/main/java/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java b/engine/schema/src/main/java/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java index 25f8d245a3b0..30535c7e27d5 100644 --- a/engine/schema/src/main/java/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java +++ 
b/engine/schema/src/main/java/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.gpu.HostGpuGroupsVO; @@ -30,7 +29,6 @@ @Component public class HostGpuGroupsDaoImpl extends GenericDaoBase implements HostGpuGroupsDao { - private static final Logger s_logger = Logger.getLogger(HostGpuGroupsDaoImpl.class); private final SearchBuilder _hostIdGroupNameSearch; private final SearchBuilder _searchByHostId; diff --git a/engine/schema/src/main/java/com/cloud/gpu/dao/VGPUTypesDaoImpl.java b/engine/schema/src/main/java/com/cloud/gpu/dao/VGPUTypesDaoImpl.java index d4e31d365d9e..edc5e1f67c86 100644 --- a/engine/schema/src/main/java/com/cloud/gpu/dao/VGPUTypesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/gpu/dao/VGPUTypesDaoImpl.java @@ -27,7 +27,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.VgpuTypesInfo; @@ -41,7 +40,6 @@ @Component public class VGPUTypesDaoImpl extends GenericDaoBase implements VGPUTypesDao { - private static final Logger s_logger = Logger.getLogger(VGPUTypesDaoImpl.class); private final SearchBuilder _searchByGroupId; private final SearchBuilder _searchByGroupIdVGPUType; diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 136351527f6b..9eadab76b2c2 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.VgpuTypesInfo; import com.cloud.cluster.agentlb.HostTransferMapVO; @@ -74,14 +73,12 @@ import com.cloud.utils.db.TransactionLegacy; import 
com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; + import java.util.Arrays; @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { - private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class); - private static final Logger status_logger = Logger.getLogger(Status.class); - private static final Logger state_logger = Logger.getLogger(ResourceState.class); private static final String LIST_HOST_IDS_BY_COMPUTETAGS = "SELECT filtered.host_id, COUNT(filtered.tag) AS tag_count " + "FROM (SELECT host_id, tag, is_tag_a_rule FROM host_tags GROUP BY host_id,tag) AS filtered " @@ -355,7 +352,7 @@ public void init() { try { HostTransferSearch = _hostTransferDao.createSearchBuilder(); } catch (Throwable e) { - s_logger.debug("error", e); + logger.debug("error", e); } HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL); UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), @@ -593,8 +590,8 @@ private void resetHosts(long managementServerId, long lastPingSecondsAfter) { sb.append(" "); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Following hosts got reset: " + sb.toString()); + if (logger.isTraceEnabled()) { + logger.trace("Following hosts got reset: " + sb.toString()); } } @@ -642,19 +639,19 @@ private boolean canOwnCluster(long clusterId) { public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId) { TransactionLegacy txn = TransactionLegacy.currentTxn(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Resetting hosts suitable for reconnect"); + if (logger.isDebugEnabled()) { + logger.debug("Resetting hosts suitable for reconnect"); } // reset hosts 
that are suitable candidates for reconnect resetHosts(managementServerId, lastPingSecondsAfter); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed resetting hosts suitable for reconnect"); + if (logger.isDebugEnabled()) { + logger.debug("Completed resetting hosts suitable for reconnect"); } List assignedHosts = new ArrayList(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Acquiring hosts for clusters already owned by this management server"); + if (logger.isDebugEnabled()) { + logger.debug("Acquiring hosts for clusters already owned by this management server"); } List clusters = findClustersOwnedByManagementServer(managementServerId); txn.start(); @@ -673,17 +670,17 @@ public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Lo sb.append(host.getId()); sb.append(" "); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString()); + if (logger.isTraceEnabled()) { + logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString()); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed acquiring hosts for clusters already owned by this management server"); + if (logger.isDebugEnabled()) { + logger.debug("Completed acquiring hosts for clusters already owned by this management server"); } if (assignedHosts.size() < limit) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Acquiring hosts for clusters not owned by any management server"); + if (logger.isDebugEnabled()) { + logger.debug("Acquiring hosts for clusters not owned by any management server"); } // for remaining hosts not owned by any MS check if they can be owned (by owning full cluster) clusters = findClustersForHostsNotOwnedByAnyManagementServer(); @@ -723,12 +720,12 @@ public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Lo break; } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Following hosts got acquired from newly owned clusters: " + 
sb.toString()); + if (logger.isTraceEnabled()) { + logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString()); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Completed acquiring hosts for clusters not owned by any management server"); + if (logger.isDebugEnabled()) { + logger.debug("Completed acquiring hosts for clusters not owned by any management server"); } } txn.commit(); @@ -899,7 +896,7 @@ public List findLostHosts(long timeout) { } } } catch (SQLException e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } return result; } @@ -1009,15 +1006,15 @@ public List getRunningHostCounts(Date cutTime) { l.add(info); } } catch (SQLException e) { - s_logger.debug("SQLException caught", e); + logger.debug("SQLException caught", e); } return l; } @Override public long getNextSequence(long hostId) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("getNextSequence(), hostId: " + hostId); + if (logger.isTraceEnabled()) { + logger.trace("getNextSequence(), hostId: " + hostId); } TableGenerator tg = _tgs.get("host_req_sq"); @@ -1087,7 +1084,7 @@ public boolean updateState(Status oldStatus, Event event, Status newStatus, Host HostVO ho = findById(host.getId()); assert ho != null : "How how how? : " + host.getId(); - if (status_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); str.append(". 
Name=").append(host.getName()); @@ -1097,7 +1094,7 @@ public boolean updateState(Status oldStatus, Event event, Status newStatus, Host .append("]"); str.append("; DB=[status=").append(vo.getStatus().toString()).append(":msid=").append(vo.getManagementServerId()).append(":lastpinged=").append(vo.getLastPinged()) .append(":old update count=").append(oldUpdateCount).append("]"); - status_logger.debug(str.toString()); + logger.debug(str.toString()); } else { StringBuilder msg = new StringBuilder("Agent status update: ["); msg.append("id = " + host.getId()); @@ -1107,11 +1104,11 @@ public boolean updateState(Status oldStatus, Event event, Status newStatus, Host msg.append("; new status = " + newStatus); msg.append("; old update count = " + oldUpdateCount); msg.append("; new update count = " + newUpdateCount + "]"); - status_logger.debug(msg.toString()); + logger.debug(msg.toString()); } if (ho.getState() == newStatus) { - status_logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus); + logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus); return true; } } @@ -1137,7 +1134,7 @@ public boolean updateResourceState(ResourceState oldState, ResourceState.Event e int result = update(ub, sc, null); assert result <= 1 : "How can this update " + result + " rows? "; - if (state_logger.isDebugEnabled() && result == 0) { + if (logger.isDebugEnabled() && result == 0) { HostVO ho = findById(host.getId()); assert ho != null : "How how how? 
: " + host.getId(); @@ -1147,7 +1144,7 @@ public boolean updateResourceState(ResourceState oldState, ResourceState.Event e str.append("; old state = " + oldState); str.append("; event = " + event); str.append("; new state = " + newState + "]"); - state_logger.debug(str.toString()); + logger.debug(str.toString()); } else { StringBuilder msg = new StringBuilder("Resource state update: ["); msg.append("id = " + host.getId()); @@ -1155,7 +1152,7 @@ public boolean updateResourceState(ResourceState oldState, ResourceState.Event e msg.append("; old state = " + oldState); msg.append("; event = " + event); msg.append("; new state = " + newState + "]"); - state_logger.debug(msg.toString()); + logger.debug(msg.toString()); } return result > 0; @@ -1419,7 +1416,7 @@ public List listOrderedHostsHypervisorVersionsInDatacenter(long datacent result.add(resultSet.getString(1)); } } catch (SQLException e) { - s_logger.error("Error trying to obtain hypervisor version on datacenter", e); + logger.error("Error trying to obtain hypervisor version on datacenter", e); } return result; } diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java index a4ec0a663609..f636c2eb7628 100644 --- a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -33,7 +32,6 @@ @Component public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase implements HypervisorCapabilitiesDao { - private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class); protected final 
SearchBuilder HypervisorTypeSearch; protected final SearchBuilder HypervisorTypeAndVersionSearch; @@ -80,8 +78,8 @@ public HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hy parentVersion == null) { return result; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Hypervisor capabilities for hypervisor: %s, version: %s can not be found. " + + if (logger.isDebugEnabled()) { + logger.debug(String.format("Hypervisor capabilities for hypervisor: %s, version: %s can not be found. " + "Trying to find capabilities for the parent version: %s", hypervisorType, hypervisorVersion, parentVersion)); } diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java b/engine/schema/src/main/java/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java index b4bd56f5b4ee..73d21c3b15a7 100644 --- a/engine/schema/src/main/java/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java @@ -21,7 +21,7 @@ import com.cloud.dc.dao.VmwareDatacenterDao; -import org.apache.log4j.Logger; + import org.springframework.stereotype.Component; import com.cloud.dc.VmwareDatacenterVO; @@ -34,7 +34,6 @@ @Component @DB public class VmwareDatacenterDaoImpl extends GenericDaoBase implements VmwareDatacenterDao { - protected static final Logger s_logger = Logger.getLogger(VmwareDatacenterDaoImpl.class); final SearchBuilder nameSearch; final SearchBuilder guidSearch; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java index f6185309972b..fdd1e0ec43ad 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; 
import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -31,7 +30,6 @@ @Component public class FirewallRulesCidrsDaoImpl extends GenericDaoBase implements FirewallRulesCidrsDao { - private static final Logger s_logger = Logger.getLogger(FirewallRulesCidrsDaoImpl.class); protected final SearchBuilder CidrsSearch; protected FirewallRulesCidrsDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java index d14275227158..ca779f7e9cee 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.Vlan.VlanType; @@ -50,7 +49,6 @@ @Component @DB public class IPAddressDaoImpl extends GenericDaoBase implements IPAddressDao { - private static final Logger s_logger = Logger.getLogger(IPAddressDaoImpl.class); protected SearchBuilder AllFieldsSearch; protected SearchBuilder VlanDbIdSearchUnallocated; @@ -374,7 +372,7 @@ public int countIPs(long dcId, Long accountId, String vlanId, String vlanGateway ipCount = rs.getInt(1); } } catch (Exception e) { - s_logger.warn("Exception counting IP addresses", e); + logger.warn("Exception counting IP addresses", e); } return ipCount; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java index ce86a8636a10..e8c55131ecdd 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDomainDaoImpl.java @@ -26,7 +26,6 @@ import java.util.Map; import 
com.cloud.utils.db.TransactionLegacy; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -38,7 +37,6 @@ @Component @DB() public class NetworkDomainDaoImpl extends GenericDaoBase implements NetworkDomainDao { - public static Logger logger = Logger.getLogger(NetworkDomainDaoImpl.class.getName()); final SearchBuilder AllFieldsSearch; final SearchBuilder DomainsSearch; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java index f24eec4931e3..fde93238451a 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkVO.java @@ -39,7 +39,6 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; import com.cloud.utils.net.NetUtils; -import org.apache.log4j.Logger; /** * NetworkConfigurationVO contains information about a specific network. @@ -48,7 +47,6 @@ @Entity @Table(name = "networks") public class NetworkVO implements Network { - static final Logger s_logger = Logger.getLogger(NetworkVO.class); @Id @TableGenerator(name = "networks_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "networks_seq", allocationSize = 1) @Column(name = "id") diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PortProfileDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/PortProfileDaoImpl.java index a90ce059c314..eb2a19681585 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PortProfileDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PortProfileDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -36,7 +35,6 @@ @Component @DB() public class PortProfileDaoImpl extends GenericDaoBase implements PortProfileDao { - protected static 
final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class); final SearchBuilder nameSearch; final SearchBuilder accessVlanSearch; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java index 3aa2e749712e..484aa6f6631e 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.RemoteAccessVpn; @@ -29,7 +28,6 @@ @Component public class RemoteAccessVpnDaoImpl extends GenericDaoBase implements RemoteAccessVpnDao { - private static final Logger s_logger = Logger.getLogger(RemoteAccessVpnDaoImpl.class); private final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/RouterHealthCheckResultDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/RouterHealthCheckResultDaoImpl.java index 991365b5f540..b1292aebfd14 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/RouterHealthCheckResultDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/RouterHealthCheckResultDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -28,7 +27,6 @@ @Component public class RouterHealthCheckResultDaoImpl extends GenericDaoBase implements RouterHealthCheckResultDao { - private final static Logger s_logger = Logger.getLogger(RouterHealthCheckResultDaoImpl.class); private SearchBuilder RouterChecksSearchBuilder; private SearchBuilder IsRouterFailingSearchBuilder; @@ -69,7 +67,7 @@ public RouterHealthCheckResultVO getRouterHealthCheckResult(long routerId, Strin sc.setParameters("checkType", checkType); List checks 
= listBy(sc); if (checks.size() > 1) { - s_logger.error("Found multiple entries for router Id: " + routerId + ", check name: " + checkName); + logger.error("Found multiple entries for router Id: " + routerId + ", check name: " + checkName); } return checks.isEmpty() ? null : checks.get(0); } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java index b55f39ab31ba..f9c5ce089645 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java @@ -21,7 +21,6 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -31,7 +30,6 @@ @Component public class Site2SiteVpnConnectionDaoImpl extends GenericDaoBase implements Site2SiteVpnConnectionDao { - private static final Logger s_logger = Logger.getLogger(Site2SiteVpnConnectionDaoImpl.class); @Inject protected IPAddressDao _addrDao; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java index 80465f9ee1e8..d1fde963217e 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -30,7 +29,6 @@ public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase AllFieldsSearch; diff --git a/engine/schema/src/main/java/com/cloud/network/dao/UserIpv6AddressDaoImpl.java 
b/engine/schema/src/main/java/com/cloud/network/dao/UserIpv6AddressDaoImpl.java index 08f0829f4acc..407d34fa3650 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/UserIpv6AddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/UserIpv6AddressDaoImpl.java @@ -20,7 +20,6 @@ import com.cloud.network.IpAddress; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.UserIpv6AddressVO; @@ -33,7 +32,6 @@ @Component public class UserIpv6AddressDaoImpl extends GenericDaoBase implements UserIpv6AddressDao { - private static final Logger s_logger = Logger.getLogger(IPAddressDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected GenericSearchBuilder CountFreePublicIps; diff --git a/engine/schema/src/main/java/com/cloud/network/security/dao/SecurityGroupWorkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/security/dao/SecurityGroupWorkDaoImpl.java index 9b0bf088a147..327d12c759a7 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/dao/SecurityGroupWorkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/security/dao/SecurityGroupWorkDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.security.SecurityGroupWork; @@ -37,7 +36,6 @@ @Component public class SecurityGroupWorkDaoImpl extends GenericDaoBase implements SecurityGroupWorkDao { - private static final Logger s_logger = Logger.getLogger(SecurityGroupWorkDaoImpl.class); private final SearchBuilder VmIdTakenSearch; private final SearchBuilder VmIdSeqNumSearch; @@ -107,8 +105,8 @@ public SecurityGroupWorkVO take(long serverId) { final List vos = lockRows(sc, filter, true); if (vos.size() == 0) { txn.commit(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group take: no work found"); + if (logger.isTraceEnabled()) { + logger.trace("Security Group take: no work 
found"); } return null; } @@ -117,8 +115,8 @@ public SecurityGroupWorkVO take(long serverId) { if (findByVmIdStep(work.getInstanceId(), Step.Processing) != null) { //ensure that there is no job in Processing state for the same VM processing = true; - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group work take: found a job in Scheduled and Processing vmid=" + work.getInstanceId()); + if (logger.isTraceEnabled()) { + logger.trace("Security Group work take: found a job in Scheduled and Processing vmid=" + work.getInstanceId()); } } work.setServerId(serverId); diff --git a/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java index d4d4f606a9aa..9a9ca80bce59 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java @@ -26,7 +26,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.security.VmRulesetLogVO; @@ -37,7 +36,6 @@ @Component public class VmRulesetLogDaoImpl extends GenericDaoBase implements VmRulesetLogDao { - protected static final Logger s_logger = Logger.getLogger(VmRulesetLogDaoImpl.class); private SearchBuilder VmIdSearch; private String InsertOrUpdateSQl = "INSERT INTO op_vm_ruleset_log (instance_id, created, logsequence) " + " VALUES(?, now(), 1) ON DUPLICATE KEY UPDATE logsequence=logsequence+1"; @@ -98,19 +96,19 @@ private int executeWithRetryOnDeadlock(TransactionLegacy txn, String pstmt, List } catch (SQLTransactionRollbackException e1) { if (i < maxTries - 1) { int delayMs = (i + 1) * 1000; - s_logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs); + logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs); try 
{ Thread.sleep(delayMs); } catch (InterruptedException ie) { - s_logger.debug("[ignored] interrupted while inserting security group rule log."); + logger.debug("[ignored] interrupted while inserting security group rule log."); } } else - s_logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up"); + logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up"); } } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Inserted or updated " + numUpdated + " rows"); + if (logger.isTraceEnabled()) { + logger.trace("Inserted or updated " + numUpdated + " rows"); } return numUpdated; } @@ -134,8 +132,8 @@ protected int createOrUpdateUsingMultiInsert(Set workItems) { vmIds.add(vmId); } int numUpdated = executeWithRetryOnDeadlock(txn, pstmt, vmIds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Inserted or updated " + numUpdated + " rows"); + if (logger.isTraceEnabled()) { + logger.trace("Inserted or updated " + numUpdated + " rows"); } if (numUpdated > 0) count += stmtSize; @@ -145,7 +143,7 @@ protected int createOrUpdateUsingMultiInsert(Set workItems) { } } catch (SQLException sqe) { - s_logger.warn("Failed to execute multi insert ", sqe); + logger.warn("Failed to execute multi insert ", sqe); } return count; @@ -173,10 +171,10 @@ protected int createOrUpdateUsingBatch(Set workItems) { queryResult = stmtInsert.executeBatch(); txn.commit(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Updated or inserted " + workItems.size() + " log items"); + if (logger.isTraceEnabled()) + logger.trace("Updated or inserted " + workItems.size() + " log items"); } catch (SQLException e) { - s_logger.warn("Failed to execute batch update statement for ruleset log: ", e); + logger.warn("Failed to execute batch update statement for ruleset log: ", e); txn.rollback(); success = false; } @@ -185,7 +183,7 @@ protected int createOrUpdateUsingBatch(Set workItems) { 
workItems.toArray(arrayItems); for (int i = 0; i < queryResult.length; i++) { if (queryResult[i] < 0) { - s_logger.debug("Batch query update failed for vm " + arrayItems[i]); + logger.debug("Batch query update failed for vm " + arrayItems[i]); } } } diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java index 4501f1493b57..1eb6482dd3af 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.vpc.NetworkACLItemCidrsDao; @@ -37,7 +36,6 @@ */ @Component public class NetworkACLItemCidrsDaoImpl extends GenericDaoBase implements NetworkACLItemCidrsDao { - private static final Logger s_logger = Logger.getLogger(NetworkACLItemCidrsDaoImpl.class); protected final SearchBuilder cidrsSearch; protected NetworkACLItemCidrsDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java index 8b090fdfbc84..47b91b24c279 100644 --- a/engine/schema/src/main/java/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.vpc.PrivateIpVO; @@ -36,7 +35,6 @@ @Component @DB() public class PrivateIpDaoImpl extends GenericDaoBase implements PrivateIpDao { - private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final GenericSearchBuilder 
CountAllocatedByNetworkId; @@ -90,8 +88,8 @@ public PrivateIpVO allocateIpAddress(long dcId, long networkId, String requested @Override public void releaseIpAddress(String ipAddress, long networkId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId); } SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("ip", ipAddress); diff --git a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectAccountDaoImpl.java b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectAccountDaoImpl.java index d2ba49e9408b..8947cc600b38 100644 --- a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectAccountDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectAccountDaoImpl.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.projects.ProjectAccount; @@ -39,7 +38,6 @@ public class ProjectAccountDaoImpl extends GenericDaoBase ProjectAccountsSearch; final GenericSearchBuilder CountByRoleSearch; - public static final Logger s_logger = Logger.getLogger(ProjectAccountDaoImpl.class.getName()); protected ProjectAccountDaoImpl() { AllFieldsSearch = createSearchBuilder(); @@ -190,7 +188,7 @@ public void removeAccountFromProjects(long accountId) { int rowsRemoved = remove(sc); if (rowsRemoved > 0) { - s_logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects"); + logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects"); } } diff --git a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java index 5deb85861208..46bf36ae397b 100644 --- a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java +++ 
b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectDaoImpl.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.projects.Project; @@ -36,7 +35,6 @@ @Component public class ProjectDaoImpl extends GenericDaoBase implements ProjectDao { - private static final Logger s_logger = Logger.getLogger(ProjectDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected GenericSearchBuilder CountByDomain; protected GenericSearchBuilder ProjectAccountSearch; diff --git a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectInvitationDaoImpl.java b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectInvitationDaoImpl.java index f8d153740ab7..d30b1c9f1f10 100644 --- a/engine/schema/src/main/java/com/cloud/projects/dao/ProjectInvitationDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/projects/dao/ProjectInvitationDaoImpl.java @@ -19,7 +19,6 @@ import java.sql.Date; import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.projects.ProjectInvitation.State; @@ -31,7 +30,6 @@ @Component public class ProjectInvitationDaoImpl extends GenericDaoBase implements ProjectInvitationDao { - private static final Logger s_logger = Logger.getLogger(ProjectInvitationDaoImpl.class); protected final SearchBuilder AllFieldsSearch; protected final SearchBuilder InactiveSearch; protected final SearchBuilder ProjectAccountInviteSearch; @@ -111,7 +109,7 @@ public boolean expirePendingInvitations(long timeout) { for (ProjectInvitationVO invitationToExpire : invitationsToExpire) { invitationToExpire.setState(State.Expired); if (!update(invitationToExpire.getId(), invitationToExpire)) { - s_logger.warn("Fail to expire invitation " + invitationToExpire.toString()); + logger.warn("Fail to expire invitation " + invitationToExpire.toString()); success = false; } } @@ -133,7 +131,7 @@ public boolean isActive(long 
id, long timeout) { sc.setParameters("id", id); if (findOneBy(sc) == null) { - s_logger.warn("Unable to find project invitation by id " + id); + logger.warn("Unable to find project invitation by id " + id); return false; } @@ -185,7 +183,7 @@ public void cleanupInvitations(long projectId) { sc.setParameters("projectId", projectId); int numberRemoved = remove(sc); - s_logger.debug("Removed " + numberRemoved + " invitations for project id=" + projectId); + logger.debug("Removed " + numberRemoved + " invitations for project id=" + projectId); } } diff --git a/engine/schema/src/main/java/com/cloud/resource/icon/dao/ResourceIconDaoImpl.java b/engine/schema/src/main/java/com/cloud/resource/icon/dao/ResourceIconDaoImpl.java index 41eba40c01f5..1ae01bfc1ec2 100644 --- a/engine/schema/src/main/java/com/cloud/resource/icon/dao/ResourceIconDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/resource/icon/dao/ResourceIconDaoImpl.java @@ -24,13 +24,11 @@ import com.cloud.utils.db.SearchCriteria; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.ResourceIconResponse; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; public class ResourceIconDaoImpl extends GenericDaoBase implements ResourceIconDao { - public static final Logger s_logger = Logger.getLogger(ResourceIconDaoImpl.class); private final SearchBuilder AllFieldsSearch; protected ResourceIconDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 5c8e49938295..f83ab7f98a46 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.dao.DiskOfferingDao; -import org.apache.log4j.Logger; import 
org.springframework.stereotype.Component; import com.cloud.event.UsageEventVO; @@ -44,7 +43,6 @@ @Component @DB() public class ServiceOfferingDaoImpl extends GenericDaoBase implements ServiceOfferingDao { - protected static final Logger s_logger = Logger.getLogger(ServiceOfferingDaoImpl.class); @Inject protected ServiceOfferingDetailsDao detailsDao; @@ -268,7 +266,7 @@ public ServiceOfferingVO findDefaultSystemOffering(String offeringName, Boolean ServiceOfferingVO serviceOffering = findByName(name); if (serviceOffering == null) { String message = "System service offering " + name + " not found"; - s_logger.error(message); + logger.error(message); throw new CloudRuntimeException(message); } return serviceOffering; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java index 83b5f6bdb747..98bef6201a15 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/BucketDaoImpl.java @@ -20,7 +20,6 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.naming.ConfigurationException; @@ -29,7 +28,6 @@ @Component public class BucketDaoImpl extends GenericDaoBase implements BucketDao { - public static final Logger s_logger = Logger.getLogger(BucketDaoImpl.class.getName()); private SearchBuilder searchFilteringStoreId; private SearchBuilder bucketSearch; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSHypervisorDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSHypervisorDaoImpl.java index 69f4d4a3ceb0..1aaa277a358b 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSHypervisorDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/GuestOSHypervisorDaoImpl.java @@ 
-22,7 +22,6 @@ import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.Hypervisor; @@ -35,7 +34,6 @@ @Component public class GuestOSHypervisorDaoImpl extends GenericDaoBase implements GuestOSHypervisorDao { - private static final Logger s_logger = Logger.getLogger(GuestOSHypervisorDaoImpl.class); protected final SearchBuilder guestOsSearch; protected final SearchBuilder mappingSearch; @@ -92,14 +90,14 @@ private GuestOSHypervisorVO getMappingForHypervisorVersionOrParentVersionIfNeede String guestOs = guestOsId != null ? String.format("guest OS ID: %d", guestOsId) : String.format("guest OS ID: %s", guestOsName); String parentVersion = CloudStackVersion.getVMwareParentVersion(hypervisorVersion); if (parentVersion == null) { - if (s_logger.isDebugEnabled()) { - s_logger.info(String.format("Mapping for %s for hypervisor: %s with version: %s can not be found. Parent version is also null", + if (logger.isDebugEnabled()) { + logger.info(String.format("Mapping for %s for hypervisor: %s with version: %s can not be found. Parent version is also null", guestOs, hypervisorType, hypervisorVersion)); } return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Mapping for %s for hypervisor: %s with version: %s can not be found. " + + if (logger.isDebugEnabled()) { + logger.debug(String.format("Mapping for %s for hypervisor: %s with version: %s can not be found. " + "Trying to find one for the parent version: %s", guestOs, hypervisorType, hypervisorVersion, parentVersion)); } return guestOsId != null ? 
diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/LaunchPermissionDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/LaunchPermissionDaoImpl.java index ec1a3a267c60..b4fdb4b6394e 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/LaunchPermissionDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/LaunchPermissionDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.LaunchPermissionVO; @@ -39,7 +38,6 @@ @Component public class LaunchPermissionDaoImpl extends GenericDaoBase implements LaunchPermissionDao { - private static final Logger s_logger = Logger.getLogger(LaunchPermissionDaoImpl.class); private static final String REMOVE_LAUNCH_PERMISSION = "DELETE FROM `cloud`.`launch_permission`" + " WHERE template_id = ? AND account_id = ?"; private static final String LIST_PERMITTED_TEMPLATES = @@ -80,7 +78,7 @@ public void removePermissions(long templateId, List accountIds) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error removing launch permissions", e); + logger.warn("Error removing launch permissions", e); throw new CloudRuntimeException("Error removing launch permissions", e); } } @@ -145,7 +143,7 @@ public List listPermittedTemplates(long accountId) { permittedTemplates.add(template); } } catch (Exception e) { - s_logger.warn("Error listing permitted templates", e); + logger.warn("Error listing permitted templates", e); } return permittedTemplates; } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java index f5ebf4bcf3d0..030d10d66827 100755 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -23,7 +23,6 @@ import javax.annotation.PostConstruct; import 
javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.server.ResourceTag.ResourceObjectType; @@ -51,7 +50,6 @@ @Component public class SnapshotDaoImpl extends GenericDaoBase implements SnapshotDao { - public static final Logger s_logger = Logger.getLogger(SnapshotDaoImpl.class.getName()); // TODO: we should remove these direct sqls private static final String GET_LAST_SNAPSHOT = "SELECT snapshots.id FROM snapshot_store_ref, snapshots where snapshots.id = snapshot_store_ref.snapshot_id AND snapshosts.volume_id = ? AND snapshot_store_ref.role = ? ORDER BY created DESC"; @@ -197,7 +195,7 @@ public long getLastSnapshot(long volumeId, DataStoreRole role) { return rs.getLong(1); } } catch (Exception ex) { - s_logger.error("error getting last snapshot", ex); + logger.error("error getting last snapshot", ex); } return 0; } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotZoneDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotZoneDaoImpl.java index 1ed8a547a105..5f8ded6665bb 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotZoneDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotZoneDaoImpl.java @@ -20,7 +20,6 @@ import java.util.Date; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.storage.SnapshotZoneVO; import com.cloud.utils.db.GenericDaoBase; @@ -28,7 +27,6 @@ import com.cloud.utils.db.SearchCriteria; public class SnapshotZoneDaoImpl extends GenericDaoBase implements SnapshotZoneDao { - public static final Logger s_logger = Logger.getLogger(SnapshotZoneDaoImpl.class.getName()); protected final SearchBuilder ZoneSnapshotSearch; public SnapshotZoneDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index c27aeb0f652e..9e7bdca11817 100644 --- 
a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.stream.Collectors; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.Status; @@ -36,7 +35,6 @@ @Component public class StoragePoolHostDaoImpl extends GenericDaoBase implements StoragePoolHostDao { - public static final Logger s_logger = Logger.getLogger(StoragePoolHostDaoImpl.class.getName()); protected final SearchBuilder PoolSearch; protected final SearchBuilder HostSearch; @@ -115,10 +113,10 @@ public List listByHostStatus(long poolId, Status hostStatus) result.add(findById(id)); } }catch (SQLException e) { - s_logger.warn("listByHostStatus:Exception: ", e); + logger.warn("listByHostStatus:Exception: ", e); } } catch (Exception e) { - s_logger.warn("listByHostStatus:Exception: ", e); + logger.warn("listByHostStatus:Exception: ", e); } return result; } @@ -141,10 +139,10 @@ public List findHostsConnectedToPools(List poolIds) { hosts.add(hostId); } } catch (SQLException e) { - s_logger.warn("findHostsConnectedToPools:Exception: ", e); + logger.warn("findHostsConnectedToPools:Exception: ", e); } } catch (Exception e) { - s_logger.warn("findHostsConnectedToPools:Exception: ", e); + logger.warn("findHostsConnectedToPools:Exception: ", e); } return hosts; @@ -165,7 +163,7 @@ public List> getDatacenterStoragePoolHostInfo(long dcId, boo l.add(new Pair(rs.getLong(1), rs.getInt(2))); } } catch (SQLException e) { - s_logger.debug("SQLException: ", e); + logger.debug("SQLException: ", e); } return l; } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/UploadDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/UploadDaoImpl.java index fb296be4d35a..8ee4a21a3c0e 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/UploadDaoImpl.java +++ 
b/engine/schema/src/main/java/com/cloud/storage/dao/UploadDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.Upload.Mode; @@ -31,7 +30,6 @@ @Component public class UploadDaoImpl extends GenericDaoBase implements UploadDao { - public static final Logger s_logger = Logger.getLogger(UploadDaoImpl.class.getName()); protected final SearchBuilder typeUploadStatusSearch; protected final SearchBuilder typeHostAndUploadStatusSearch; protected final SearchBuilder typeModeAndStatusSearch; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 031bcb3af7b8..03678732d1c6 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.dao.DataCenterDao; @@ -62,7 +61,6 @@ @Component public class VMTemplateDaoImpl extends GenericDaoBase implements VMTemplateDao { - private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class); @Inject VMTemplateZoneDao _templateZoneDao; @@ -293,7 +291,7 @@ public boolean configure(String name, Map params) throws Configu routerTmpltName = (String)params.get("routing.uniquename"); - s_logger.debug("Found parameter routing unique name " + routerTmpltName); + logger.debug("Found parameter routing unique name " + routerTmpltName); if (routerTmpltName == null) { routerTmpltName = "routing"; } @@ -302,8 +300,8 @@ public boolean configure(String name, Map params) throws Configu if (consoleProxyTmpltName == 
null) { consoleProxyTmpltName = "routing"; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Use console proxy template : " + consoleProxyTmpltName); + if (logger.isDebugEnabled()) { + logger.debug("Use console proxy template : " + consoleProxyTmpltName); } UniqueNameSearch = createSearchBuilder(); @@ -710,7 +708,7 @@ public boolean updateState( builder.set(vo, "updated", new Date()); int rows = update((VMTemplateVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VMTemplateVO dbTemplate = findByIdIncludingRemoved(vo.getId()); if (dbTemplate != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -743,7 +741,7 @@ public boolean updateState( .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore"); + logger.debug("Unable to update template: id=" + vo.getId() + ", as no such template exists in the database anymore"); } } return rows > 0; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java index d938bebb18ec..5a2ec1163fb0 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; @@ -49,7 +48,6 @@ @Component public class VMTemplatePoolDaoImpl extends GenericDaoBase implements VMTemplatePoolDao { - public static final Logger s_logger = 
Logger.getLogger(VMTemplatePoolDaoImpl.class.getName()); @Inject DataStoreManager dataStoreManager; @@ -193,7 +191,7 @@ public List listByTemplateStatus(long templateId, long result.add(toEntityBean(rs, false)); } } catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } return result; @@ -217,10 +215,10 @@ public List listByTemplateStatus(long templateId, long result.add(findById(id)); } }catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } } catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } return result; @@ -245,10 +243,10 @@ public List listByHostTemplate(long hostId, long templa result.add(findById(id)); } }catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } } catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); } return result; @@ -335,7 +333,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat builder.set(vo, "updated", new Date()); int rows = update((VMTemplateStoragePoolVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VMTemplateStoragePoolVO dbVol = findByIdIncludingRemoved(templatePool.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -368,7 +366,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update objectIndatastore: id=" + templatePool.getId() + ", as there is no such object exists in the database anymore"); + logger.debug("Unable to update objectIndatastore: id=" + templatePool.getId() + ", as there is no such object exists in the database anymore"); } } return rows > 0; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java 
b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java index 489ac130eafd..12835d184ea6 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.VMTemplateZoneVO; @@ -30,7 +29,6 @@ @Component public class VMTemplateZoneDaoImpl extends GenericDaoBase implements VMTemplateZoneDao { - public static final Logger s_logger = Logger.getLogger(VMTemplateZoneDaoImpl.class.getName()); protected final SearchBuilder ZoneSearch; protected final SearchBuilder TemplateSearch; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index a773a9502ce2..41c32883d2ef 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.exception.InvalidParameterValueException; @@ -55,7 +54,6 @@ @Component public class VolumeDaoImpl extends GenericDaoBase implements VolumeDao { - private static final Logger s_logger = Logger.getLogger(VolumeDaoImpl.class); protected final SearchBuilder DetachedAccountIdSearch; protected final SearchBuilder TemplateZoneSearch; protected final GenericSearchBuilder TotalSizeByPoolSearch; @@ -337,7 +335,7 @@ public HypervisorType getHypervisorType(long volumeId) { } else if (scope == ScopeType.ZONE) { sql = SELECT_HYPERTYPE_FROM_ZONE_VOLUME; } else { - s_logger.error("Unhandled scope type '" + scope + "' when running getHypervisorType on volume id " + volumeId); + logger.error("Unhandled scope 
type '" + scope + "' when running getHypervisorType on volume id " + volumeId); } pstmt = txn.prepareAutoCloseStatement(sql); @@ -367,7 +365,7 @@ public ImageFormat getImageFormat(Long volumeId) { } else if (type.equals(HypervisorType.VMware)) { return ImageFormat.OVA; } else { - s_logger.warn("Do not support hypervisor " + type.toString()); + logger.warn("Do not support hypervisor " + type.toString()); return null; } } @@ -592,7 +590,7 @@ public boolean updateState(com.cloud.storage.Volume.State currentState, Event ev builder.set(vo, "updated", new Date()); int rows = update((VolumeVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VolumeVO dbVol = findByIdIncludingRemoved(vo.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -603,7 +601,7 @@ public boolean updateState(com.cloud.storage.Volume.State currentState, Event ev str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) .append("; updatedTime=").append(oldUpdatedTime); } else { - s_logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore"); + logger.debug("Unable to update volume: id=" + vo.getId() + ", as there is no such volume exists in the database anymore"); } } return rows > 0; @@ -641,10 +639,10 @@ public List listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId } return result; } catch (SQLException e) { - s_logger.debug("DB Exception on: " + sql.toString(), e); + logger.debug("DB Exception on: " + sql.toString(), e); throw new CloudRuntimeException(e); } catch (Throwable e) { - s_logger.debug("Caught: " + sql.toString(), e); + logger.debug("Caught: " + sql.toString(), e); throw new CloudRuntimeException(e); } } @@ -716,7 +714,7 @@ public List listVolumesByPassphraseId(long passphraseId) { public 
boolean remove(Long id) { TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); - s_logger.debug(String.format("Removing volume %s from DB", id)); + logger.debug(String.format("Removing volume %s from DB", id)); VolumeVO entry = findById(id); if (entry != null) { _tagsDao.removeByIdAndType(id, ResourceObjectType.Volume); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java b/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java index 34de1bccb82f..03857137ded6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/ConfigurationGroupsAggregator.java @@ -30,13 +30,14 @@ import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.utils.Pair; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class ConfigurationGroupsAggregator { - static final Logger LOG = Logger.getLogger(ConfigurationGroupsAggregator.class); + protected Logger LOG = LogManager.getLogger(getClass()); @Inject ConfigurationDao configDao; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java index 1fc8b7e3d842..e7ea6025ad76 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseIntegrityChecker.java @@ -23,7 +23,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.utils.CloudStackVersion; @@ -38,7 +37,6 @@ @Component public class DatabaseIntegrityChecker extends AdapterBase implements SystemIntegrityChecker { - private static final Logger 
s_logger = Logger.getLogger(DatabaseIntegrityChecker.class); @Inject VersionDao _dao; @@ -102,32 +100,32 @@ private Boolean checkDuplicateHostWithTheSameLocalStorage() { } catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } } catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } } if (noDuplicate) { - s_logger.debug("No duplicate hosts with the same local storage found in database"); + logger.debug("No duplicate hosts with the same local storage found in database"); } else { - s_logger.error(helpInfo.toString()); + logger.error(helpInfo.toString()); } txn.commit(); return noDuplicate; }catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } } catch (Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); + logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage()); throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage: Exception :" + e.getMessage(),e); } finally @@ -138,7 +136,7 @@ private Boolean checkDuplicateHostWithTheSameLocalStorage() { } }catch(Exception e) { - s_logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception:"+ e.getMessage()); + 
logger.error("checkDuplicateHostWithTheSameLocalStorage: Exception:"+ e.getMessage()); } } } @@ -151,7 +149,7 @@ private boolean check21to22PremiumUprage(Connection conn) throws SQLException { String tableName = rs.getString(1); if (tableName.equalsIgnoreCase("usage_event") || tableName.equalsIgnoreCase("usage_port_forwarding") || tableName.equalsIgnoreCase("usage_network_offering")) { num++; - s_logger.debug("Checking 21to22PremiumUprage table " + tableName + " found"); + logger.debug("Checking 21to22PremiumUprage table " + tableName + " found"); } if (num == 3) { return true; @@ -167,7 +165,7 @@ private boolean isColumnExisted(Connection conn, String dbName, String tableName boolean found = false; while (rs.next()) { if (column.equalsIgnoreCase(rs.getString(1))) { - s_logger.debug(String.format("Column %1$s.%2$s.%3$s found", dbName, tableName, column)); + logger.debug(String.format("Column %1$s.%2$s.%3$s found", dbName, tableName, column)); found = true; break; } @@ -224,33 +222,33 @@ private boolean checkMissedPremiumUpgradeFor228() { } } if (!hasUsage) { - s_logger.debug("No cloud_usage found in database, no need to check missed premium upgrade"); + logger.debug("No cloud_usage found in database, no need to check missed premium upgrade"); txn.commit(); return true; } if (!check21to22PremiumUprage(conn)) { - s_logger.error("21to22 premium upgrade missed"); + logger.error("21to22 premium upgrade missed"); txn.commit(); return false; } if (!check221to222PremiumUprage(conn)) { - s_logger.error("221to222 premium upgrade missed"); + logger.error("221to222 premium upgrade missed"); txn.commit(); return false; } if (!check222to224PremiumUpgrade(conn)) { - s_logger.error("222to224 premium upgrade missed"); + logger.error("222to224 premium upgrade missed"); txn.commit(); return false; } txn.commit(); return true; } catch (Exception e) { - s_logger.error("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage()); + logger.error("checkMissedPremiumUpgradeFor228: 
Exception:" + e.getMessage()); throw new CloudRuntimeException("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage(), e); } }catch (Exception e) { - s_logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); + logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); throw new CloudRuntimeException("checkMissedPremiumUpgradeFor228: Exception:" + e.getMessage(),e); } finally @@ -261,7 +259,7 @@ private boolean checkMissedPremiumUpgradeFor228() { } }catch(Exception e) { - s_logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); + logger.error("checkMissedPremiumUpgradeFor228: Exception:"+ e.getMessage()); } } } @@ -270,19 +268,19 @@ private boolean checkMissedPremiumUpgradeFor228() { public void check() { GlobalLock lock = GlobalLock.getInternLock("DatabaseIntegrity"); try { - s_logger.info("Grabbing lock to check for database integrity."); + logger.info("Grabbing lock to check for database integrity."); if (!lock.lock(20 * 60)) { throw new CloudRuntimeException("Unable to acquire lock to check for database integrity."); } try { - s_logger.info("Performing database integrity check"); + logger.info("Performing database integrity check"); if (!checkDuplicateHostWithTheSameLocalStorage()) { throw new CloudRuntimeException("checkDuplicateHostWithTheSameLocalStorage detected error"); } if (!checkMissedPremiumUpgradeFor228()) { - s_logger.error("Your current database version is 2.2.8, management server detected some missed premium upgrade, please contact CloudStack support and attach log file. Thank you!"); + logger.error("Your current database version is 2.2.8, management server detected some missed premium upgrade, please contact CloudStack support and attach log file. 
Thank you!"); throw new CloudRuntimeException("Detected missed premium upgrade"); } } finally { @@ -298,7 +296,7 @@ public boolean start() { try { check(); } catch (Exception e) { - s_logger.error("System integrity check exception", e); + logger.error("System integrity check exception", e); System.exit(1); } return true; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index bf84d9215b91..ea8ce47611a2 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -37,7 +37,8 @@ import com.cloud.utils.FileUtil; import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.upgrade.dao.DbUpgrade; import com.cloud.upgrade.dao.DbUpgradeSystemVmTemplate; @@ -125,7 +126,7 @@ import com.google.common.annotations.VisibleForTesting; public class DatabaseUpgradeChecker implements SystemIntegrityChecker { - private static final Logger s_logger = Logger.getLogger(DatabaseUpgradeChecker.class); + protected static Logger LOGGER = LogManager.getLogger(DatabaseUpgradeChecker.class); private final DatabaseVersionHierarchy hierarchy; private static final String VIEWS_DIRECTORY = Paths.get("META-INF", "db", "views").toString(); @@ -235,10 +236,10 @@ protected void runScript(Connection conn, InputStream file) { ScriptRunner runner = new ScriptRunner(conn, false, true); runner.runScript(reader); } catch (IOException e) { - s_logger.error("Unable to read upgrade script", e); + LOGGER.error("Unable to read upgrade script", e); throw new CloudRuntimeException("Unable to read upgrade script", e); } catch (SQLException e) { - s_logger.error("Unable to execute upgrade script", e); + LOGGER.error("Unable to 
execute upgrade script", e); throw new CloudRuntimeException("Unable to execute upgrade script", e); } @@ -277,7 +278,7 @@ private void updateSystemVmTemplates(DbUpgrade[] upgrades) { conn = txn.getConnection(); } catch (SQLException e) { String errorMessage = "Unable to upgrade the database"; - s_logger.error(errorMessage, e); + LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } ((DbUpgradeSystemVmTemplate)upgrade).updateSystemVmTemplates(conn); @@ -285,7 +286,7 @@ private void updateSystemVmTemplates(DbUpgrade[] upgrades) { break; } catch (CloudRuntimeException e) { String errorMessage = "Unable to upgrade the database"; - s_logger.error(errorMessage, e); + LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } finally { txn.close(); @@ -295,13 +296,13 @@ private void updateSystemVmTemplates(DbUpgrade[] upgrades) { } protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVersion) { - s_logger.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion); + LOGGER.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion); final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion); for (DbUpgrade upgrade : upgrades) { VersionVO version; - s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade + LOGGER.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); TransactionLegacy txn = TransactionLegacy.open("Upgrade"); txn.start(); @@ -311,7 +312,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer conn = txn.getConnection(); } catch (SQLException e) { String errorMessage = "Unable to upgrade the database"; - s_logger.error(errorMessage, 
e); + LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } InputStream[] scripts = upgrade.getPrepareScripts(); @@ -329,7 +330,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer txn.commit(); } catch (CloudRuntimeException e) { String errorMessage = "Unable to upgrade the database"; - s_logger.error(errorMessage, e); + LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } finally { txn.close(); @@ -338,7 +339,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer // Run the corresponding '-cleanup.sql' script txn = TransactionLegacy.open("Cleanup"); try { - s_logger.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade + LOGGER.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); txn.start(); @@ -346,7 +347,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer try { conn = txn.getConnection(); } catch (SQLException e) { - s_logger.error("Unable to cleanup the database", e); + LOGGER.error("Unable to cleanup the database", e); throw new CloudRuntimeException("Unable to cleanup the database", e); } @@ -354,7 +355,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer if (scripts != null) { for (InputStream script : scripts) { runScript(conn, script); - s_logger.debug("Cleanup script " + upgrade.getClass().getSimpleName() + " is executed successfully"); + LOGGER.debug("Cleanup script " + upgrade.getClass().getSimpleName() + " is executed successfully"); } } txn.commit(); @@ -364,7 +365,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer version.setUpdated(new Date()); _dao.update(version.getId(), version); 
txn.commit(); - s_logger.debug("Upgrade completed for version " + version.getVersion()); + LOGGER.debug("Upgrade completed for version " + version.getVersion()); } finally { txn.close(); } @@ -375,23 +376,23 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer } protected void executeViewScripts() { - s_logger.info(String.format("Executing VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY)); + LOGGER.info(String.format("Executing VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY)); List filesPathUnderViewsDirectory = FileUtil.getFilesPathsUnderResourceDirectory(VIEWS_DIRECTORY); try (TransactionLegacy txn = TransactionLegacy.open("execute-view-scripts")) { Connection conn = txn.getConnection(); for (String filePath : filesPathUnderViewsDirectory) { - s_logger.debug(String.format("Executing VIEW script [%s].", filePath)); + LOGGER.debug(String.format("Executing VIEW script [%s].", filePath)); InputStream viewScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath); runScript(conn, viewScript); } - s_logger.info(String.format("Finished execution of VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY)); + LOGGER.info(String.format("Finished execution of VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY)); } catch (SQLException e) { String message = String.format("Unable to execute VIEW scripts due to [%s].", e.getMessage()); - s_logger.error(message, e); + LOGGER.error(message, e); throw new CloudRuntimeException(message, e); } } @@ -400,7 +401,7 @@ protected void executeViewScripts() { public void check() { GlobalLock lock = GlobalLock.getInternLock("DatabaseUpgrade"); try { - s_logger.info("Grabbing lock to check for database upgrade."); + LOGGER.info("Grabbing lock to check for database upgrade."); if (!lock.lock(20 * 60)) { throw new CloudRuntimeException("Unable to acquire lock to check for database integrity."); } @@ 
-421,14 +422,14 @@ public void check() { SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(sysVmVersion.getMajorRelease()) + "." + String.valueOf(sysVmVersion.getMinorRelease()); SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(sysVmVersion.getPatchRelease()); - s_logger.info("DB version = " + dbVersion + " Code Version = " + currentVersion); + LOGGER.info("DB version = " + dbVersion + " Code Version = " + currentVersion); if (dbVersion.compareTo(currentVersion) > 0) { throw new CloudRuntimeException("Database version " + dbVersion + " is higher than management software version " + currentVersionValue); } if (dbVersion.compareTo(currentVersion) == 0) { - s_logger.info("DB version and code version matches so no upgrade needed."); + LOGGER.info("DB version and code version matches so no upgrade needed."); return; } @@ -451,13 +452,13 @@ private void initializeDatabaseEncryptors() { decryptInit(conn); txn.commit(); } catch (CloudRuntimeException e) { - s_logger.error(e.getMessage()); + LOGGER.error(e.getMessage()); errorMessage = String.format("Unable to initialize the database encryptors due to %s. 
" + "Please check if database encryption key and database encryptor version are correct.", errorMessage); - s_logger.error(errorMessage); + LOGGER.error(errorMessage); throw new CloudRuntimeException(errorMessage, e); } catch (SQLException e) { - s_logger.error(errorMessage, e); + LOGGER.error(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } finally { txn.close(); @@ -470,7 +471,7 @@ private void decryptInit(Connection conn) throws SQLException { ResultSet result = pstmt.executeQuery()) { if (result.next()) { String init = result.getString(1); - s_logger.info("init = " + DBEncryptionUtil.decrypt(init)); + LOGGER.info("init = " + DBEncryptionUtil.decrypt(init)); } } } @@ -527,7 +528,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - s_logger.debug("Updating System Vm template IDs"); + LOGGER.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/GuestOsMapper.java b/engine/schema/src/main/java/com/cloud/upgrade/GuestOsMapper.java index 4aabaa3e182b..abb0d7f76690 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/GuestOsMapper.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/GuestOsMapper.java @@ -18,7 +18,8 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.sql.Connection; import java.sql.PreparedStatement; @@ -40,7 +41,7 @@ public class GuestOsMapper { - final static Logger LOG = Logger.getLogger(GuestOsMapper.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject GuestOSHypervisorDao guestOSHypervisorDao; @@ -56,15 +57,15 @@ public GuestOsMapper() { } public void mergeDuplicates() { - LOG.info("merging 
duplicate guest osses"); + logger.info("merging duplicate guest osses"); Set> duplicates = findDuplicates(); - LOG.debug(String.format("merging %d sets of duplicates", duplicates.size())); + logger.debug(String.format("merging %d sets of duplicates", duplicates.size())); for (Set setOfGuestOSes : duplicates) { // decide which to (mark as) remove(d) // # highest/lowest id // # or is user_defined == false GuestOSVO guestOSVO = highestIdFrom(setOfGuestOSes); - LOG.info(String.format("merging %d duplicates for %s ", setOfGuestOSes.size(), guestOSVO.getDisplayName())); + logger.info(String.format("merging %d duplicates for %s ", setOfGuestOSes.size(), guestOSVO.getDisplayName())); makeNormative(guestOSVO, setOfGuestOSes); } @@ -144,7 +145,7 @@ private long getGuestOsId(long categoryId, String displayName) { if (guestOS != null) { id = guestOS.getId(); } else { - LOG.warn(String.format("Unable to find the guest OS details with category id: %d and display name: %s", + categoryId, displayName)); + logger.warn(String.format("Unable to find the guest OS details with category id: %d and display name: %s", + categoryId, displayName)); } return id; } @@ -155,7 +156,7 @@ private long getGuestOsIdFromHypervisorMapping(GuestOSHypervisorMapping mapping) if (guestOSHypervisorVO != null) { id = guestOSHypervisorVO.getGuestOsId(); } else { - LOG.warn(String.format("Unable to find the guest OS hypervisor mapping details for %s", mapping.toString())); + logger.warn(String.format("Unable to find the guest OS hypervisor mapping details for %s", mapping.toString())); } return id; } @@ -163,9 +164,9 @@ private long getGuestOsIdFromHypervisorMapping(GuestOSHypervisorMapping mapping) public void addGuestOsAndHypervisorMappings(long categoryId, String displayName, List mappings) { long guestOsId = getGuestOsId(categoryId, displayName); if (guestOsId == 0) { - LOG.debug("No guest OS found with category id: " + categoryId + " and display name: " + displayName); + logger.debug("No guest OS found 
with category id: " + categoryId + " and display name: " + displayName); if (!addGuestOs(categoryId, displayName)) { - LOG.warn("Couldn't add the guest OS with category id: " + categoryId + " and display name: " + displayName); + logger.warn("Couldn't add the guest OS with category id: " + categoryId + " and display name: " + displayName); return; } guestOsId = getGuestOsId(categoryId, displayName); @@ -189,7 +190,7 @@ private void updateToSystemDefined(long guestOsId) { } public boolean addGuestOs(long categoryId, String displayName) { - LOG.debug("Adding guest OS with category id: " + categoryId + " and display name: " + displayName); + logger.debug("Adding guest OS with category id: " + categoryId + " and display name: " + displayName); GuestOSVO guestOS = new GuestOSVO(); guestOS.setCategoryId(categoryId); guestOS.setDisplayName(displayName); @@ -199,7 +200,7 @@ public boolean addGuestOs(long categoryId, String displayName) { public void addGuestOsHypervisorMapping(GuestOSHypervisorMapping mapping, long category, String displayName) { long guestOsId = getGuestOsId(category, displayName); if (guestOsId == 0) { - LOG.error(String.format("no guest os found for category %d and name %s, skipping mapping it to %s/%s", guestOsId, displayName, mapping.getHypervisorType(), mapping.getHypervisorVersion())); + logger.error(String.format("no guest os found for category %d and name %s, skipping mapping it to %s/%s", guestOsId, displayName, mapping.getHypervisorType(), mapping.getHypervisorVersion())); } else { addGuestOsHypervisorMapping(mapping, guestOsId); } @@ -210,7 +211,7 @@ private void addGuestOsHypervisorMapping(GuestOSHypervisorMapping mapping, long return; } - LOG.debug("Adding guest OS hypervisor mapping - " + mapping.toString() + ", for guest OS with id - " + guestOsId); + logger.debug("Adding guest OS hypervisor mapping - " + mapping.toString() + ", for guest OS with id - " + guestOsId); GuestOSHypervisorVO guestOsMapping = new GuestOSHypervisorVO(); 
guestOsMapping.setHypervisorType(mapping.getHypervisorType()); guestOsMapping.setHypervisorVersion(mapping.getHypervisorVersion()); @@ -222,7 +223,7 @@ private void addGuestOsHypervisorMapping(GuestOSHypervisorMapping mapping, long public void updateGuestOsName(long categoryId, String oldDisplayName, String newDisplayName) { GuestOSVO guestOS = guestOSDao.findByCategoryIdAndDisplayNameOrderByCreatedDesc(categoryId, oldDisplayName); if (guestOS == null) { - LOG.debug("Unable to update guest OS name, as there is no guest OS with category id: " + categoryId + " and display name: " + oldDisplayName); + logger.debug("Unable to update guest OS name, as there is no guest OS with category id: " + categoryId + " and display name: " + oldDisplayName); return; } @@ -237,7 +238,7 @@ public void updateGuestOsNameFromMapping(String newDisplayName, GuestOSHyperviso GuestOSHypervisorVO guestOSHypervisorVO = guestOSHypervisorDao.findByOsNameAndHypervisorOrderByCreatedDesc(mapping.getGuestOsName(), mapping.getHypervisorType(), mapping.getHypervisorVersion()); if (guestOSHypervisorVO == null) { - LOG.debug("Unable to update guest OS name, as there is no guest os hypervisor mapping"); + logger.debug("Unable to update guest OS name, as there is no guest os hypervisor mapping"); return; } @@ -256,13 +257,13 @@ public void updateGuestOsIdInHypervisorMapping(Connection conn, long categoryId, long oldGuestOsId = getGuestOsIdFromHypervisorMapping(mapping); if (oldGuestOsId == 0) { - LOG.debug("Unable to update guest OS in hypervisor mapping, as there is no guest os hypervisor mapping - " + mapping.toString()); + logger.debug("Unable to update guest OS in hypervisor mapping, as there is no guest os hypervisor mapping - " + mapping.toString()); return; } long newGuestOsId = getGuestOsId(categoryId, displayName); if (newGuestOsId == 0) { - LOG.debug("Unable to update guest OS id in hypervisor mapping, as there is no guest OS with category id: " + categoryId + " and display name: " + 
displayName); + logger.debug("Unable to update guest OS id in hypervisor mapping, as there is no guest OS with category id: " + categoryId + " and display name: " + displayName); return; } @@ -270,7 +271,7 @@ public void updateGuestOsIdInHypervisorMapping(Connection conn, long categoryId, } private void updateGuestOsIdInMapping(Connection conn, long oldGuestOsId, long newGuestOsId, GuestOSHypervisorMapping mapping) { - LOG.debug("Updating guest os id: " + oldGuestOsId + " to id: " + newGuestOsId + " in hypervisor mapping - " + mapping.toString()); + logger.debug("Updating guest os id: " + oldGuestOsId + " to id: " + newGuestOsId + " in hypervisor mapping - " + mapping.toString()); try { PreparedStatement pstmt = conn.prepareStatement(updateGuestOsHypervisorSql); pstmt.setLong(1, newGuestOsId); @@ -280,7 +281,7 @@ private void updateGuestOsIdInMapping(Connection conn, long oldGuestOsId, long n pstmt.setString(5, mapping.getGuestOsName()); pstmt.executeUpdate(); } catch (SQLException e) { - LOG.error("Failed to update guest OS id in hypervisor mapping due to: " + e.getMessage(), e); + logger.error("Failed to update guest OS id in hypervisor mapping due to: " + e.getMessage(), e); } } @@ -289,7 +290,7 @@ private boolean isValidGuestOSHypervisorMapping(GuestOSHypervisorMapping mapping return true; } - LOG.warn("Invalid Guest OS hypervisor mapping"); + logger.warn("Invalid Guest OS hypervisor mapping"); return false; } @@ -299,22 +300,22 @@ private boolean isValidGuestOSHypervisorMapping(GuestOSHypervisorMapping mapping */ public boolean copyGuestOSHypervisorMappings(HypervisorType hypervisorType, String srcVersion, String destVersion) { if (hypervisorType == HypervisorType.None || hypervisorType == HypervisorType.Any) { - LOG.warn("Unable to copy, invalid hypervisor"); + logger.warn("Unable to copy, invalid hypervisor"); return false; } if (StringUtils.isAnyBlank(srcVersion, destVersion)) { - LOG.warn("Unable to copy, invalid hypervisor version details"); + 
logger.warn("Unable to copy, invalid hypervisor version details"); return false; } List guestOSHypervisorMappingsForSrcVersion = guestOSHypervisorDao.listByHypervisorTypeAndVersion(hypervisorType.toString(), srcVersion); if (CollectionUtils.isEmpty(guestOSHypervisorMappingsForSrcVersion)) { - LOG.warn(String.format("Unable to copy, couldn't find guest OS mappings for hypervisor: %s and src version: %s", hypervisorType.toString(), srcVersion)); + logger.warn(String.format("Unable to copy, couldn't find guest OS mappings for hypervisor: %s and src version: %s", hypervisorType.toString(), srcVersion)); return false; } - LOG.debug(String.format("Adding guest OS mappings for hypervisor: %s and version: %s, from version: %s ", hypervisorType.toString(), destVersion, srcVersion)); + logger.debug(String.format("Adding guest OS mappings for hypervisor: %s and version: %s, from version: %s ", hypervisorType.toString(), destVersion, srcVersion)); for (GuestOSHypervisorVO guestOSHypervisorMapping : guestOSHypervisorMappingsForSrcVersion) { GuestOSHypervisorMapping mapping = new GuestOSHypervisorMapping(hypervisorType.toString(), destVersion, guestOSHypervisorMapping.getGuestOsName()); addGuestOsHypervisorMapping(mapping, guestOSHypervisorMapping.getGuestOsId()); @@ -329,7 +330,7 @@ public void updateGuestOsNameInHypervisorMapping(long categoryId, String display long guestOsId = getGuestOsId(categoryId, displayName); if (guestOsId == 0) { - LOG.error(String.format("no guest os found for category %d and name %s, skipping mapping it to %s/%s", guestOsId, displayName, mapping.getHypervisorType(), mapping.getHypervisorVersion())); + logger.error(String.format("no guest os found for category %d and name %s, skipping mapping it to %s/%s", guestOsId, displayName, mapping.getHypervisorType(), mapping.getHypervisorVersion())); return; } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/RolePermissionChecker.java 
b/engine/schema/src/main/java/com/cloud/upgrade/RolePermissionChecker.java index 6d434cda755c..370b85e37bec 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/RolePermissionChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/RolePermissionChecker.java @@ -16,7 +16,8 @@ // under the License. package com.cloud.upgrade; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.sql.Connection; import java.sql.PreparedStatement; @@ -25,7 +26,7 @@ public class RolePermissionChecker { - final static Logger LOG = Logger.getLogger(RolePermissionChecker.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String checkAnnotationRulesPermissionPreparedStatement = "SELECT permission FROM `cloud`.`role_permissions` WHERE role_id = ? AND rule = ?"; @@ -43,7 +44,7 @@ public boolean existsRolePermissionByRoleIdAndRule(Connection conn, long roleId, ResultSet rs = pstmt.executeQuery(); return rs != null && rs.next(); } catch (SQLException e) { - LOG.error("Error on existsRolePermissionByRoleIdAndRule: " + e.getMessage(), e); + logger.error("Error on existsRolePermissionByRoleIdAndRule: " + e.getMessage(), e); return false; } } @@ -55,7 +56,7 @@ public void insertAnnotationRulePermission(Connection conn, long roleId, String pstmt.setString(2, rule); pstmt.executeUpdate(); } catch (SQLException e) { - LOG.error("Error on insertAnnotationRulePermission: " + e.getMessage(), e); + logger.error("Error on insertAnnotationRulePermission: " + e.getMessage(), e); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index dc94dd708b1c..428bd0260b87 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -56,7 +56,8 
@@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.utils.security.DigestHelper; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.ini4j.Ini; import javax.inject.Inject; @@ -82,7 +83,7 @@ import java.util.stream.Collectors; public class SystemVmTemplateRegistration { - private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class); + protected static Logger LOGGER = LogManager.getLogger(SystemVmTemplateRegistration.class); private static final String MOUNT_COMMAND = "sudo mount -t nfs %s %s"; private static final String UMOUNT_COMMAND = "sudo umount %s"; private static final String RELATIVE_TEMPLATE_PATH = "./engine/schema/dist/systemvm-templates/"; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java index de161afea071..1c2c4b3c7ce7 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java @@ -22,20 +22,21 @@ import java.sql.SQLException; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class DatabaseAccessObject { - private static Logger s_logger = Logger.getLogger(DatabaseAccessObject.class); + protected Logger logger = LogManager.getLogger(DatabaseAccessObject.class); public void addForeignKey(Connection conn, String tableName, String tableColumn, String foreignTableName, String foreignColumnName) { String addForeignKeyStmt = String.format("ALTER TABLE `cloud`.`%s` ADD CONSTRAINT `fk_%s__%s` FOREIGN KEY `fk_%s__%s`(`%s`) REFERENCES `%s`(`%s`)", tableName, tableName, tableColumn, tableName, tableColumn, tableColumn, 
foreignTableName, foreignColumnName); try(PreparedStatement pstmt = conn.prepareStatement(addForeignKeyStmt);) { pstmt.executeUpdate(); - s_logger.debug(String.format("Foreign key is added successfully from the table %s", tableName)); + logger.debug(String.format("Foreign key is added successfully from the table %s", tableName)); } catch (SQLException e) { - s_logger.error("Ignored SQL Exception when trying to add foreign key on table " + tableName + " exception: " + e.getMessage()); + logger.error("Ignored SQL Exception when trying to add foreign key on table " + tableName + " exception: " + e.getMessage()); } } @@ -50,9 +51,9 @@ public void dropKey(Connection conn, String tableName, String key, boolean isFor try(PreparedStatement pstmt = conn.prepareStatement(alter_sql_str);) { pstmt.executeUpdate(); - s_logger.debug("Key " + key + " is dropped successfully from the table " + tableName); + logger.debug("Key " + key + " is dropped successfully from the table " + tableName); } catch (SQLException e) { - s_logger.debug("Ignored SQL Exception when trying to drop " + (isForeignKey ? "foreign " : "") + "key " + key + " on table " + tableName + " exception: " + e.getMessage()); + logger.debug("Ignored SQL Exception when trying to drop " + (isForeignKey ? 
"foreign " : "") + "key " + key + " on table " + tableName + " exception: " + e.getMessage()); } } @@ -60,18 +61,18 @@ public void dropKey(Connection conn, String tableName, String key, boolean isFor public void dropPrimaryKey(Connection conn, String tableName) { try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE " + tableName + " DROP PRIMARY KEY ");) { pstmt.executeUpdate(); - s_logger.debug("Primary key is dropped successfully from the table " + tableName); + logger.debug("Primary key is dropped successfully from the table " + tableName); } catch (SQLException e) { - s_logger.debug("Ignored SQL Exception when trying to drop primary key on table " + tableName + " exception: " + e.getMessage()); + logger.debug("Ignored SQL Exception when trying to drop primary key on table " + tableName + " exception: " + e.getMessage()); } } public void dropColumn(Connection conn, String tableName, String columnName) { try (PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE " + tableName + " DROP COLUMN " + columnName);){ pstmt.executeUpdate(); - s_logger.debug("Column " + columnName + " is dropped successfully from the table " + tableName); + logger.debug("Column " + columnName + " is dropped successfully from the table " + tableName); } catch (SQLException e) { - s_logger.warn("Unable to drop column " + columnName + " due to exception", e); + logger.warn("Unable to drop column " + columnName + " due to exception", e); } } @@ -81,7 +82,7 @@ public boolean columnExists(Connection conn, String tableName, String columnName pstmt.executeQuery(); columnExists = true; } catch (SQLException e) { - s_logger.debug("Field " + columnName + " doesn't exist in " + tableName + " ignoring exception: " + e.getMessage()); + logger.debug("Field " + columnName + " doesn't exist in " + tableName + " ignoring exception: " + e.getMessage()); } return columnExists; } @@ -97,29 +98,29 @@ public boolean indexExists(Connection conn, String tableName, String indexName) return true; 
} } catch (SQLException e) { - s_logger.debug(String.format("Index %s doesn't exist, ignoring exception:", indexName, e.getMessage())); + logger.debug(String.format("Index %s doesn't exist, ignoring exception:", indexName, e.getMessage())); } return false; } public void createIndex(Connection conn, String tableName, String indexName, String... columnNames) { String stmt = String.format("CREATE INDEX %s ON %s (%s)", indexName, tableName, StringUtils.join(columnNames, ", ")); - s_logger.debug("Statement: " + stmt); + logger.debug("Statement: " + stmt); try (PreparedStatement pstmt = conn.prepareStatement(stmt)) { pstmt.execute(); - s_logger.debug(String.format("Created index %s", indexName)); + logger.debug(String.format("Created index %s", indexName)); } catch (SQLException e) { - s_logger.warn(String.format("Unable to create index %s", indexName), e); + logger.warn(String.format("Unable to create index %s", indexName), e); } } - protected static void closePreparedStatement(PreparedStatement pstmt, String errorMessage) { + protected void closePreparedStatement(PreparedStatement pstmt, String errorMessage) { try { if (pstmt != null) { pstmt.close(); } } catch (SQLException e) { - s_logger.warn(errorMessage, e); + logger.warn(errorMessage, e); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeAbstractImpl.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeAbstractImpl.java new file mode 100644 index 000000000000..c96365dd6f50 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeAbstractImpl.java @@ -0,0 +1,24 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +public abstract class DbUpgradeAbstractImpl implements DbUpgrade { + protected Logger logger = LogManager.getLogger(getClass()); +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/LegacyDbUpgrade.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/LegacyDbUpgrade.java index d058943f4395..2a64ff496a2a 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/LegacyDbUpgrade.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/LegacyDbUpgrade.java @@ -16,11 +16,9 @@ // under the License. 
package com.cloud.upgrade.dao; -import org.apache.log4j.Logger; -public abstract class LegacyDbUpgrade implements DbUpgrade{ +public abstract class LegacyDbUpgrade extends DbUpgradeAbstractImpl{ - final static Logger s_logger = Logger.getLogger(LegacyDbUpgrade.class); public LegacyDbUpgrade() { super(); @@ -34,7 +32,7 @@ protected void closeAutoCloseable(AutoCloseable closable) { try { closable.close(); } catch (Exception e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade217to218.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade217to218.java index 2ca4e794fb84..5441f8fb02f8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade217to218.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade217to218.java @@ -21,7 +21,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade217to218 implements DbUpgrade { +public class Upgrade217to218 extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java index bc58794e8bd2..171357578ee9 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22.java @@ -34,7 +34,6 @@ import java.util.TimeZone; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.configuration.Resource.ResourceType; import com.cloud.event.EventTypes; @@ -45,8 +44,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; -public class Upgrade218to22 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade218to22.class); +public class Upgrade218to22 extends DbUpgradeAbstractImpl { boolean _basicZone; @Override @@ -212,7 +210,7 @@ protected long 
insertNic(Connection conn, long networkId, long instanceId, boole protected void upgradeDomR(Connection conn, long dcId, long domrId, Long publicNetworkId, long guestNetworkId, long controlNetworkId, String zoneType, String vnet) throws SQLException { - s_logger.debug("Upgrading domR" + domrId); + logger.debug("Upgrading domR" + domrId); try ( PreparedStatement pstmt = conn.prepareStatement("SELECT vm_instance.id, vm_instance.state, vm_instance.private_mac_address, vm_instance.private_ip_address, vm_instance.private_netmask, domain_router.public_mac_address, domain_router.public_ip_address, domain_router.public_netmask, domain_router.guest_mac_address, domain_router.guest_ip_address, domain_router.guest_netmask, domain_router.vnet, domain_router.gateway FROM vm_instance INNER JOIN domain_router ON vm_instance.id=domain_router.id WHERE vm_instance.removed is NULL AND vm_instance.id=?"); @@ -274,7 +272,7 @@ protected void upgradeDomR(Connection conn, long dcId, long domrId, Long publicN protected void upgradeSsvm(Connection conn, long dataCenterId, long publicNetworkId, long managementNetworkId, long controlNetworkId, String zoneType) throws SQLException { - s_logger.debug("Upgrading ssvm in " + dataCenterId); + logger.debug("Upgrading ssvm in " + dataCenterId); //select instance try ( PreparedStatement selectInstance = @@ -284,7 +282,7 @@ protected void upgradeSsvm(Connection conn, long dataCenterId, long publicNetwor try (ResultSet instanceResult = selectInstance.executeQuery();) { if (!instanceResult.next()) { - s_logger.debug("Unable to find ssvm in data center " + dataCenterId); + logger.debug("Unable to find ssvm in data center " + dataCenterId); return; } @@ -309,7 +307,7 @@ protected void upgradeSsvm(Connection conn, long dataCenterId, long publicNetwor try (ResultSet hostResult = selectHost.executeQuery();) { if (!hostResult.next()) { - s_logger.debug("Unable to find ssvm in data center " + dataCenterId); + logger.debug("Unable to find ssvm in data center " 
+ dataCenterId); return; } @@ -365,7 +363,7 @@ protected void upgradeSsvm(Connection conn, long dataCenterId, long publicNetwor protected void upgradeConsoleProxy(Connection conn, long dcId, long cpId, long publicNetworkId, long managementNetworkId, long controlNetworkId, String zoneType) throws SQLException { - s_logger.debug("Upgrading cp" + cpId); + logger.debug("Upgrading cp" + cpId); try (PreparedStatement pstmt = conn.prepareStatement("SELECT vm_instance.id, vm_instance.state, vm_instance.private_mac_address, vm_instance.private_ip_address, vm_instance.private_netmask, console_proxy.public_mac_address, console_proxy.public_ip_address, console_proxy.public_netmask, console_proxy.guest_mac_address, console_proxy.guest_ip_address, console_proxy.guest_netmask, console_proxy.gateway, vm_instance.type FROM vm_instance INNER JOIN console_proxy ON vm_instance.id=console_proxy.id WHERE vm_instance.removed is NULL AND vm_instance.id=?");) { pstmt.setLong(1, cpId); @@ -466,7 +464,7 @@ protected void upgradeUserVms(Connection conn, long domainRouterId, long network vm[4] = rs.getString(5); // vm state vms.add(vm); } - s_logger.debug("Upgrading " + vms.size() + " vms for router " + domainRouterId); + logger.debug("Upgrading " + vms.size() + " vms for router " + domainRouterId); for (Object[] vm : vms) { String state = (String)vm[4]; @@ -617,7 +615,7 @@ protected void upgradeManagementIpAddress(Connection conn, long dcId) throws SQL } protected void upgradeDirectUserIpAddress(Connection conn, long dcId, long networkId, String vlanType) throws SQLException { - s_logger.debug("Upgrading user ip address for data center " + dcId + " network " + networkId + " vlan type " + vlanType); + logger.debug("Upgrading user ip address for data center " + dcId + " network " + networkId + " vlan type " + vlanType); try (PreparedStatement pstmt = conn.prepareStatement("UPDATE user_ip_address INNER JOIN vlan ON user_ip_address.vlan_db_id=vlan.id SET 
user_ip_address.source_network_id=vlan.network_id WHERE user_ip_address.data_center_id=? AND vlan.vlan_type=?");) { pstmt.setLong(1, dcId); @@ -638,8 +636,8 @@ protected void upgradeDirectUserIpAddress(Connection conn, long dcId, long netwo ip[3] = rs.getDate(4); // allocated allocatedIps.add(ip); } - s_logger.debug("Marking " + allocatedIps.size() + " ip addresses to belong to network " + networkId); - s_logger.debug("Updating mac addresses for data center id=" + dcId + ". Found " + allocatedIps.size() + " ip addresses to update"); + logger.debug("Marking " + allocatedIps.size() + " ip addresses to belong to network " + networkId); + logger.debug("Updating mac addresses for data center id=" + dcId + ". Found " + allocatedIps.size() + " ip addresses to update"); for (Object[] allocatedIp : allocatedIps) { try (PreparedStatement selectMacAddresses = conn.prepareStatement("SELECT mac_address FROM data_center WHERE id = ?");) { selectMacAddresses.setLong(1, dcId); @@ -665,7 +663,7 @@ protected void upgradeDirectUserIpAddress(Connection conn, long dcId, long netwo } protected void upgradePublicUserIpAddress(Connection conn, long dcId, long networkId, String vlanType) throws SQLException { - s_logger.debug("Upgrading user ip address for data center " + dcId + " network " + networkId + " vlan type " + vlanType); + logger.debug("Upgrading user ip address for data center " + dcId + " network " + networkId + " vlan type " + vlanType); try (PreparedStatement pstmt = conn.prepareStatement("UPDATE user_ip_address INNER JOIN vlan ON user_ip_address.vlan_db_id=vlan.id SET source_network_id=? WHERE user_ip_address.data_center_id=? 
AND vlan.vlan_type=?");) { pstmt.setLong(1, networkId); @@ -763,7 +761,7 @@ protected void upgradeDataCenter(Connection conn) { } } catch (SQLException e) { - s_logger.error("Can't update data center ", e); + logger.error("Can't update data center ", e); throw new CloudRuntimeException("Can't update data center ", e); } } @@ -832,7 +830,7 @@ private long retrieveNetworkOfferingId(Connection conn, String type) throws SQLE pstmt.setString(1, type); try (ResultSet rs = pstmt.executeQuery();) { if (!rs.next()) { - s_logger.error("Unable to find the network offering for networktype '" + type + "'"); + logger.error("Unable to find the network offering for networktype '" + type + "'"); throw new CloudRuntimeException("Unable to find the storage network offering."); } networkOfferingId = rs.getLong(1); @@ -970,7 +968,7 @@ private void updateRouters(Connection conn, Object[] dc, Long dcId, long control private void updateRouters(Connection conn, Long dcId, long controlNetworkId, long basicDefaultDirectNetworkId, ArrayList routers) throws SQLException { for (Object[] router : routers) { - s_logger.debug("Updating domR with network id in basic zone id=" + dcId); + logger.debug("Updating domR with network id in basic zone id=" + dcId); updateNetworkForRouter(conn, router, basicDefaultDirectNetworkId); upgradeUserVms(conn, (Long)router[0], basicDefaultDirectNetworkId, (String)router[1], "untagged", "DirectPodBasedNetworkGuru", "Create"); upgradeDomR(conn, dcId, (Long)router[0], null, basicDefaultDirectNetworkId, controlNetworkId, "Basic", "untagged"); @@ -1007,7 +1005,7 @@ private void updateNetworkForRouter(Connection conn, Object[] router, long virtu updateDomainRouter.setLong(2, (Long)router[0]); updateDomainRouter.executeUpdate(); } - s_logger.debug("Network inserted for " + router[0] + " id = " + virtualNetworkId); + logger.debug("Network inserted for " + router[0] + " id = " + virtualNetworkId); } private void createDirectNetworks(Connection conn, Object[] dc, Long dcId) 
throws SQLException { @@ -1029,7 +1027,7 @@ private void createDirectNetworks(Connection conn, Object[] dc, Long dcId) throw updateNetworkInVlanTableforTag(conn, vlanNetworkMap, vlanId, tag); upgradeDirectUserIpAddress(conn, dcId, vlanNetworkMap.get(tag), "DirectAttached"); - s_logger.debug("Created Direct networks and upgraded Direct ip addresses"); + logger.debug("Created Direct networks and upgraded Direct ip addresses"); } } } @@ -1118,11 +1116,11 @@ private void updateDhcpServerData(Connection conn, Long dcId, long controlNetwor String gateway = retrieveGateway(conn, directNetworkId); updateDomainRouter(conn, routerId, directNetworkId); - s_logger.debug("NetworkId updated for router id=" + routerId + "with network id = " + directNetworkId); + logger.debug("NetworkId updated for router id=" + routerId + "with network id = " + directNetworkId); upgradeUserVms(conn, routerId, directNetworkId, gateway, vnet, "DirectNetworkGuru", "Create"); - s_logger.debug("Upgraded Direct vms in Advance zone id=" + dcId); + logger.debug("Upgraded Direct vms in Advance zone id=" + dcId); upgradeDomR(conn, dcId, routerId, null, directNetworkId, controlNetworkId, "Advanced", vnet); - s_logger.debug("Upgraded Direct domRs in Advance zone id=" + dcId); + logger.debug("Upgraded Direct domRs in Advance zone id=" + dcId); } } } @@ -1166,7 +1164,7 @@ private void updateUserStats(Connection conn) { PreparedStatement pstmt = conn.prepareStatement("UPDATE user_statistics SET device_type='DomainRouter'"); ){ pstmt.executeUpdate(); - s_logger.debug("Upgraded userStatistcis with device_type=DomainRouter"); + logger.debug("Upgraded userStatistcis with device_type=DomainRouter"); // update device_id information try ( @@ -1182,7 +1180,7 @@ private void updateUserStats(Connection conn) { selectNetworkType.setLong(1, dataCenterId); try (ResultSet dcSet = selectNetworkType.executeQuery();) { if (!dcSet.next()) { - s_logger.error("Unable to get data_center information as a part of user_statistics 
update"); + logger.error("Unable to get data_center information as a part of user_statistics update"); throw new CloudRuntimeException("Unable to get data_center information as a part of user_statistics update"); } String dataCenterType = dcSet.getString(1); @@ -1204,7 +1202,7 @@ private void updateUserStats(Connection conn) { selectnonRemovedVms.setLong(2, dataCenterId); try (ResultSet nonRemovedVms = selectnonRemovedVms.executeQuery();) { if (nonRemovedVms.next()) { - s_logger.warn("Failed to find domR for account id=" + accountId + " in zone id=" + dataCenterId + + logger.warn("Failed to find domR for account id=" + accountId + " in zone id=" + dataCenterId + "; will try to locate domR based on user_vm info"); //try to get domR information from the user_vm belonging to the account try (PreparedStatement selectNetworkType = @@ -1213,14 +1211,14 @@ private void updateUserStats(Connection conn) { selectNetworkType.setLong(2, dataCenterId); try (ResultSet userVmSet = selectNetworkType.executeQuery();) { if (!userVmSet.next()) { - s_logger.warn("Skipping user_statistics upgrade for account id=" + accountId + " in datacenter id=" + dataCenterId); + logger.warn("Skipping user_statistics upgrade for account id=" + accountId + " in datacenter id=" + dataCenterId); continue; } deviceId = userVmSet.getLong(1); } } } else { - s_logger.debug("Account id=" + accountId + " doesn't own any user vms and domRs, so skipping user_statistics update"); + logger.debug("Account id=" + accountId + " doesn't own any user vms and domRs, so skipping user_statistics update"); continue; } } @@ -1237,7 +1235,7 @@ private void updateUserStats(Connection conn) { } } } - s_logger.debug("Upgraded userStatistcis with deviceId(s)"); + logger.debug("Upgraded userStatistcis with deviceId(s)"); } catch (Exception e) { throw new CloudRuntimeException("Failed to migrate usage events: ", e); @@ -1263,7 +1261,7 @@ public void upgradePortForwardingRules(Connection conn) { } if (!rules.isEmpty()) { - 
s_logger.debug("Found " + rules.size() + " port forwarding rules to upgrade"); + logger.debug("Found " + rules.size() + " port forwarding rules to upgrade"); for (Object[] rule : rules) { long id = (Long)rule[0]; String sourcePort = (String)rule[2]; @@ -1275,7 +1273,7 @@ public void upgradePortForwardingRules(Connection conn) { try (ResultSet userIpAddressData = selectUserIpAddressData.executeQuery();) { if (!userIpAddressData.next()) { - s_logger.error("Unable to find public IP address " + publicIp); + logger.error("Unable to find public IP address " + publicIp); throw new CloudRuntimeException("Unable to find public IP address " + publicIp); } int ipAddressId = userIpAddressData.getInt(1); @@ -1285,7 +1283,7 @@ public void upgradePortForwardingRules(Connection conn) { String privateIp = (String)rule[3]; // update port_forwarding_rules table - s_logger.trace("Updating port_forwarding_rules table..."); + logger.trace("Updating port_forwarding_rules table..."); try (PreparedStatement selectInstanceId = conn.prepareStatement("SELECT instance_id FROM nics where network_id=? 
AND ip4_address=?");) { selectInstanceId.setLong(1, networkId); selectInstanceId.setString(2, privateIp); @@ -1293,14 +1291,14 @@ public void upgradePortForwardingRules(Connection conn) { if (!selectedInstanceId.next()) { // the vm might be expunged already...so just give the warning - s_logger.warn("Unable to find vmId for private ip address " + privateIp + " for account id=" + accountId + "; assume that the vm is expunged"); + logger.warn("Unable to find vmId for private ip address " + privateIp + " for account id=" + accountId + "; assume that the vm is expunged"); // throw new CloudRuntimeException("Unable to find vmId for private ip address " + privateIp + // " for account id=" + accountId); } else { long instanceId = selectedInstanceId.getLong(1); - s_logger.debug("Instance id is " + instanceId); + logger.debug("Instance id is " + instanceId); // update firewall_rules table - s_logger.trace("Updating firewall_rules table as a part of PF rules upgrade..."); + logger.trace("Updating firewall_rules table as a part of PF rules upgrade..."); try ( PreparedStatement insertFirewallRules = conn.prepareStatement("INSERT INTO firewall_rules (id, ip_address_id, start_port, end_port, state, protocol, purpose, account_id, domain_id, network_id, xid, is_static_nat, created) VALUES (?, ?, ?, ?, 'Active', ?, 'PortForwarding', ?, ?, ?, ?, 0, now())"); @@ -1315,7 +1313,7 @@ public void upgradePortForwardingRules(Connection conn) { insertFirewallRules.setLong(8, networkId); insertFirewallRules.setString(9, UUID.randomUUID().toString()); insertFirewallRules.executeUpdate(); - s_logger.trace("firewall_rules table is updated as a part of PF rules upgrade"); + logger.trace("firewall_rules table is updated as a part of PF rules upgrade"); } String privatePort = (String)rule[4]; try (PreparedStatement insertPortForwardingRules = conn.prepareStatement("INSERT INTO port_forwarding_rules VALUES (?, ?, ?, ?, ?)");) { @@ -1326,7 +1324,7 @@ public void upgradePortForwardingRules(Connection 
conn) { insertPortForwardingRules.setInt(5, Integer.parseInt(privatePort.trim())); insertPortForwardingRules.executeUpdate(); } - s_logger.trace("port_forwarding_rules table is updated"); + logger.trace("port_forwarding_rules table is updated"); } } } @@ -1334,7 +1332,7 @@ public void upgradePortForwardingRules(Connection conn) { } } } - s_logger.debug("Port forwarding rules are updated"); + logger.debug("Port forwarding rules are updated"); } catch (SQLException e) { throw new CloudRuntimeException("Can't update port forwarding rules ", e); } @@ -1358,7 +1356,7 @@ public void upgradeLoadBalancingRules(Connection conn) { } if (!lbs.isEmpty()) { - s_logger.debug("Found " + lbs.size() + " lb rules to upgrade"); + logger.debug("Found " + lbs.size() + " lb rules to upgrade"); long newLbId = 0; try ( PreparedStatement selectFWRules = conn.prepareStatement("SELECT max(id) FROM firewall_rules order by id"); @@ -1382,7 +1380,7 @@ public void upgradeLoadBalancingRules(Connection conn) { try (ResultSet ipData = selectIpData.executeQuery();) { if (!ipData.next()) { - s_logger.warn("Unable to find public IP address " + publicIp + "; skipping lb rule id=" + originalLbId + + logger.warn("Unable to find public IP address " + publicIp + "; skipping lb rule id=" + originalLbId + " from update. 
Cleaning it up from load_balancer_vm_map and load_balancer table"); try (PreparedStatement deleteLbVmMap = conn.prepareStatement("DELETE from load_balancer_vm_map where load_balancer_id=?");) { deleteLbVmMap.setLong(1, originalLbId); @@ -1399,7 +1397,7 @@ public void upgradeLoadBalancingRules(Connection conn) { long domainId = ipData.getLong(3); long networkId = ipData.getLong(4); // update firewall_rules table - s_logger.trace("Updating firewall_rules table as a part of LB rules upgrade..."); + logger.trace("Updating firewall_rules table as a part of LB rules upgrade..."); try (PreparedStatement insertFirewallRules = conn.prepareStatement("INSERT INTO firewall_rules (id, ip_address_id, start_port, end_port, state, protocol, purpose, account_id, domain_id, network_id, xid, is_static_nat, created) VALUES (?, ?, ?, ?, 'Active', ?, 'LoadBalancing', ?, ?, ?, ?, 0, now())");) { insertFirewallRules.setLong(1, newLbId); @@ -1413,13 +1411,13 @@ public void upgradeLoadBalancingRules(Connection conn) { insertFirewallRules.setString(9, UUID.randomUUID().toString()); insertFirewallRules.executeUpdate(); } - s_logger.trace("firewall_rules table is updated as a part of LB rules upgrade"); + logger.trace("firewall_rules table is updated as a part of LB rules upgrade"); } } // update load_balancing_rules - s_logger.trace("Updating load_balancing_rules table as a part of LB rules upgrade..."); + logger.trace("Updating load_balancing_rules table as a part of LB rules upgrade..."); try (PreparedStatement insertLoadBalancer = conn.prepareStatement("INSERT INTO load_balancing_rules VALUES (?, ?, NULL, ?, ?, ?)");) { insertLoadBalancer.setLong(1, newLbId); insertLoadBalancer.setString(2, name); @@ -1428,10 +1426,10 @@ public void upgradeLoadBalancingRules(Connection conn) { insertLoadBalancer.setString(5, algorithm); insertLoadBalancer.executeUpdate(); } - s_logger.trace("load_balancing_rules table is updated as a part of LB rules upgrade"); + logger.trace("load_balancing_rules table is 
updated as a part of LB rules upgrade"); // update load_balancer_vm_map table - s_logger.trace("Updating load_balancer_vm_map table as a part of LB rules upgrade..."); + logger.trace("Updating load_balancer_vm_map table as a part of LB rules upgrade..."); try ( PreparedStatement selectInstance = conn.prepareStatement("SELECT instance_id FROM load_balancer_vm_map WHERE load_balancer_id=?"); ) { @@ -1451,10 +1449,10 @@ public void upgradeLoadBalancingRules(Connection conn) { updateLoadBalancer.setLong(2, originalLbId); updateLoadBalancer.executeUpdate(); } - s_logger.trace("load_balancer_vm_map table is updated as a part of LB rules upgrade"); + logger.trace("load_balancer_vm_map table is updated as a part of LB rules upgrade"); } } - s_logger.debug("LB rules are upgraded"); + logger.debug("LB rules are upgraded"); } catch (SQLException e) { throw new CloudRuntimeException("Can't update LB rules ", e); } @@ -1724,7 +1722,7 @@ private void migrateEvents(Connection conn) { ResultSet rs1 = pstmt1.executeQuery(); ) { if (!rs1.next()) { - s_logger.debug("cloud_usage db doesn't exist. Skipping events migration"); + logger.debug("cloud_usage db doesn't exist. Skipping events migration"); return; } @@ -1734,7 +1732,7 @@ private void migrateEvents(Connection conn) { String sql = "SELECT type, description, user_id, account_id, created, level, parameters FROM cloud.event vmevt WHERE vmevt.id > ? 
and vmevt.state = 'Completed' "; if (lastProcessedEvent == null) { - s_logger.trace("no events are processed earlier, copying all events"); + logger.trace("no events are processed earlier, copying all events"); sql = "SELECT type, description, user_id, account_id, created, level, parameters FROM cloud.event vmevt WHERE vmevt.state = 'Completed' "; } @@ -1744,7 +1742,7 @@ private void migrateEvents(Connection conn) { pstmt.setLong(i++, lastProcessedEvent); } try (ResultSet rs = pstmt.executeQuery();) { - s_logger.debug("Begin Migrating events"); + logger.debug("Begin Migrating events"); while (rs.next()) { EventVO event = new EventVO(); event.setType(rs.getString(1)); @@ -1758,7 +1756,7 @@ private void migrateEvents(Connection conn) { } } } - s_logger.debug("Migrating events completed"); + logger.debug("Migrating events completed"); } catch (Exception e) { throw new CloudRuntimeException("Failed to migrate usage events: ", e); } @@ -2142,7 +2140,7 @@ public void performDataMigration(Connection conn) { cleanupLbVmMaps(conn); } catch (SQLException e) { - s_logger.error("Can't perform data migration ", e); + logger.error("Can't perform data migration ", e); throw new CloudRuntimeException("Can't perform data migration ", e); } @@ -2180,7 +2178,7 @@ private void deleteOrphanedTemplateRef(Connection conn) { ResultSet rs = selectStoragePoolRef.executeQuery(); ) { if (!rs.next()) { - s_logger.debug("No records in template_spool_ref, skipping this upgrade part"); + logger.debug("No records in template_spool_ref, skipping this upgrade part"); return; } while (rs.next()) { @@ -2192,7 +2190,7 @@ private void deleteOrphanedTemplateRef(Connection conn) { try (ResultSet selectedStoragePool = selectStoragePool.executeQuery();) { if (!selectedStoragePool.next()) { - s_logger.debug("Orphaned template_spool_ref record is found (storage pool doesn't exist any more0) id=" + id + "; so removing the record"); + logger.debug("Orphaned template_spool_ref record is found (storage pool 
doesn't exist any more0) id=" + id + "; so removing the record"); try (PreparedStatement delete = conn.prepareStatement("DELETE FROM template_spool_ref where id=?");) { delete.setLong(1, id); delete.executeUpdate(); @@ -2201,9 +2199,9 @@ private void deleteOrphanedTemplateRef(Connection conn) { } } } - s_logger.debug("Finished deleting orphaned template_spool_ref(s)"); + logger.debug("Finished deleting orphaned template_spool_ref(s)"); } catch (Exception e) { - s_logger.error("Failed to delete orphaned template_spool_ref(s): ", e); + logger.error("Failed to delete orphaned template_spool_ref(s): ", e); throw new CloudRuntimeException("Failed to delete orphaned template_spool_ref(s): ", e); } } @@ -2215,7 +2213,7 @@ private void cleanupVolumes(Connection conn) { ){ while (selectedVolumes.next()) { Long id = selectedVolumes.getLong(1); - s_logger.debug("Volume id is " + id); + logger.debug("Volume id is " + id); Long instanceId = selectedVolumes.getLong(2); Long accountId = selectedVolumes.getLong(3); @@ -2245,15 +2243,15 @@ private void cleanupVolumes(Connection conn) { try(PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes SET state='Destroy' WHERE id=?");) { pstmt.setLong(1, id); pstmt.executeUpdate(); - s_logger.debug("Volume with id=" + id + " is marked with Destroy state as a part of volume cleanup (it's Destroyed had 127 value)"); + logger.debug("Volume with id=" + id + " is marked with Destroy state as a part of volume cleanup (it's Destroyed had 127 value)"); } } } } } - s_logger.debug("Finished cleaning up volumes with incorrect Destroyed field (127)"); + logger.debug("Finished cleaning up volumes with incorrect Destroyed field (127)"); } catch (Exception e) { - s_logger.error("Failed to cleanup volumes with incorrect Destroyed field (127):", e); + logger.error("Failed to cleanup volumes with incorrect Destroyed field (127):", e); throw new CloudRuntimeException("Failed to cleanup volumes with incorrect Destroyed field (127):", e); } } @@ 
-2267,7 +2265,7 @@ private void modifyIndexes(Connection conn) { if (result__index.next()) { try (PreparedStatement alterTable = conn.prepareStatement("ALTER TABLE `cloud`.`security_group` DROP INDEX `fk_network_group__account_id`");) { alterTable.executeUpdate(); - s_logger.debug("Unique key 'fk_network_group__account_id' is removed successfully"); + logger.debug("Unique key 'fk_network_group__account_id' is removed successfully"); } } @@ -2278,7 +2276,7 @@ private void modifyIndexes(Connection conn) { if (result___index.next()) { try (PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`security_group` DROP INDEX `fk_network_group___account_id`");) { pstmt.executeUpdate(); - s_logger.debug("Unique key 'fk_network_group___account_id' is removed successfully"); + logger.debug("Unique key 'fk_network_group___account_id' is removed successfully"); } } } @@ -2310,7 +2308,7 @@ private void cleanupLbVmMaps(Connection conn) { ResultSet rs2 = pstmt2.executeQuery(); ) { if (!rs1.next() && rs2.next()) { - s_logger.debug("Removing load balancer vm mappings for lb id=" + lbId + " as a part of cleanup"); + logger.debug("Removing load balancer vm mappings for lb id=" + lbId + " as a part of cleanup"); try (PreparedStatement delete = conn.prepareStatement("DELETE FROM load_balancer_vm_map where load_balancer_id=?");) { delete.setLong(1, lbId); delete.executeUpdate(); @@ -2329,7 +2327,7 @@ private void cleanupLbVmMaps(Connection conn) { * Create usage events for existing port forwarding rules */ private void createPortForwardingEvents(Connection conn) { - s_logger.debug("Creating Port Forwarding usage events"); + logger.debug("Creating Port Forwarding usage events"); try ( PreparedStatement pstmt = conn.prepareStatement("SELECT fw.account_id, ip.data_center_id, fw.id FROM firewall_rules fw, user_ip_address ip where purpose = 'PortForwarding' and " @@ -2354,7 +2352,7 @@ private void createPortForwardingEvents(Connection conn) { pstmt1.executeUpdate(); } } - 
s_logger.debug("Completed creating Port Forwarding usage events"); + logger.debug("Completed creating Port Forwarding usage events"); } catch (SQLException e) { throw new CloudRuntimeException("Failed to add port forwarding usage events due to:", e); } @@ -2364,7 +2362,7 @@ private void createPortForwardingEvents(Connection conn) { * Create usage events for existing load balancer rules */ private void createLoadBalancerEvents(Connection conn) { - s_logger.debug("Creating load balancer usage events"); + logger.debug("Creating load balancer usage events"); try ( PreparedStatement pstmt = conn.prepareStatement("SELECT fw.account_id, ip.data_center_id, fw.id FROM firewall_rules fw, user_ip_address ip where purpose = 'LoadBalancing' and " @@ -2389,7 +2387,7 @@ private void createLoadBalancerEvents(Connection conn) { pstmt1.executeUpdate(); } } - s_logger.debug("Completed creating load balancer usage events"); + logger.debug("Completed creating load balancer usage events"); } catch (SQLException e) { throw new CloudRuntimeException("Failed to add Load Balancer usage events due to:", e); } @@ -2399,7 +2397,7 @@ private void createLoadBalancerEvents(Connection conn) { * Create usage events for network offerings */ private void createNetworkOfferingEvents(Connection conn) { - s_logger.debug("Creating network offering usage events"); + logger.debug("Creating network offering usage events"); try ( PreparedStatement pstmt = conn.prepareStatement("SELECT vm.account_id, vm.data_center_id, ni.instance_id, vm.name, nw.network_offering_id, nw.is_default FROM nics ni, " @@ -2429,7 +2427,7 @@ private void createNetworkOfferingEvents(Connection conn) { pstmt1.executeUpdate(); } } - s_logger.debug("Completed creating network offering usage events"); + logger.debug("Completed creating network offering usage events"); } catch (SQLException e) { throw new CloudRuntimeException("Failed to add network offering usage events due to:", e); } diff --git 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to224DomainVlans.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to224DomainVlans.java index 5f66728e6d9f..65a71993e912 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to224DomainVlans.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to224DomainVlans.java @@ -23,12 +23,10 @@ import java.sql.SQLException; import java.util.HashMap; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade218to224DomainVlans implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade218to224DomainVlans.class); +public class Upgrade218to224DomainVlans extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { @@ -42,7 +40,7 @@ public void performDataMigration(Connection conn) { try { PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM networks WHERE shared=1 AND traffic_type='Guest' AND guest_type='Direct'"); ResultSet rs = pstmt.executeQuery(); - s_logger.debug("query is " + pstmt); + logger.debug("query is " + pstmt); while (rs.next()) { Long networkId = rs.getLong(1); Long vlanId = null; @@ -50,7 +48,7 @@ public void performDataMigration(Connection conn) { pstmt = conn.prepareStatement("SELECT id FROM vlan WHERE network_id=? LIMIT 0,1"); pstmt.setLong(1, networkId); - s_logger.debug("query is " + pstmt); + logger.debug("query is " + pstmt); rs = pstmt.executeQuery(); while (rs.next()) { @@ -60,7 +58,7 @@ public void performDataMigration(Connection conn) { if (vlanId != null) { pstmt = conn.prepareStatement("SELECT domain_id FROM account_vlan_map WHERE domain_id IS NOT NULL AND vlan_db_id=? 
LIMIT 0,1"); pstmt.setLong(1, vlanId); - s_logger.debug("query is " + pstmt); + logger.debug("query is " + pstmt); rs = pstmt.executeQuery(); while (rs.next()) { @@ -118,7 +116,7 @@ private void performDbCleanup(Connection conn) { try { pstmt.executeQuery(); } catch (SQLException e) { - s_logger.debug("Assuming that domain_id field doesn't exist in account_vlan_map table, no need to upgrade"); + logger.debug("Assuming that domain_id field doesn't exist in account_vlan_map table, no need to upgrade"); return; } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22Premium.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22Premium.java index d21d1ce4e662..d5106925e998 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22Premium.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade218to22Premium.java @@ -52,7 +52,7 @@ private void updateUserStats(Connection conn) { ) { pstmt.executeUpdate(); - s_logger.debug("Upgraded cloud_usage user_statistics with deviceId"); + logger.debug("Upgraded cloud_usage user_statistics with deviceId"); } catch (Exception e) { throw new CloudRuntimeException("Failed to upgrade user stats: ", e); } @@ -64,7 +64,7 @@ private void updateUserStats(Connection conn) { ) { pstmt1.executeUpdate(); - s_logger.debug("Upgraded cloud_usage usage_network with hostId"); + logger.debug("Upgraded cloud_usage usage_network with hostId"); } catch (Exception e) { throw new CloudRuntimeException("Failed to upgrade network usage stats: ", e); } @@ -78,7 +78,7 @@ private void updateUsageIpAddress(Connection conn) { ) { pstmt.executeUpdate(); - s_logger.debug("Upgraded cloud_usage usage_ip_address with Id"); + logger.debug("Upgraded cloud_usage usage_ip_address with Id"); } catch (Exception e) { throw new CloudRuntimeException("Failed to upgrade usage_ip_address: ", e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2210to2211.java 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2210to2211.java index db1452270805..ba456a62953e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2210to2211.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2210to2211.java @@ -21,7 +21,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade2210to2211 implements DbUpgrade { +public class Upgrade2210to2211 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212.java index f8175386df52..1c4868d61b0e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212.java @@ -24,12 +24,10 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade2211to2212 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade2211to2212.class); +public class Upgrade2211to2212 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -68,7 +66,7 @@ public InputStream[] getCleanupScripts() { } private void createResourceCount(Connection conn) { - s_logger.debug("Creating missing resource_count records as a part of 2.2.11-2.2.12 upgrade"); + logger.debug("Creating missing resource_count records as a part of 2.2.11-2.2.12 upgrade"); try { //Get all non removed accounts @@ -99,7 +97,7 @@ private void createResourceCount(Connection conn) { pstmt.setLong(2, accountId); rs = pstmt.executeQuery(); if (!rs.next()) { - s_logger.debug("Inserting resource_count record of type " + resourceType + " for account id=" + accountId); + logger.debug("Inserting resource_count record of type " + resourceType + " for account id=" + accountId); 
pstmt = conn.prepareStatement("INSERT INTO resource_count (account_id, domain_id, type, count) VALUES (?, null, ?, 0)"); pstmt.setLong(1, accountId); pstmt.setString(2, resourceType); @@ -117,7 +115,7 @@ private void createResourceCount(Connection conn) { pstmt.setLong(2, domainId); rs = pstmt.executeQuery(); if (!rs.next()) { - s_logger.debug("Inserting resource_count record of type " + resourceType + " for domain id=" + domainId); + logger.debug("Inserting resource_count record of type " + resourceType + " for domain id=" + domainId); pstmt = conn.prepareStatement("INSERT INTO resource_count (account_id, domain_id, type, count) VALUES (null, ?, ?, 0)"); pstmt.setLong(1, domainId); pstmt.setString(2, resourceType); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212Premium.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212Premium.java index 374483811368..d2f0f00ee0a3 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212Premium.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2211to2212Premium.java @@ -18,12 +18,10 @@ import java.io.InputStream; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade2211to2212Premium extends Upgrade2211to2212 { - final static Logger s_logger = Logger.getLogger(Upgrade2211to2212Premium.class); @Override public InputStream[] getPrepareScripts() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2212to2213.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2212to2213.java index 7debe2ec378a..809e23c1b5a0 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2212to2213.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2212to2213.java @@ -24,12 +24,10 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class 
Upgrade2212to2213 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade2212to2213.class); +public class Upgrade2212to2213 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -74,7 +72,7 @@ private void fixForeignKeys(Connection conn) { foreignKeys.put("networks", keys); // drop all foreign keys - s_logger.debug("Dropping old key fk_networks__data_center_id..."); + logger.debug("Dropping old key fk_networks__data_center_id..."); for (String tableName : foreignKeys.keySet()) { DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true); } @@ -95,7 +93,7 @@ private void fixForeignKeys(Connection conn) { try { PreparedStatement pstmt = conn.prepareStatement("drop index network_offering_id on cloud_usage.usage_network_offering"); pstmt.executeUpdate(); - s_logger.debug("Dropped usage_network_offering unique key"); + logger.debug("Dropped usage_network_offering unique key"); } catch (Exception e) { // Ignore error if the usage_network_offering table or the unique key doesn't exist } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2213to2214.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2213to2214.java index 9dc3f5be2d8f..6299abf10304 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2213to2214.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2213to2214.java @@ -23,12 +23,10 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade2213to2214 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade2213to2214.class); +public class Upgrade2213to2214 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java index d806490d87df..524b6a34893b 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java @@ -29,7 +29,6 @@ import java.util.Map; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.offering.NetworkOffering; import com.cloud.utils.crypt.DBEncryptionUtil; @@ -37,8 +36,7 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade2214to30.class); +public class Upgrade2214to30 extends Upgrade30xBase { @Override public String[] getUpgradableVersionRange() { @@ -183,7 +181,7 @@ private void setupPhysicalNetworks(Connection conn) { pstmt2.setLong(1, zoneId); ResultSet rsTags = pstmt2.executeQuery(); if (rsTags.next()) { - s_logger.debug("Network tags are not empty, might have to create more than one physical network..."); + logger.debug("Network tags are not empty, might have to create more than one physical network..."); //make sure setup does not use guest vnets if (vnet != null) { @@ -214,7 +212,7 @@ private void setupPhysicalNetworks(Connection conn) { + "6. Reconfigure the vnet ranges for each physical network as desired by using updatePhysicalNetwork API \n" + "7. Start all your VMs"; - s_logger.error(message); + logger.error(message); throw new CloudRuntimeException( "Cannot upgrade this setup since it uses guest vnet and will have multiple physical networks. 
Please check the logs for details on how to proceed"); @@ -263,7 +261,7 @@ private void setupPhysicalNetworks(Connection conn) { if (crtPbNtwk) { addTrafficType(conn, physicalNetworkId, "Public", xenPublicLabel, kvmPublicLabel, vmwarePublicLabel); } else { - s_logger.debug("Skip adding public traffic type to zone id=" + zoneId); + logger.debug("Skip adding public traffic type to zone id=" + zoneId); } addTrafficType(conn, physicalNetworkId, "Management", xenPrivateLabel, kvmPrivateLabel, vmwarePrivateLabel); addTrafficType(conn, physicalNetworkId, "Storage", xenStorageLabel, null, null); @@ -276,9 +274,9 @@ private void setupPhysicalNetworks(Connection conn) { PreparedStatement pstmt3 = conn.prepareStatement("SELECT network_id FROM `cloud`.`network_tags` where tag= ?"); pstmt3.setString(1,guestNetworkTag); ResultSet rsNet = pstmt3.executeQuery(); - s_logger.debug("Adding PhysicalNetwork to VLAN"); - s_logger.debug("Adding PhysicalNetwork to user_ip_address"); - s_logger.debug("Adding PhysicalNetwork to networks"); + logger.debug("Adding PhysicalNetwork to VLAN"); + logger.debug("Adding PhysicalNetwork to user_ip_address"); + logger.debug("Adding PhysicalNetwork to networks"); while (rsNet.next()) { Long networkId = rsNet.getLong(1); addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId, networkId); @@ -288,7 +286,7 @@ private void setupPhysicalNetworks(Connection conn) { // add the reference to this physical network for the default public network entries in vlan / user_ip_address tables // add first physicalNetworkId to op_dc_vnet_alloc for this zone - just a placeholder since direct networking don't need this if (isFirstPhysicalNtwk) { - s_logger.debug("Adding PhysicalNetwork to default Public network entries in vlan and user_ip_address"); + logger.debug("Adding PhysicalNetwork to default Public network entries in vlan and user_ip_address"); pstmt3 = conn.prepareStatement("SELECT id FROM `cloud`.`networks` where traffic_type = 'Public' and data_center_id = " + 
zoneId); ResultSet rsPubNet = pstmt3.executeQuery(); if (rsPubNet.next()) { @@ -297,7 +295,7 @@ private void setupPhysicalNetworks(Connection conn) { } pstmt3.close(); - s_logger.debug("Adding PhysicalNetwork to op_dc_vnet_alloc"); + logger.debug("Adding PhysicalNetwork to op_dc_vnet_alloc"); String updateVnet = "UPDATE `cloud`.`op_dc_vnet_alloc` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId; pstmtUpdate = conn.prepareStatement(updateVnet); pstmtUpdate.executeUpdate(); @@ -314,7 +312,7 @@ private void setupPhysicalNetworks(Connection conn) { if (crtPbNtwk) { addTrafficType(conn, physicalNetworkId, "Public", xenPublicLabel, kvmPublicLabel, vmwarePublicLabel); } else { - s_logger.debug("Skip adding public traffic type to zone id=" + zoneId); + logger.debug("Skip adding public traffic type to zone id=" + zoneId); } addTrafficType(conn, physicalNetworkId, "Management", xenPrivateLabel, kvmPrivateLabel, vmwarePrivateLabel); addTrafficType(conn, physicalNetworkId, "Storage", xenStorageLabel, null, null); @@ -323,28 +321,28 @@ private void setupPhysicalNetworks(Connection conn) { addDefaultSGProvider(conn, physicalNetworkId, zoneId, networkType, false); // add physicalNetworkId to op_dc_vnet_alloc for this zone - s_logger.debug("Adding PhysicalNetwork to op_dc_vnet_alloc"); + logger.debug("Adding PhysicalNetwork to op_dc_vnet_alloc"); String updateVnet = "UPDATE `cloud`.`op_dc_vnet_alloc` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId; pstmtUpdate = conn.prepareStatement(updateVnet); pstmtUpdate.executeUpdate(); pstmtUpdate.close(); // add physicalNetworkId to vlan for this zone - s_logger.debug("Adding PhysicalNetwork to VLAN"); + logger.debug("Adding PhysicalNetwork to VLAN"); String updateVLAN = "UPDATE `cloud`.`vlan` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId; pstmtUpdate = conn.prepareStatement(updateVLAN); pstmtUpdate.executeUpdate(); 
pstmtUpdate.close(); // add physicalNetworkId to user_ip_address for this zone - s_logger.debug("Adding PhysicalNetwork to user_ip_address"); + logger.debug("Adding PhysicalNetwork to user_ip_address"); String updateUsrIp = "UPDATE `cloud`.`user_ip_address` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId; pstmtUpdate = conn.prepareStatement(updateUsrIp); pstmtUpdate.executeUpdate(); pstmtUpdate.close(); // add physicalNetworkId to guest networks for this zone - s_logger.debug("Adding PhysicalNetwork to networks"); + logger.debug("Adding PhysicalNetwork to networks"); String updateNet = "UPDATE `cloud`.`networks` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId + " AND traffic_type = 'Guest'"; pstmtUpdate = conn.prepareStatement(updateNet); @@ -370,17 +368,17 @@ private void setupPhysicalNetworks(Connection conn) { } private void encryptData(Connection conn) { - s_logger.debug("Encrypting the data..."); + logger.debug("Encrypting the data..."); encryptConfigValues(conn); encryptHostDetails(conn); encryptVNCPassword(conn); encryptUserCredentials(conn); encryptVPNPassword(conn); - s_logger.debug("Done encrypting the data"); + logger.debug("Done encrypting the data"); } private void encryptConfigValues(Connection conn) { - s_logger.debug("Encrypting Config values"); + logger.debug("Encrypting Config values"); PreparedStatement pstmt = null; ResultSet rs = null; try { @@ -412,14 +410,14 @@ private void encryptConfigValues(Connection conn) { pstmt.close(); } } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } - s_logger.debug("Done encrypting Config values"); + logger.debug("Done encrypting Config values"); } private void encryptHostDetails(Connection conn) { - s_logger.debug("Encrypting host details"); + logger.debug("Encrypting host details"); List pstmt2Close = new ArrayList(); PreparedStatement pstmt = null; ResultSet rs = null; @@ -447,11 +445,11 @@ 
private void encryptHostDetails(Connection conn) { } finally { TransactionLegacy.closePstmts(pstmt2Close); } - s_logger.debug("Done encrypting host details"); + logger.debug("Done encrypting host details"); } private void encryptVNCPassword(Connection conn) { - s_logger.debug("Encrypting vm_instance vnc_password"); + logger.debug("Encrypting vm_instance vnc_password"); List pstmt2Close = new ArrayList(); PreparedStatement pstmt = null; ResultSet rs = null; @@ -493,11 +491,11 @@ private void encryptVNCPassword(Connection conn) { } finally { TransactionLegacy.closePstmts(pstmt2Close); } - s_logger.debug("Done encrypting vm_instance vnc_password"); + logger.debug("Done encrypting vm_instance vnc_password"); } private void encryptUserCredentials(Connection conn) { - s_logger.debug("Encrypting user keys"); + logger.debug("Encrypting user keys"); List pstmt2Close = new ArrayList(); PreparedStatement pstmt = null; ResultSet rs = null; @@ -526,11 +524,11 @@ private void encryptUserCredentials(Connection conn) { } finally { TransactionLegacy.closePstmts(pstmt2Close); } - s_logger.debug("Done encrypting user keys"); + logger.debug("Done encrypting user keys"); } private void encryptVPNPassword(Connection conn) { - s_logger.debug("Encrypting vpn_users password"); + logger.debug("Encrypting vpn_users password"); List pstmt2Close = new ArrayList(); PreparedStatement pstmt = null; ResultSet rs = null; @@ -559,7 +557,7 @@ private void encryptVPNPassword(Connection conn) { } finally { TransactionLegacy.closePstmts(pstmt2Close); } - s_logger.debug("Done encrypting vpn_users password"); + logger.debug("Done encrypting vpn_users password"); } private void dropKeysIfExist(Connection conn) { @@ -570,7 +568,7 @@ private void dropKeysIfExist(Connection conn) { uniqueKeys.put("secondary_storage_vm", keys); // drop keys - s_logger.debug("Dropping public_ip_address keys from `cloud`.`secondary_storage_vm` and console_proxy tables..."); + logger.debug("Dropping public_ip_address keys from 
`cloud`.`secondary_storage_vm` and console_proxy tables..."); for (String tableName : uniqueKeys.keySet()) { DbUpgradeUtils.dropKeysIfExist(conn, tableName, uniqueKeys.get(tableName), false); } @@ -697,7 +695,7 @@ private void updateDomainNetworkRef(Connection conn) { pstmt2Close.add(pstmt); pstmt.setBoolean(1, subdomainAccess); pstmt.executeUpdate(); - s_logger.debug("Successfully updated subdomain_access field in network_domain table with value " + subdomainAccess); + logger.debug("Successfully updated subdomain_access field in network_domain table with value " + subdomainAccess); } // convert zone level 2.2.x networks to ROOT domain 3.0 access networks @@ -710,7 +708,7 @@ private void updateDomainNetworkRef(Connection conn) { pstmt2Close.add(pstmt); pstmt.setLong(1, networkId); pstmt.executeUpdate(); - s_logger.debug("Successfully converted zone specific network id=" + networkId + " to the ROOT domain level network with subdomain access set to true"); + logger.debug("Successfully converted zone specific network id=" + networkId + " to the ROOT domain level network with subdomain access set to true"); } } catch (SQLException e) { @@ -745,7 +743,7 @@ protected void createNetworkServices(Connection conn) { pstmt.setString(3, provider); pstmt.executeUpdate(); } - s_logger.debug("Created service/provider map for network id=" + networkId); + logger.debug("Created service/provider map for network id=" + networkId); } } catch (SQLException e) { throw new CloudRuntimeException("Unable to create service/provider map for networks", e); @@ -757,7 +755,7 @@ protected void createNetworkServices(Connection conn) { protected void updateRouters(Connection conn) { PreparedStatement pstmt = null; try { - s_logger.debug("Updating domain_router table"); + logger.debug("Updating domain_router table"); pstmt = conn.prepareStatement("UPDATE domain_router, virtual_router_providers vrp LEFT JOIN (physical_network_service_providers pnsp INNER JOIN physical_network pntwk INNER JOIN 
vm_instance vm INNER JOIN domain_router vr) ON (vrp.nsp_id = pnsp.id AND pnsp.physical_network_id = pntwk.id AND pntwk.data_center_id = vm.data_center_id AND vm.id=vr.id) SET vr.element_id=vrp.id;"); pstmt.executeUpdate(); @@ -793,7 +791,7 @@ protected void updateReduntantRouters(Connection conn) { ntwkOffCount = rs1.getLong(1); } - s_logger.debug("Have " + ntwkOffCount + " networkOfferings"); + logger.debug("Have " + ntwkOffCount + " networkOfferings"); pstmt = conn.prepareStatement("CREATE TEMPORARY TABLE `cloud`.`network_offerings2` ENGINE=MEMORY SELECT * FROM `cloud`.`network_offerings` WHERE id=1"); pstmt2Close.add(pstmt); pstmt.executeUpdate(); @@ -803,7 +801,7 @@ protected void updateReduntantRouters(Connection conn) { while (rs.next()) { long networkId = rs.getLong(1); long networkOfferingId = rs.getLong(2); - s_logger.debug("Updating network offering for the network id=" + networkId + " as it has redundant routers"); + logger.debug("Updating network offering for the network id=" + networkId + " as it has redundant routers"); Long newNetworkOfferingId = null; if (!newNetworkOfferingMap.containsKey(networkOfferingId)) { @@ -852,7 +850,7 @@ protected void updateReduntantRouters(Connection conn) { pstmt.executeUpdate(); } - s_logger.debug("Successfully updated network offering id=" + networkId + " with new network offering id " + newNetworkOfferingId); + logger.debug("Successfully updated network offering id=" + networkId + " with new network offering id " + newNetworkOfferingId); } } catch (SQLException e) { @@ -863,7 +861,7 @@ protected void updateReduntantRouters(Connection conn) { pstmt.executeUpdate(); pstmt.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } TransactionLegacy.closePstmts(pstmt2Close); } @@ -873,7 +871,7 @@ protected void updateHostCapacity(Connection conn) { List pstmt2Close = new ArrayList(); PreparedStatement pstmt = null; try { - s_logger.debug("Updating op_host_capacity table, column 
capacity_state"); + logger.debug("Updating op_host_capacity table, column capacity_state"); pstmt = conn.prepareStatement("UPDATE op_host_capacity, host SET op_host_capacity.capacity_state='Disabled' where host.id=op_host_capacity.host_id and op_host_capacity.capacity_type in (0,1) and host.resource_state='Disabled';"); pstmt2Close.add(pstmt); @@ -912,7 +910,7 @@ protected void switchAccountSpecificNetworksToIsolated(Connection conn) { pstmt2Close.add(pstmt); rs = pstmt.executeQuery(); } catch (Exception ex) { - s_logger.debug("switch_to_isolated field is not present in networks table"); + logger.debug("switch_to_isolated field is not present in networks table"); if (pstmt != null) { pstmt.close(); } @@ -932,7 +930,7 @@ protected void switchAccountSpecificNetworksToIsolated(Connection conn) { ntwkOffCount = rs1.getLong(1); } - s_logger.debug("Have " + ntwkOffCount + " networkOfferings"); + logger.debug("Have " + ntwkOffCount + " networkOfferings"); pstmt = conn.prepareStatement("CREATE TEMPORARY TABLE `cloud`.`network_offerings2` ENGINE=MEMORY SELECT * FROM `cloud`.`network_offerings` WHERE id=1"); pstmt2Close.add(pstmt); pstmt.executeUpdate(); @@ -942,7 +940,7 @@ protected void switchAccountSpecificNetworksToIsolated(Connection conn) { while (rs.next()) { long networkId = rs.getLong(1); long networkOfferingId = rs.getLong(2); - s_logger.debug("Updating network offering for the network id=" + networkId + " as it has switch_to_isolated=1"); + logger.debug("Updating network offering for the network id=" + networkId + " as it has switch_to_isolated=1"); Long newNetworkOfferingId = null; if (!newNetworkOfferingMap.containsKey(networkOfferingId)) { @@ -983,7 +981,7 @@ protected void switchAccountSpecificNetworksToIsolated(Connection conn) { pstmt.executeUpdate(); } - s_logger.debug("Successfully updated network offering id=" + networkId + " with new network offering id " + newNetworkOfferingId); + logger.debug("Successfully updated network offering id=" + networkId + " 
with new network offering id " + newNetworkOfferingId); } try { @@ -992,7 +990,7 @@ protected void switchAccountSpecificNetworksToIsolated(Connection conn) { pstmt.executeUpdate(); } catch (SQLException ex) { // do nothing here - s_logger.debug("Caught SQLException when trying to drop switch_to_isolated column ", ex); + logger.debug("Caught SQLException when trying to drop switch_to_isolated column ", ex); } } catch (SQLException e) { @@ -1003,7 +1001,7 @@ protected void switchAccountSpecificNetworksToIsolated(Connection conn) { pstmt.executeUpdate(); pstmt.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } TransactionLegacy.closePstmts(pstmt2Close); } @@ -1057,7 +1055,7 @@ private void migrateUserConcentratedPlannerChoice(Connection conn) { pstmt.close(); } } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } } @@ -1107,7 +1105,7 @@ protected String fixNetworksWithExternalDevices(Connection conn) { while (rs.next()) { long networkId = rs.getLong(1); long networkOfferingId = rs.getLong(2); - s_logger.debug("Updating network offering for the network id=" + networkId + " as it has switch_to_isolated=1"); + logger.debug("Updating network offering for the network id=" + networkId + " as it has switch_to_isolated=1"); Long newNetworkOfferingId = null; if (!newNetworkOfferingMap.containsKey(networkOfferingId)) { uniqueName = "Isolated with external providers"; @@ -1150,7 +1148,7 @@ protected String fixNetworksWithExternalDevices(Connection conn) { pstmt.executeUpdate(); } - s_logger.debug("Successfully updated network id=" + networkId + " with new network offering id " + newNetworkOfferingId); + logger.debug("Successfully updated network id=" + networkId + " with new network offering id " + newNetworkOfferingId); } } catch (SQLException e) { @@ -1159,7 +1157,7 @@ protected String fixNetworksWithExternalDevices(Connection conn) { try (PreparedStatement dropStatement = 
conn.prepareStatement("DROP TABLE `cloud`.`network_offerings2`");){ dropStatement.executeUpdate(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } TransactionLegacy.closePstmts(pstmt2Close); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade221to222.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade221to222.java index 41198adb14bd..307b72c61ea5 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade221to222.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade221to222.java @@ -21,7 +21,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade221to222 implements DbUpgrade { +public class Upgrade221to222 extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java index 51a929d0377e..b891b02ea572 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224.java @@ -25,13 +25,11 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.capacity.Capacity; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade222to224 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade222to224.class); +public class Upgrade222to224 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -64,7 +62,7 @@ private void fixRelatedFkeyOnNetworksTable(Connection conn) throws SQLException try { pstmt.executeUpdate(); } catch (SQLException e) { - s_logger.debug("Ignore if the key is not there."); + logger.debug("Ignore if the key is not there."); } pstmt.close(); @@ -130,11 +128,11 @@ private void checkForDuplicatePublicNetworks(Connection conn) { } if 
(zonesWithDuplicateNetworks.size() > 0) { - s_logger.warn(errorMsg + zonesWithDuplicateNetworks); + logger.warn(errorMsg + zonesWithDuplicateNetworks); } } catch (SQLException e) { - s_logger.warn(e); + logger.warn(e); throw new CloudRuntimeException("Unable to check for duplicate public networks as part of 222 to 224 upgrade."); } } @@ -208,21 +206,21 @@ private void updateClusterIdInOpHostCapacity(Connection conn) { try { pstmtUpdate.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } if (rs != null) { try { rs.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } if (pstmt != null) { try { pstmt.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } @@ -275,7 +273,7 @@ private void updateUserStatsWithNetwork(Connection conn) { ResultSet rs1 = pstmt.executeQuery(); if (rs1.next()) { - s_logger.debug("Not updating user_statistics table for domR id=" + instanceId + " as domR is already expunged"); + logger.debug("Not updating user_statistics table for domR id=" + instanceId + " as domR is already expunged"); continue; } @@ -301,7 +299,7 @@ private void updateUserStatsWithNetwork(Connection conn) { rs.close(); pstmt.close(); - s_logger.debug("Upgraded user_statistics with networkId for DomainRouter device type"); + logger.debug("Upgraded user_statistics with networkId for DomainRouter device type"); // update network_id information for ExternalFirewall and ExternalLoadBalancer device types PreparedStatement pstmt1 = @@ -310,9 +308,9 @@ private void updateUserStatsWithNetwork(Connection conn) { pstmt1.executeUpdate(); pstmt1.close(); - s_logger.debug("Upgraded user_statistics with networkId for ExternalFirewall and ExternalLoadBalancer device types"); + logger.debug("Upgraded user_statistics with networkId for ExternalFirewall and ExternalLoadBalancer device types"); - s_logger.debug("Successfully update user_statistics 
table with network_ids as a part of 222 to 224 upgrade"); + logger.debug("Successfully update user_statistics table with network_ids as a part of 222 to 224 upgrade"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to update user_statistics table with network_ids as a part of 222 to 224 upgrade", e); @@ -327,7 +325,7 @@ private void dropIndexIfExists(Connection conn) { if (rs.next()) { pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`domain` DROP INDEX `path`"); pstmt.executeUpdate(); - s_logger.debug("Unique key 'path' is removed successfully"); + logger.debug("Unique key 'path' is removed successfully"); } rs.close(); @@ -346,7 +344,7 @@ private void fixBasicZoneNicCount(Connection conn) { Long zoneId = rs.getLong(1); Long networkId = null; Long vmCount = 0L; - s_logger.debug("Updating basic zone id=" + zoneId + " with correct nic count"); + logger.debug("Updating basic zone id=" + zoneId + " with correct nic count"); pstmt = conn.prepareStatement("SELECT id from networks where data_center_id=? 
AND guest_type='Direct'"); pstmt.setLong(1, zoneId); @@ -372,7 +370,7 @@ private void fixBasicZoneNicCount(Connection conn) { } - s_logger.debug("Basic zones are updated with correct nic counts successfully"); + logger.debug("Basic zones are updated with correct nic counts successfully"); rs.close(); pstmt.close(); } catch (SQLException e) { @@ -386,7 +384,7 @@ private void updateTotalCPUInOpHostCapacity(Connection conn) { PreparedStatement pstmtUpdate = null; try { // Load all Routing hosts - s_logger.debug("Updating total CPU capacity entries in op_host_capacity"); + logger.debug("Updating total CPU capacity entries in op_host_capacity"); pstmt = conn.prepareStatement("SELECT id, cpus, speed FROM host WHERE type = 'Routing'"); rs = pstmt.executeQuery(); while (rs.next()) { @@ -410,21 +408,21 @@ private void updateTotalCPUInOpHostCapacity(Connection conn) { try { pstmtUpdate.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } if (rs != null) { try { rs.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } if (pstmt != null) { try { pstmt.close(); } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } @@ -439,7 +437,7 @@ private void upgradeGuestOs(Connection conn) { if (!rs.next()) { pstmt = conn.prepareStatement("INSERT INTO `cloud`.`guest_os` (id, category_id, display_name) VALUES (138, 7, 'None')"); pstmt.executeUpdate(); - s_logger.debug("Inserted NONE category to guest_os table"); + logger.debug("Inserted NONE category to guest_os table"); } rs.close(); @@ -488,7 +486,7 @@ private void updateFkeysAndIndexes(Connection conn) throws SQLException { try { pstmt.executeUpdate(); } catch (SQLException e) { - s_logger.debug("Ignore if the key is not there."); + logger.debug("Ignore if the key is not there."); } pstmt.close(); } @@ -499,7 +497,7 @@ private void updateFkeysAndIndexes(Connection conn) throws SQLException { try { 
pstmt.executeUpdate(); } catch (SQLException e) { - s_logger.debug("Ignore if the index is not there."); + logger.debug("Ignore if the index is not there."); } pstmt.close(); } @@ -613,7 +611,7 @@ private void fixIPResourceCount(Connection conn) throws SQLException { pstmt.close(); - s_logger.debug("Resource limit is cleaned up successfully as a part of db upgrade"); + logger.debug("Resource limit is cleaned up successfully as a part of db upgrade"); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224Premium.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224Premium.java index ac7bd120c3da..cc5d3399fa6d 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224Premium.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade222to224Premium.java @@ -20,12 +20,10 @@ import java.sql.Connection; import java.sql.PreparedStatement; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade222to224Premium extends Upgrade222to224 { - final static Logger s_logger = Logger.getLogger(Upgrade222to224Premium.class); @Override public InputStream[] getPrepareScripts() { @@ -55,7 +53,7 @@ private void updateUserStats(Connection conn) { ) { pstmt.executeUpdate(); - s_logger.debug("Upgraded cloud_usage user_statistics with networkId"); + logger.debug("Upgraded cloud_usage user_statistics with networkId"); } catch (Exception e) { throw new CloudRuntimeException("Failed to upgrade user stats: ", e); } @@ -66,7 +64,7 @@ private void updateUserStats(Connection conn) { + "us.network_id where us.account_id = un.account_id and us.data_center_id = un.zone_id and us.device_id = un.host_id"); ) { pstmt1.executeUpdate(); - s_logger.debug("Upgraded cloud_usage usage_network with networkId"); + logger.debug("Upgraded cloud_usage usage_network with networkId"); } catch (Exception e) { throw new CloudRuntimeException("Failed to upgrade user stats: ", e); } diff 
--git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java index 48908f5c7d53..4d88e1a6f401 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade224to225.java @@ -25,12 +25,10 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade224to225 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade224to225.class); +public class Upgrade224to225 extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { @@ -80,7 +78,7 @@ public boolean supportsRollingUpgrade() { } private void createSecurityGroups(Connection conn) { - s_logger.debug("Creating missing default security group as a part of 224-225 upgrade"); + logger.debug("Creating missing default security group as a part of 224-225 upgrade"); try { List accounts = new ArrayList(); PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM account WHERE removed IS NULL and id != 1"); @@ -95,7 +93,7 @@ private void createSecurityGroups(Connection conn) { pstmt.setLong(1, accountId); rs = pstmt.executeQuery(); if (!rs.next()) { - s_logger.debug("Default security group is missing for account id=" + accountId + " so adding it"); + logger.debug("Default security group is missing for account id=" + accountId + " so adding it"); // get accountName/domainId information @@ -208,7 +206,7 @@ private void dropTableColumnsIfExist(Connection conn) { columns.add("guest_ip_type"); tablesToModify.put("service_offering", columns); - s_logger.debug("Dropping columns that don't exist in 2.2.5 version of the DB..."); + logger.debug("Dropping columns that don't exist in 2.2.5 version of the DB..."); for (String tableName : tablesToModify.keySet()) { DbUpgradeUtils.dropTableColumnsIfExist(conn, 
tableName, tablesToModify.get(tableName)); } @@ -277,7 +275,7 @@ private void dropKeysIfExist(Connection conn) { indexes.put("remote_access_vpn", keys); // drop all foreign keys first - s_logger.debug("Dropping keys that don't exist in 2.2.5 version of the DB..."); + logger.debug("Dropping keys that don't exist in 2.2.5 version of the DB..."); for (String tableName : foreignKeys.keySet()) { DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true); } @@ -291,7 +289,7 @@ private void dropKeysIfExist(Connection conn) { private void addMissingKeys(Connection conn) { PreparedStatement pstmt = null; try { - s_logger.debug("Adding missing foreign keys"); + logger.debug("Adding missing foreign keys"); HashMap keyToTableMap = new HashMap(); keyToTableMap.put("fk_console_proxy__id", "console_proxy"); @@ -325,13 +323,13 @@ private void addMissingKeys(Connection conn) { pstmt = conn.prepareStatement("ALTER TABLE " + tableName + " ADD CONSTRAINT " + key + " FOREIGN KEY " + keyToStatementMap.get(key)); pstmt.executeUpdate(); - s_logger.debug("Added missing key " + key + " to table " + tableName); + logger.debug("Added missing key " + key + " to table " + tableName); rs.close(); } - s_logger.debug("Missing keys were added successfully as a part of 224 to 225 upgrade"); + logger.debug("Missing keys were added successfully as a part of 224 to 225 upgrade"); pstmt.close(); } catch (SQLException e) { - s_logger.error("Unable to add missing foreign key; following statement was executed:" + pstmt); + logger.error("Unable to add missing foreign key; following statement was executed:" + pstmt); throw new CloudRuntimeException("Unable to add missing keys due to exception", e); } } @@ -341,13 +339,13 @@ private void addMissingOvsAccount(Connection conn) { PreparedStatement pstmt = conn.prepareStatement("SELECT * from ovs_tunnel_account"); ResultSet rs = pstmt.executeQuery(); if (!rs.next()) { - s_logger.debug("Adding missing ovs tunnel account"); + 
logger.debug("Adding missing ovs tunnel account"); pstmt = conn.prepareStatement("INSERT INTO `cloud`.`ovs_tunnel_account` (`from`, `to`, `account`, `key`, `port_name`, `state`) VALUES (0, 0, 0, 0, 'lock', 'SUCCESS')"); pstmt.executeUpdate(); } } catch (SQLException e) { - s_logger.error("Unable to add missing ovs tunnel account due to ", e); + logger.error("Unable to add missing ovs tunnel account due to ", e); throw new CloudRuntimeException("Unable to add missing ovs tunnel account due to ", e); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade225to226.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade225to226.java index f606d6e756f4..99bf1f981daa 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade225to226.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade225to226.java @@ -22,12 +22,10 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade225to226 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade225to226.class); +public class Upgrade225to226 extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { @@ -75,7 +73,7 @@ private void dropTableColumnsIfExist(Connection conn) { columns.add("domain_id"); tablesToModify.put("domain_router", columns); - s_logger.debug("Dropping columns that don't exist in 2.2.6 version of the DB..."); + logger.debug("Dropping columns that don't exist in 2.2.6 version of the DB..."); for (String tableName : tablesToModify.keySet()) { DbUpgradeUtils.dropTableColumnsIfExist(conn, tableName, tablesToModify.get(tableName)); } @@ -95,7 +93,7 @@ private void dropKeysIfExist(Connection conn) { indexes.put("domain_router", keys); // drop all foreign keys first - s_logger.debug("Dropping keys that don't exist in 2.2.6 version of the DB..."); + logger.debug("Dropping keys that don't exist in 2.2.6 version of the 
DB..."); for (String tableName : foreignKeys.keySet()) { DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228.java index 7d665718db44..3c85ee21d1bd 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228.java @@ -23,12 +23,10 @@ import java.sql.SQLException; import java.util.ArrayList; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade227to228 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade227to228.class); +public class Upgrade227to228 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -79,7 +77,7 @@ public void performDataMigration(Connection conn) { pstmt.executeUpdate(); } catch (SQLException e) { - s_logger.error("Failed to DB migration for multiple secondary storages", e); + logger.error("Failed to DB migration for multiple secondary storages", e); throw new CloudRuntimeException("Failed to DB migration for multiple secondary storages", e); } @@ -93,7 +91,7 @@ public InputStream[] getCleanupScripts() { } private void updateDomainLevelNetworks(Connection conn) { - s_logger.debug("Updating domain level specific networks..."); + logger.debug("Updating domain level specific networks..."); try { PreparedStatement pstmt = conn.prepareStatement("SELECT n.id FROM networks n, network_offerings o WHERE n.shared=1 AND o.system_only=0 AND o.id=n.network_offering_id"); @@ -113,7 +111,7 @@ private void updateDomainLevelNetworks(Connection conn) { pstmt.setLong(1, networkId); rs = pstmt.executeQuery(); if (rs.next()) { - s_logger.debug("Setting network id=" + networkId + " as domain specific shared network"); + logger.debug("Setting network id=" + networkId + " as 
domain specific shared network"); pstmt = conn.prepareStatement("UPDATE networks set is_domain_specific=1 where id=?"); pstmt.setLong(1, networkId); pstmt.executeUpdate(); @@ -122,9 +120,9 @@ private void updateDomainLevelNetworks(Connection conn) { pstmt.close(); } - s_logger.debug("Successfully updated domain level specific networks"); + logger.debug("Successfully updated domain level specific networks"); } catch (SQLException e) { - s_logger.error("Failed to set domain specific shared networks due to ", e); + logger.error("Failed to set domain specific shared networks due to ", e); throw new CloudRuntimeException("Failed to set domain specific shared networks due to ", e); } } @@ -132,7 +130,7 @@ private void updateDomainLevelNetworks(Connection conn) { //this method inserts missing volume.delete events (events were missing when vm failed to create) private void updateVolumeUsageRecords(Connection conn) { try { - s_logger.debug("Inserting missing usage_event records for destroyed volumes..."); + logger.debug("Inserting missing usage_event records for destroyed volumes..."); PreparedStatement pstmt = conn.prepareStatement("select id, account_id, data_center_id, name from volumes where state='Destroy' and id in (select resource_id from usage_event where type='volume.create') and id not in (select resource_id from usage_event where type='volume.delete')"); ResultSet rs = pstmt.executeQuery(); @@ -151,9 +149,9 @@ private void updateVolumeUsageRecords(Connection conn) { pstmt.executeUpdate(); } - s_logger.debug("Successfully inserted missing usage_event records for destroyed volumes"); + logger.debug("Successfully inserted missing usage_event records for destroyed volumes"); } catch (SQLException e) { - s_logger.error("Failed to insert missing delete usage records ", e); + logger.error("Failed to insert missing delete usage records ", e); throw new CloudRuntimeException("Failed to insert missing delete usage records ", e); } } diff --git 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228Premium.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228Premium.java index 032fb58da862..4787017ce4b5 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228Premium.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade227to228Premium.java @@ -22,12 +22,10 @@ import java.sql.ResultSet; import java.sql.SQLException; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade227to228Premium extends Upgrade227to228 { - final static Logger s_logger = Logger.getLogger(Upgrade227to228Premium.class); @Override public InputStream[] getPrepareScripts() { @@ -59,7 +57,7 @@ private void addSourceIdColumn(Connection conn) { ResultSet rs = pstmt.executeQuery(); if (rs.next()) { - s_logger.info("The source id field already exist, not adding it"); + logger.info("The source id field already exist, not adding it"); } } catch (Exception e) { @@ -68,21 +66,21 @@ private void addSourceIdColumn(Connection conn) { } if (insertField) { - s_logger.debug("Adding source_id to usage_storage..."); + logger.debug("Adding source_id to usage_storage..."); pstmt = conn.prepareStatement("ALTER TABLE `cloud_usage`.`usage_storage` ADD COLUMN `source_id` bigint unsigned"); pstmt.executeUpdate(); - s_logger.debug("Column source_id was added successfully to usage_storage table"); + logger.debug("Column source_id was added successfully to usage_storage table"); pstmt.close(); } } catch (SQLException e) { - s_logger.error("Failed to add source_id to usage_storage due to ", e); + logger.error("Failed to add source_id to usage_storage due to ", e); throw new CloudRuntimeException("Failed to add source_id to usage_storage due to ", e); } } private void addNetworkIdsToUserStats(Connection conn) { - s_logger.debug("Adding network IDs to user stats..."); + logger.debug("Adding network IDs to user stats..."); try { String stmt = "SELECT 
DISTINCT public_ip_address FROM `cloud`.`user_statistics` WHERE public_ip_address IS NOT null"; PreparedStatement pstmt = conn.prepareStatement(stmt); @@ -112,10 +110,10 @@ private void addNetworkIdsToUserStats(Connection conn) { rs.close(); pstmt.close(); - s_logger.debug("Successfully added network IDs to user stats."); + logger.debug("Successfully added network IDs to user stats."); } catch (SQLException e) { String errorMsg = "Failed to add network IDs to user stats."; - s_logger.error(errorMsg, e); + logger.error(errorMsg, e); throw new CloudRuntimeException(errorMsg, e); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade228to229.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade228to229.java index c556cd900896..bd95a0671298 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade228to229.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade228to229.java @@ -24,12 +24,10 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade228to229 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade228to229.class); +public class Upgrade228to229 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -126,7 +124,7 @@ private void dropKeysIfExist(Connection conn) { foreignKeys.put("network_tags", keys); // drop all foreign keys first - s_logger.debug("Dropping keys that don't exist in 2.2.6 version of the DB..."); + logger.debug("Dropping keys that don't exist in 2.2.6 version of the DB..."); for (String tableName : foreignKeys.keySet()) { DbUpgradeUtils.dropKeysIfExist(conn, tableName, foreignKeys.get(tableName), true); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade229to2210.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade229to2210.java index 1ad7e6d2b4c5..3d4725c108f2 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade229to2210.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade229to2210.java @@ -23,12 +23,10 @@ import java.sql.SQLException; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade229to2210 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade229to2210.class); +public class Upgrade229to2210 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -128,7 +126,7 @@ private void updateFirewallRules(Connection conn) { pstmt.setString(8, UUID.randomUUID().toString()); pstmt.setLong(9, id); - s_logger.debug("Updating firewall rule with the statement " + pstmt); + logger.debug("Updating firewall rule with the statement " + pstmt); pstmt.executeUpdate(); //get new FirewallRule update @@ -159,12 +157,12 @@ private void updateFirewallRules(Connection conn) { pstmt = conn.prepareStatement("update firewall_rules_cidrs set firewall_rule_id=? 
where firewall_rule_id=?"); pstmt.setLong(1, firewallRuleId); pstmt.setLong(2, id); - s_logger.debug("Updating existing cidrs for the rule id=" + id + " with the new Firewall rule id=" + firewallRuleId + " with statement" + pstmt); + logger.debug("Updating existing cidrs for the rule id=" + id + " with the new Firewall rule id=" + firewallRuleId + " with statement" + pstmt); pstmt.executeUpdate(); } else { pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')"); pstmt.setLong(1, firewallRuleId); - s_logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + pstmt); + logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + pstmt); pstmt.executeUpdate(); } } @@ -180,7 +178,7 @@ private void updateFirewallRules(Connection conn) { pstmt.close(); } } catch (SQLException e) { - s_logger.info("[ignored]",e); + logger.info("[ignored]",e); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java index ba479b52f89f..28e8d89dff08 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade301to302.java @@ -26,12 +26,10 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade301to302 extends LegacyDbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade301to302.class); @Override public String[] getUpgradableVersionRange() { @@ -66,7 +64,7 @@ private void dropKeysIfExists(Connection conn) { keys.add("i_host__allocation_state"); uniqueKeys.put("host", keys); - s_logger.debug("Dropping i_host__allocation_state key in host table"); + logger.debug("Dropping i_host__allocation_state key in 
host table"); for (String tableName : uniqueKeys.keySet()) { DbUpgradeUtils.dropKeysIfExist(conn, tableName, uniqueKeys.get(tableName), false); } @@ -129,7 +127,7 @@ protected void updateSharedNetworks(Connection conn) { pstmt = conn.prepareStatement("DELETE FROM `cloud`.`ntwk_offering_service_map` WHERE id=?"); pstmt.setLong(1, mapId); pstmt.executeUpdate(); - s_logger.debug("Deleted lb service for network offering id=" + ntwkOffId + " as it doesn't have source nat service enabled"); + logger.debug("Deleted lb service for network offering id=" + ntwkOffId + " as it doesn't have source nat service enabled"); //delete lb service for the network pstmt = @@ -144,7 +142,7 @@ protected void updateSharedNetworks(Connection conn) { pstmt = conn.prepareStatement("DELETE FROM `cloud`.`ntwk_service_map` WHERE id=?"); pstmt.setLong(1, mapId); pstmt.executeUpdate(); - s_logger.debug("Deleted lb service for network id=" + ntwkId + " as it doesn't have source nat service enabled"); + logger.debug("Deleted lb service for network id=" + ntwkId + " as it doesn't have source nat service enabled"); } } @@ -180,14 +178,14 @@ private void fixLastHostIdKey(Connection conn) { } private void changeEngine(Connection conn) { - s_logger.debug("Fixing engine and row_format for op_lock and op_nwgrp_work tables"); + logger.debug("Fixing engine and row_format for op_lock and op_nwgrp_work tables"); String sqlOpLock = "ALTER TABLE `cloud`.`op_lock` ENGINE=MEMORY, ROW_FORMAT = FIXED"; try ( PreparedStatement pstmt = conn.prepareStatement(sqlOpLock); ) { pstmt.executeUpdate(); } catch (Exception e) { - s_logger.debug("Failed do execute the statement " + sqlOpLock + ", moving on as it's not critical fix"); + logger.debug("Failed do execute the statement " + sqlOpLock + ", moving on as it's not critical fix"); } String sqlOpNwgrpWork = "ALTER TABLE `cloud`.`op_nwgrp_work` ENGINE=MEMORY, ROW_FORMAT = FIXED"; @@ -196,7 +194,7 @@ private void changeEngine(Connection conn) { ) { pstmt.executeUpdate(); } 
catch (Exception e) { - s_logger.debug("Failed do execute the statement " + sqlOpNwgrpWork + ", moving on as it's not critical fix"); + logger.debug("Failed do execute the statement " + sqlOpNwgrpWork + ", moving on as it's not critical fix"); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to303.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to303.java index e07c98dd4489..91b9b3849a8d 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to303.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to303.java @@ -28,14 +28,12 @@ import java.sql.SQLException; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade302to303 extends LegacyDbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade302to303.class); @Override public String[] getUpgradableVersionRange() { @@ -142,7 +140,7 @@ private void setupExternalNetworkDevices(Connection conn) { private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetworkId) { PreparedStatement pstmtUpdate = null; try { - s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); + logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); String insertF5 = "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, is_inline, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; @@ -169,7 +167,7 @@ private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetwor private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId) { PreparedStatement pstmtUpdate = null; try { - 
s_logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); + logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); String insertSrx = "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)"; @@ -195,7 +193,7 @@ private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long PreparedStatement pstmtUpdate = null; try { // add physical network service provider - F5BigIp - s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); + logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," @@ -219,7 +217,7 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long PreparedStatement pstmtUpdate = null; try { // add physical network service provider - JuniperSRX - s_logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); + logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," @@ -241,7 +239,7 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long private void encryptConfig(Connection conn) { //Encrypt config params and change category to Hidden - 
s_logger.debug("Encrypting Config values"); + logger.debug("Encrypting Config values"); PreparedStatement pstmt = null; ResultSet rs = null; try { @@ -268,7 +266,7 @@ private void encryptConfig(Connection conn) { closeAutoCloseable(rs); closeAutoCloseable(pstmt); } - s_logger.debug("Done encrypting Config values"); + logger.debug("Done encrypting Config values"); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java index eb0492cd288b..aa427252585f 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java @@ -27,14 +27,12 @@ import java.util.List; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade302to40 extends Upgrade30xBase { - final static Logger s_logger = Logger.getLogger(Upgrade302to40.class); @Override public String[] getUpgradableVersionRange() { @@ -211,9 +209,9 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { if (rsSameLabel.next()) { Long sameLabelcount = rsSameLabel.getLong(1); if (sameLabelcount > 0) { - s_logger.error("There are untagged networks for which we need to add a physical network with Xen traffic label = 'xen.guest.network.device' config value, which is: " + + logger.error("There are untagged networks for which we need to add a physical network with Xen traffic label = 'xen.guest.network.device' config value, which is: " + xenGuestLabel); - s_logger.error("However already there are " + sameLabelcount + " physical networks setup with same traffic label, cannot upgrade"); + logger.error("However already there are " + sameLabelcount + " physical networks setup with same traffic label, cannot upgrade"); throw new CloudRuntimeException("Cannot 
upgrade this setup since a physical network with same traffic label: " + xenGuestLabel + " already exists, Please check logs and contact Support."); } @@ -230,9 +228,9 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { conn.prepareStatement("SELECT n.id FROM networks n WHERE n.physical_network_id IS NULL AND n.traffic_type = 'Guest' and n.data_center_id = ? and n.removed is null"); pstmt3.setLong(1, zoneId); ResultSet rsNet = pstmt3.executeQuery(); - s_logger.debug("Adding PhysicalNetwork to VLAN"); - s_logger.debug("Adding PhysicalNetwork to user_ip_address"); - s_logger.debug("Adding PhysicalNetwork to networks"); + logger.debug("Adding PhysicalNetwork to VLAN"); + logger.debug("Adding PhysicalNetwork to user_ip_address"); + logger.debug("Adding PhysicalNetwork to networks"); while (rsNet.next()) { Long networkId = rsNet.getLong(1); addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId, networkId); @@ -253,7 +251,7 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { if (rs.next()) { Long count = rs.getLong(1); if (count > 1) { - s_logger.debug("There are " + count + " physical networks setup"); + logger.debug("There are " + count + " physical networks setup"); multiplePhysicalNetworks = true; } } @@ -272,7 +270,7 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { String networkId = rsVNet.getString(5); String vpid = rsVNet.getString(4); String npid = rsVNet.getString(6); - s_logger.error("Guest Vnet assignment is set wrongly . Cannot upgrade until that is corrected. Example- Vnet: " + vnet + + logger.error("Guest Vnet assignment is set wrongly . Cannot upgrade until that is corrected. Example- Vnet: " + vnet + " has physical network id: " + vpid + " ,but the guest network: " + networkId + " that uses it has physical network id: " + npid); String message = "Cannot upgrade. Your setup has multiple Physical Networks and is using guest Vnet that is assigned wrongly. 
" @@ -291,7 +289,7 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { + "5. Run upgrade. This will allocate all your guest vnet range to first physical network. \n" + "6. Reconfigure the vnet ranges for each physical network as desired by using updatePhysicalNetwork API \n" + "7. Start all your VMs"; - s_logger.error(message); + logger.error(message); throw new CloudRuntimeException("Cannot upgrade this setup since Guest Vnet assignment to the multiple physical " + "networks is incorrect. Please check the logs for details on how to proceed"); @@ -470,26 +468,26 @@ private void cloneOfferingAndAddTag(Connection conn, long networkOfferingId, lon pstmt = conn.prepareStatement("DROP TEMPORARY TABLE `cloud`.`network_offerings2`"); pstmt.executeUpdate(); } catch (SQLException e) { - s_logger.info("[ignored] ",e); + logger.info("[ignored] ",e); } closeAutoCloseable(pstmt); } } private void addHostDetailsUniqueKey(Connection conn) { - s_logger.debug("Checking if host_details unique key exists, if not we will add it"); + logger.debug("Checking if host_details unique key exists, if not we will add it"); try ( PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` WHERE KEY_NAME = 'uk_host_id_name'"); ResultSet rs = pstmt.executeQuery(); ) { if (rs.next()) { - s_logger.debug("Unique key already exists on host_details - not adding new one"); + logger.debug("Unique key already exists on host_details - not adding new one"); } else { //add the key PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER IGNORE TABLE `cloud`.`host_details` ADD CONSTRAINT UNIQUE KEY `uk_host_id_name` (`host_id`, `name`)"); pstmtUpdate.executeUpdate(); - s_logger.debug("Unique key did not exist on host_details - added new one"); + logger.debug("Unique key did not exist on host_details - added new one"); pstmtUpdate.close(); } } catch (SQLException e) { @@ -499,7 +497,7 @@ private void addHostDetailsUniqueKey(Connection conn) { private void 
addVpcProvider(Connection conn) { //Encrypt config params and change category to Hidden - s_logger.debug("Adding vpc provider to all physical networks in the system"); + logger.debug("Adding vpc provider to all physical networks in the system"); PreparedStatement pstmt = null; ResultSet rs = null; try { @@ -534,7 +532,7 @@ private void addVpcProvider(Connection conn) { pstmt.setLong(1, providerId); pstmt.executeUpdate(); - s_logger.debug("Added VPC Virtual router provider for physical network id=" + pNtwkId); + logger.debug("Added VPC Virtual router provider for physical network id=" + pNtwkId); } } catch (SQLException e) { @@ -543,12 +541,12 @@ private void addVpcProvider(Connection conn) { closeAutoCloseable(rs); closeAutoCloseable(pstmt); } - s_logger.debug("Done adding VPC physical network service providers to all physical networks"); + logger.debug("Done adding VPC physical network service providers to all physical networks"); } private void updateRouterNetworkRef(Connection conn) { //Encrypt config params and change category to Hidden - s_logger.debug("Updating router network ref"); + logger.debug("Updating router network ref"); try ( PreparedStatement pstmt = conn.prepareStatement("SELECT d.id, d.network_id FROM `cloud`.`domain_router` d, `cloud`.`vm_instance` v " + "WHERE d.id=v.id AND v.removed is NULL"); PreparedStatement pstmt1 = conn.prepareStatement("SELECT guest_type from `cloud`.`networks` where id=?"); @@ -571,13 +569,13 @@ private void updateRouterNetworkRef(Connection conn) { pstmt2.setString(3, networkType); pstmt2.executeUpdate(); } - s_logger.debug("Added reference for router id=" + routerId + " and network id=" + networkId); + logger.debug("Added reference for router id=" + routerId + " and network id=" + networkId); } } catch (SQLException e) { throw new CloudRuntimeException("Failed to update the router/network reference ", e); } - s_logger.debug("Done updating router/network references"); + logger.debug("Done updating router/network 
references"); } private void fixForeignKeys(Connection conn) { @@ -693,7 +691,7 @@ private void setupExternalNetworkDevices(Connection conn) { } private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetworkId) { - s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); + logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); String insertF5 = "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, is_inline, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; @@ -716,7 +714,7 @@ private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetwor } private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId) { - s_logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); + logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); String insertSrx = "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)"; @@ -738,7 +736,7 @@ private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long zoneId) { // add physical network service provider - F5BigIp - s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); + logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, 
`state` ," + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," @@ -757,7 +755,7 @@ private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long zoneId) { // add physical network service provider - JuniperSRX - s_logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); + logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," @@ -875,7 +873,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { pstmtUpdate.setLong(2, networkId); pstmtUpdate.setLong(3, f5DeviceId); pstmtUpdate.executeUpdate(); - s_logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); + logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); // add mapping for the network in network_external_firewall_device_map String insertFwMapping = @@ -885,11 +883,11 @@ private void fixZoneUsingExternalDevices(Connection conn) { pstmtUpdate.setLong(2, networkId); pstmtUpdate.setLong(3, srxDevivceId); pstmtUpdate.executeUpdate(); - s_logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); + logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); } // update host details for F5 and SRX devices - s_logger.debug("Updating the host details for F5 and SRX devices"); + logger.debug("Updating the 
host details for F5 and SRX devices"); pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?"); pstmt.setLong(1, f5HostId); pstmt.setLong(2, srxHostId); @@ -908,20 +906,20 @@ private void fixZoneUsingExternalDevices(Connection conn) { pstmt.setString(3, camlCaseName); pstmt.executeUpdate(); } - s_logger.debug("Successfully updated host details for F5 and SRX devices"); + logger.debug("Successfully updated host details for F5 and SRX devices"); } catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } finally { closeAutoCloseable(rs); closeAutoCloseable(pstmt); } - s_logger.info("Successfully upgraded networks using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); + logger.info("Successfully upgraded networks using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); } } private void encryptConfig(Connection conn) { //Encrypt config params and change category to Hidden - s_logger.debug("Encrypting Config values"); + logger.debug("Encrypting Config values"); try ( PreparedStatement pstmt = conn.prepareStatement("select name, value from `cloud`.`configuration` where name in ('router.ram.size', 'secondary.storage.vm', 'security.hash.key') and category <> 'Hidden'"); PreparedStatement pstmt1 = conn.prepareStatement("update `cloud`.`configuration` set value=?, category = 'Hidden' where name=?"); @@ -943,11 +941,11 @@ private void encryptConfig(Connection conn) { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt configuration values ", e); } - s_logger.debug("Done encrypting Config values"); + logger.debug("Done encrypting Config values"); } private void encryptClusterDetails(Connection conn) { - s_logger.debug("Encrypting 
cluster details"); + logger.debug("Encrypting cluster details"); try ( PreparedStatement pstmt = conn.prepareStatement("select id, value from `cloud`.`cluster_details` where name = 'password'"); PreparedStatement pstmt1 = conn.prepareStatement("update `cloud`.`cluster_details` set value=? where id=?"); @@ -969,6 +967,6 @@ private void encryptClusterDetails(Connection conn) { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt cluster_details values ", e); } - s_logger.debug("Done encrypting cluster_details"); + logger.debug("Done encrypting cluster_details"); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade303to304.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade303to304.java index 03f69ddefc90..d713a1c85921 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade303to304.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade303to304.java @@ -24,13 +24,11 @@ import java.sql.SQLException; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade303to304 extends Upgrade30xBase implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade303to304.class); +public class Upgrade303to304 extends Upgrade30xBase { @Override public String[] getUpgradableVersionRange() { @@ -171,9 +169,9 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { if (rsSameLabel.next()) { Long sameLabelcount = rsSameLabel.getLong(1); if (sameLabelcount > 0) { - s_logger.error("There are untagged networks for which we need to add a physical network with Xen traffic label = 'xen.guest.network.device' config value, which is: " + + logger.error("There are untagged networks for which we need to add a physical network with Xen traffic label = 'xen.guest.network.device' config value, which is: " + xenGuestLabel); - s_logger.error("However 
already there are " + sameLabelcount + " physical networks setup with same traffic label, cannot upgrade"); + logger.error("However already there are " + sameLabelcount + " physical networks setup with same traffic label, cannot upgrade"); throw new CloudRuntimeException("Cannot upgrade this setup since a physical network with same traffic label: " + xenGuestLabel + " already exists, Please check logs and contact Support."); } @@ -188,9 +186,9 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { pstmt_network_id.setLong(1, zoneId); try (ResultSet rsNet = pstmt_network_id.executeQuery();) { - s_logger.debug("Adding PhysicalNetwork to VLAN"); - s_logger.debug("Adding PhysicalNetwork to user_ip_address"); - s_logger.debug("Adding PhysicalNetwork to networks"); + logger.debug("Adding PhysicalNetwork to VLAN"); + logger.debug("Adding PhysicalNetwork to user_ip_address"); + logger.debug("Adding PhysicalNetwork to networks"); while (rsNet.next()) { Long networkId = rsNet.getLong(1); addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId, networkId); @@ -207,7 +205,7 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { if (rs.next()) { Long count = rs.getLong(1); if (count > 1) { - s_logger.debug("There are " + count + " physical networks setup"); + logger.debug("There are " + count + " physical networks setup"); multiplePhysicalNetworks = true; } } @@ -223,7 +221,7 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { String networkId = rsVNet.getString(5); String vpid = rsVNet.getString(4); String npid = rsVNet.getString(6); - s_logger.error("Guest Vnet assignment is set wrongly . Cannot upgrade until that is corrected. Example- Vnet: " + vnet + + logger.error("Guest Vnet assignment is set wrongly . Cannot upgrade until that is corrected. 
Example- Vnet: " + vnet + " has physical network id: " + vpid + " ,but the guest network: " + networkId + " that uses it has physical network id: " + npid); String message = "Cannot upgrade. Your setup has multiple Physical Networks and is using guest Vnet that is assigned wrongly. " @@ -242,7 +240,7 @@ private void correctMultiplePhysicaNetworkSetups(Connection conn) { + "5. Run upgrade. This will allocate all your guest vnet range to first physical network. \n" + "6. Reconfigure the vnet ranges for each physical network as desired by using updatePhysicalNetwork API \n" + "7. Start all your VMs"; - s_logger.error(message); + logger.error(message); throw new CloudRuntimeException("Cannot upgrade this setup since Guest Vnet assignment to the multiple physical networks " + "is incorrect. Please check the logs for details on how to proceed"); @@ -383,7 +381,7 @@ private void cloneOfferingAndAddTag(Connection conn, long networkOfferingId, lon try (PreparedStatement pstmt_drop_table = conn.prepareStatement("DROP TEMPORARY TABLE `cloud`.`network_offerings2`");) { pstmt_drop_table.executeUpdate(); } catch (SQLException e) { - s_logger.debug("drop of temp table 'network_offerings2' failed", e); + logger.debug("drop of temp table 'network_offerings2' failed", e); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java index a8009630976a..bb4c73f67b68 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java @@ -27,13 +27,11 @@ import java.util.List; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade304to305 extends Upgrade30xBase { - final static Logger s_logger = Logger.getLogger(Upgrade304to305.class); @Override public String[] 
getUpgradableVersionRange() { @@ -99,7 +97,7 @@ private void updateSystemVms(Connection conn) { throw new CloudRuntimeException("Error while iterating through list of hypervisors in use", e); } // Just update the VMware system template. Other hypervisor templates are unchanged from previous 3.0.x versions. - s_logger.debug("Updating VMware System Vms"); + logger.debug("Updating VMware System Vms"); try { //Get 3.0.5 VMware system Vm template Id pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-vmware-3.0.5' and removed is null"); @@ -122,18 +120,18 @@ private void updateSystemVms(Connection conn) { if (VMware) { throw new CloudRuntimeException("3.0.5 VMware SystemVm template not found. Cannot upgrade system Vms"); } else { - s_logger.warn("3.0.5 VMware SystemVm template not found. VMware hypervisor is not used, so not failing upgrade"); + logger.warn("3.0.5 VMware SystemVm template not found. VMware hypervisor is not used, so not failing upgrade"); } } } catch (SQLException e) { throw new CloudRuntimeException("Error while updating VMware systemVm template", e); } - s_logger.debug("Updating System Vm Template IDs Complete"); + logger.debug("Updating System Vm Template IDs Complete"); } private void addVpcProvider(Connection conn) { //Encrypt config params and change category to Hidden - s_logger.debug("Adding vpc provider to all physical networks in the system"); + logger.debug("Adding vpc provider to all physical networks in the system"); PreparedStatement pstmt = null; ResultSet rs = null; try { @@ -168,7 +166,7 @@ private void addVpcProvider(Connection conn) { pstmt.setLong(1, providerId); pstmt.executeUpdate(); - s_logger.debug("Added VPC Virtual router provider for physical network id=" + pNtwkId); + logger.debug("Added VPC Virtual router provider for physical network id=" + pNtwkId); } } catch (SQLException e) { @@ -177,12 +175,12 @@ private void addVpcProvider(Connection conn) { closeAutoCloseable(rs); 
closeAutoCloseable(pstmt); } - s_logger.debug("Done adding VPC physical network service providers to all physical networks"); + logger.debug("Done adding VPC physical network service providers to all physical networks"); } private void updateRouterNetworkRef(Connection conn) { //Encrypt config params and change category to Hidden - s_logger.debug("Updating router network ref"); + logger.debug("Updating router network ref"); PreparedStatement pstmt = null; ResultSet rs = null; try { @@ -207,7 +205,7 @@ private void updateRouterNetworkRef(Connection conn) { pstmt.setString(3, networkType); pstmt.executeUpdate(); - s_logger.debug("Added reference for router id=" + routerId + " and network id=" + networkId); + logger.debug("Added reference for router id=" + routerId + " and network id=" + networkId); } } catch (SQLException e) { @@ -216,24 +214,24 @@ private void updateRouterNetworkRef(Connection conn) { closeAutoCloseable(rs); closeAutoCloseable(pstmt); } - s_logger.debug("Done updating router/network references"); + logger.debug("Done updating router/network references"); } private void addHostDetailsUniqueKey(Connection conn) { - s_logger.debug("Checking if host_details unique key exists, if not we will add it"); + logger.debug("Checking if host_details unique key exists, if not we will add it"); PreparedStatement pstmt = null; ResultSet rs = null; try { pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` WHERE KEY_NAME = 'uk_host_id_name'"); rs = pstmt.executeQuery(); if (rs.next()) { - s_logger.debug("Unique key already exists on host_details - not adding new one"); + logger.debug("Unique key already exists on host_details - not adding new one"); } else { //add the key PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER IGNORE TABLE `cloud`.`host_details` ADD CONSTRAINT UNIQUE KEY `uk_host_id_name` (`host_id`, `name`)"); pstmtUpdate.executeUpdate(); - s_logger.debug("Unique key did not exist on host_details - added new one"); + 
logger.debug("Unique key did not exist on host_details - added new one"); pstmtUpdate.close(); } } catch (SQLException e) { @@ -347,7 +345,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { pstmtUpdate.setLong(2, networkId); pstmtUpdate.setLong(3, f5DeviceId); pstmtUpdate.executeUpdate(); - s_logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); + logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); // add mapping for the network in network_external_firewall_device_map String insertFwMapping = @@ -357,11 +355,11 @@ private void fixZoneUsingExternalDevices(Connection conn) { pstmtUpdate.setLong(2, networkId); pstmtUpdate.setLong(3, srxDevivceId); pstmtUpdate.executeUpdate(); - s_logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); + logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); } // update host details for F5 and SRX devices - s_logger.debug("Updating the host details for F5 and SRX devices"); + logger.debug("Updating the host details for F5 and SRX devices"); pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? 
OR host_id=?"); pstmt.setLong(1, f5HostId); pstmt.setLong(2, srxHostId); @@ -380,19 +378,19 @@ private void fixZoneUsingExternalDevices(Connection conn) { pstmt.setString(3, camlCaseName); pstmt.executeUpdate(); } - s_logger.debug("Successfully updated host details for F5 and SRX devices"); + logger.debug("Successfully updated host details for F5 and SRX devices"); } catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } finally { closeAutoCloseable(rs); closeAutoCloseable(pstmt); } - s_logger.info("Successfully upgraded network using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); + logger.info("Successfully upgraded network using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); } } private void fixForeignKeys(Connection conn) { - s_logger.debug("Fixing foreign keys' names in ssh_keypairs table"); + logger.debug("Fixing foreign keys' names in ssh_keypairs table"); //Drop the keys (if exist) List keys = new ArrayList(); keys.add("fk_ssh_keypair__account_id"); @@ -434,7 +432,7 @@ private void fixForeignKeys(Connection conn) { } private void encryptClusterDetails(Connection conn) { - s_logger.debug("Encrypting cluster details"); + logger.debug("Encrypting cluster details"); PreparedStatement pstmt = null; ResultSet rs = null; try { @@ -460,6 +458,6 @@ private void encryptClusterDetails(Connection conn) { closeAutoCloseable(rs); closeAutoCloseable(pstmt); } - s_logger.debug("Done encrypting cluster_details"); + logger.debug("Done encrypting cluster_details"); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade305to306.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade305to306.java index 796287697a90..52aab2a13e9b 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade305to306.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade305to306.java @@ -27,12 +27,10 @@ import java.util.List; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade305to306 extends Upgrade30xBase { - final static Logger s_logger = Logger.getLogger(Upgrade305to306.class); @Override public String[] getUpgradableVersionRange() { @@ -78,14 +76,14 @@ private void addIndexForAlert(Connection conn) { //First drop if it exists. (Due to patches shipped to customers some will have the index and some won't.) List indexList = new ArrayList(); - s_logger.debug("Dropping index i_alert__last_sent if it exists"); + logger.debug("Dropping index i_alert__last_sent if it exists"); indexList.add("i_alert__last_sent"); DbUpgradeUtils.dropKeysIfExist(conn, "alert", indexList, false); //Now add index. try (PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)");) { pstmt.executeUpdate(); - s_logger.debug("Added index i_alert__last_sent for table alert"); + logger.debug("Added index i_alert__last_sent for table alert"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to add index i_alert__last_sent to alert table for the column last_sent", e); } @@ -117,14 +115,14 @@ private void addIndexForHostDetails(Connection conn) { //First drop if it exists. (Due to patches shipped to customers some will have the index and some won't.) List indexList = new ArrayList(); - s_logger.debug("Dropping index fk_host_details__host_id if it exists"); + logger.debug("Dropping index fk_host_details__host_id if it exists"); indexList.add("fk_host_details__host_id"); DbUpgradeUtils.dropKeysIfExist(conn, "host_details", indexList, false); //Now add index. 
try (PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id`(`host_id`)");) { pstmt.executeUpdate(); - s_logger.debug("Added index fk_host_details__host_id for table host_details"); + logger.debug("Added index fk_host_details__host_id for table host_details"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to add index fk_host_details__host_id to host_details table for the column host_id", e); } @@ -140,7 +138,7 @@ private void upgradeEgressFirewallRules(Connection conn) { // update the existing ingress rules traffic type pstmt = conn.prepareStatement("update `cloud`.`firewall_rules`" + " set traffic_type='Ingress' where purpose='Firewall' and ip_address_id is not null and traffic_type is null"); - s_logger.debug("Updating firewall Ingress rule traffic type: " + pstmt); + logger.debug("Updating firewall Ingress rule traffic type: " + pstmt); pstmt.executeUpdate(); pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='VirtualRouter' "); @@ -152,7 +150,7 @@ private void upgradeEgressFirewallRules(Connection conn) { pstmt = conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='" + "Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? 
"); pstmt.setLong(1, netId); - s_logger.debug("Getting account_id, domain_id from networks table: " + pstmt); + logger.debug("Getting account_id, domain_id from networks table: " + pstmt); rsNw = pstmt.executeQuery(); if (rsNw.next()) { @@ -160,7 +158,7 @@ private void upgradeEgressFirewallRules(Connection conn) { long domainId = rsNw.getLong(2); //Add new rule for the existing networks - s_logger.debug("Adding default egress firewall rule for network " + netId); + logger.debug("Adding default egress firewall rule for network " + netId); pstmt = conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')"); pstmt.setString(1, UUID.randomUUID().toString()); @@ -168,7 +166,7 @@ private void upgradeEgressFirewallRules(Connection conn) { pstmt.setLong(3, domainId); pstmt.setLong(4, netId); pstmt.setString(5, UUID.randomUUID().toString()); - s_logger.debug("Inserting default egress firewall rule " + pstmt); + logger.debug("Inserting default egress firewall rule " + pstmt); pstmt.executeUpdate(); pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?"); @@ -180,7 +178,7 @@ private void upgradeEgressFirewallRules(Connection conn) { firewallRuleId = rsId.getLong(1); pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')"); pstmt.setLong(1, firewallRuleId); - s_logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + pstmt); + logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + pstmt); pstmt.executeUpdate(); } } @@ -218,7 +216,7 @@ private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connect private void fix22xKVMSnapshots(Connection conn) { PreparedStatement pstmt = 
null; ResultSet rs = null; - s_logger.debug("Updating KVM snapshots"); + logger.debug("Updating KVM snapshots"); try { pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null"); @@ -232,14 +230,14 @@ private void fix22xKVMSnapshots(Connection conn) { int index = backUpPath.indexOf("snapshots" + File.separator); if (index > 1) { String correctedPath = File.separator + backUpPath.substring(index); - s_logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath); + logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath); pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? where id = ?"); pstmt.setString(1, correctedPath); pstmt.setLong(2, id); pstmt.executeUpdate(); } } - s_logger.debug("Done updating KVM snapshots"); + logger.debug("Done updating KVM snapshots"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to update backup id for KVM snapshots", e); } finally { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade306to307.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade306to307.java index 4eb39af51d7c..3d28d73031ac 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade306to307.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade306to307.java @@ -23,12 +23,10 @@ import java.sql.ResultSet; import java.sql.SQLException; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade306to307 extends Upgrade30xBase { - final static Logger s_logger = Logger.getLogger(Upgrade306to307.class); @Override public String[] getUpgradableVersionRange() { @@ -96,7 +94,7 @@ protected void updateConcurrentConnectionsInNetworkOfferings(Connection conn) { pstmt = 
conn.prepareStatement("drop table `cloud`.`network_details`"); pstmt.executeUpdate(); } catch (SQLException e) { - s_logger.info("[ignored] error during network offering update:" + e.getLocalizedMessage(), e); + logger.info("[ignored] error during network offering update:" + e.getLocalizedMessage(), e); } finally { closeAutoCloseable(rs); closeAutoCloseable(rs1); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade307to410.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade307to410.java index 1554ff04f701..1d47717a2de5 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade307to410.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade307to410.java @@ -23,13 +23,11 @@ import java.sql.SQLException; import java.util.Properties; -import org.apache.log4j.Logger; import com.cloud.utils.db.DbProperties; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade307to410 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade307to410.class); +public class Upgrade307to410 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -71,7 +69,7 @@ private void updateRegionEntries(Connection conn) { } try (PreparedStatement pstmt = conn.prepareStatement("update `cloud`.`region` set id = ?");){ //Update regionId in region table - s_logger.debug("Updating region table with Id: " + region_id); + logger.debug("Updating region table with Id: " + region_id); pstmt.setInt(1, region_id); pstmt.executeUpdate(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30to301.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30to301.java index 806cabb12eb2..ba17082bab52 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30to301.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30to301.java @@ -22,13 +22,11 @@ import java.sql.ResultSet; import java.sql.SQLException; -import 
org.apache.log4j.Logger; import com.cloud.configuration.Resource.ResourceType; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade30to301 extends LegacyDbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade30to301.class); @Override public String[] getUpgradableVersionRange() { @@ -95,7 +93,7 @@ protected void udpateAccountNetworkResourceCount(Connection conn) { pstmt.setLong(1, accountId); pstmt.setLong(2, count); pstmt.executeUpdate(); - s_logger.debug("Updated network resource count for account id=" + accountId + " to be " + count); + logger.debug("Updated network resource count for account id=" + accountId + " to be " + count); } } catch (SQLException e) { throw new CloudRuntimeException("Unable to update network resource count for account id=" + accountId, e); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30xBase.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30xBase.java index 47b877d5aa5b..d2dd77365339 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30xBase.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade30xBase.java @@ -23,13 +23,11 @@ import java.sql.SQLException; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; public abstract class Upgrade30xBase extends LegacyDbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade30xBase.class); protected String getNetworkLabelFromConfig(Connection conn, String name) { String sql = "SELECT value FROM `cloud`.`configuration` where name = ?"; @@ -72,7 +70,7 @@ protected long addPhysicalNetworkToZone(Connection conn, long zoneId, String zon broadcastDomainRange = "ZONE"; } - s_logger.debug("Adding PhysicalNetwork " + physicalNetworkId + " for Zone id " + zoneId); + logger.debug("Adding PhysicalNetwork " + physicalNetworkId + " for Zone id " + zoneId); String sql = "INSERT INTO `cloud`.`physical_network` (id, uuid, 
data_center_id, vnet, broadcast_domain_range, state, name) VALUES (?,?,?,?,?,?,?)"; pstmtUpdate = conn.prepareStatement(sql); @@ -84,12 +82,12 @@ protected long addPhysicalNetworkToZone(Connection conn, long zoneId, String zon pstmtUpdate.setString(6, "Enabled"); zoneName = zoneName + "-pNtwk" + physicalNetworkId; pstmtUpdate.setString(7, zoneName); - s_logger.warn("Statement is " + pstmtUpdate.toString()); + logger.warn("Statement is " + pstmtUpdate.toString()); pstmtUpdate.executeUpdate(); pstmtUpdate.close(); if (domainId != null && domainId.longValue() != 0) { - s_logger.debug("Updating domain_id for physical network id=" + physicalNetworkId); + logger.debug("Updating domain_id for physical network id=" + physicalNetworkId); sql = "UPDATE `cloud`.`physical_network` set domain_id=? where id=?"; pstmtUpdate = conn.prepareStatement(sql); pstmtUpdate.setLong(1, domainId); @@ -111,7 +109,7 @@ protected void addTrafficType(Connection conn, long physicalNetworkId, String tr // add traffic types PreparedStatement pstmtUpdate = null; try { - s_logger.debug("Adding PhysicalNetwork traffic types"); + logger.debug("Adding PhysicalNetwork traffic types"); String insertTraficType = "INSERT INTO `cloud`.`physical_network_traffic_types` (physical_network_id, traffic_type, xen_network_label, kvm_network_label, vmware_network_label, uuid) VALUES ( ?, ?, ?, ?, ?, ?)"; pstmtUpdate = conn.prepareStatement(insertTraficType); @@ -154,7 +152,7 @@ protected void addDefaultSGProvider(Connection conn, long physicalNetworkId, lon pstmt2.close(); if (isSGServiceEnabled) { - s_logger.debug("Adding PhysicalNetworkServiceProvider SecurityGroupProvider to the physical network id=" + physicalNetworkId); + logger.debug("Adding PhysicalNetworkServiceProvider SecurityGroupProvider to the physical network id=" + physicalNetworkId); String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + 
"`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," @@ -182,7 +180,7 @@ protected void addDefaultVRProvider(Connection conn, long physicalNetworkId, lon PreparedStatement pstmtUpdate = null, pstmt2 = null; try { // add physical network service provider - VirtualRouter - s_logger.debug("Adding PhysicalNetworkServiceProvider VirtualRouter"); + logger.debug("Adding PhysicalNetworkServiceProvider VirtualRouter"); String insertPNSP = "INSERT INTO `cloud`.`physical_network_service_providers` (`uuid`, `physical_network_id` , `provider_name`, `state` ," + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade40to41.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade40to41.java index 3e15ff6782b3..6dc58fdda792 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade40to41.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade40to41.java @@ -25,13 +25,11 @@ import java.util.Properties; import java.util.UUID; -import org.apache.log4j.Logger; import com.cloud.utils.db.DbProperties; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade40to41 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade40to41.class); +public class Upgrade40to41 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -85,7 +83,7 @@ private void updateRegionEntries(Connection conn) { } try (PreparedStatement pstmt = conn.prepareStatement("update `cloud`.`region` set id = ?");) { //Update regionId in region table - s_logger.debug("Updating region table with Id: " + region_id); + logger.debug("Updating region table with Id: " + region_id); pstmt.setInt(1, region_id); pstmt.executeUpdate(); @@ -101,7 +99,7 @@ private void 
upgradeEgressFirewallRules(Connection conn) { "not null and traffic_type is null");) { updateNwpstmt.executeUpdate(); - s_logger.debug("Updating firewall Ingress rule traffic type: " + updateNwpstmt); + logger.debug("Updating firewall Ingress rule traffic type: " + updateNwpstmt); } catch (SQLException e) { throw new CloudRuntimeException("Unable to update ingress firewall rules ", e); } @@ -120,13 +118,13 @@ private void upgradeEgressFirewallRules(Connection conn) { NwAcctDomIdpstmt.setLong(1, netId); try (ResultSet NwAcctDomIdps = NwAcctDomIdpstmt.executeQuery();) { - s_logger.debug("Getting account_id, domain_id from networks table: " + NwAcctDomIdpstmt); + logger.debug("Getting account_id, domain_id from networks table: " + NwAcctDomIdpstmt); if (NwAcctDomIdps.next()) { long accountId = NwAcctDomIdps.getLong(1); long domainId = NwAcctDomIdps.getLong(2); //Add new rule for the existing networks - s_logger.debug("Adding default egress firewall rule for network " + netId); + logger.debug("Adding default egress firewall rule for network " + netId); try (PreparedStatement fwRulespstmt = conn.prepareStatement("INSERT INTO firewall_rules "+ " (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created," + " traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), " @@ -137,7 +135,7 @@ private void upgradeEgressFirewallRules(Connection conn) { fwRulespstmt.setLong(3, domainId); fwRulespstmt.setLong(4, netId); fwRulespstmt.setString(5, UUID.randomUUID().toString()); - s_logger.debug("Inserting default egress firewall rule " + fwRulespstmt); + logger.debug("Inserting default egress firewall rule " + fwRulespstmt); fwRulespstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("failed to insert default egress firewall rule ", e); @@ -154,7 +152,7 @@ private void upgradeEgressFirewallRules(Connection conn) { try (PreparedStatement fwCidrsPstmt = conn.prepareStatement("insert into firewall_rules_cidrs 
(firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')");) { fwCidrsPstmt.setLong(1, firewallRuleId); - s_logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + fwCidrsPstmt); + logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + fwCidrsPstmt); fwCidrsPstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Unable to set egress firewall rules ", e); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java index 3900cf0bf82d..9b2a7fcc0bb3 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41000to41100.java @@ -25,14 +25,12 @@ import java.util.Map; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41000to41100 implements DbUpgrade { +public class Upgrade41000to41100 extends DbUpgradeAbstractImpl { - final static Logger LOG = Logger.getLogger(Upgrade41000to41100.class); @Override public String[] getUpgradableVersionRange() { @@ -69,16 +67,16 @@ public void performDataMigration(Connection conn) { private void checkAndEnableDynamicRoles(final Connection conn) { final Map apiMap = PropertiesUtil.processConfigFile(new String[] { "commands.properties" }); if (apiMap == null || apiMap.isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("No commands.properties file was found, enabling dynamic roles by setting dynamic.apichecker.enabled to true if not already enabled."); + if (logger.isDebugEnabled()) { + logger.debug("No commands.properties file was found, enabling dynamic roles by setting dynamic.apichecker.enabled to true if not already enabled."); } try (final 
PreparedStatement updateStatement = conn.prepareStatement("INSERT INTO cloud.configuration (category, instance, name, default_value, value) VALUES ('Advanced', 'DEFAULT', 'dynamic.apichecker.enabled', 'false', 'true') ON DUPLICATE KEY UPDATE value='true'")) { updateStatement.executeUpdate(); } catch (SQLException e) { - LOG.error("Failed to set dynamic.apichecker.enabled to true, please run migrate-dynamicroles.py script to manually migrate to dynamic roles.", e); + logger.error("Failed to set dynamic.apichecker.enabled to true, please run migrate-dynamicroles.py script to manually migrate to dynamic roles.", e); } } else { - LOG.warn("Old commands.properties static checker is deprecated, please use migrate-dynamicroles.py to migrate to dynamic roles. Refer http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/accounts.html#using-dynamic-roles"); + logger.warn("Old commands.properties static checker is deprecated, please use migrate-dynamicroles.py to migrate to dynamic roles. 
Refer http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/accounts.html#using-dynamic-roles"); } } @@ -96,19 +94,19 @@ private void validateUserDataInBase64(Connection conn) { updateStatement.setLong(2, userVmId); updateStatement.executeUpdate(); } catch (SQLException e) { - LOG.error("Failed to update cloud.user_vm user_data for id:" + userVmId + " with exception: " + e.getMessage()); + logger.error("Failed to update cloud.user_vm user_data for id:" + userVmId + " with exception: " + e.getMessage()); throw new CloudRuntimeException("Exception while updating cloud.user_vm for id " + userVmId, e); } } } else { // Update to NULL since it's invalid - LOG.warn("Removing user_data for vm id " + userVmId + " because it's invalid"); - LOG.warn("Removed data was: " + userData); + logger.warn("Removing user_data for vm id " + userVmId + " because it's invalid"); + logger.warn("Removed data was: " + userData); try (final PreparedStatement updateStatement = conn.prepareStatement("UPDATE `cloud`.`user_vm` SET `user_data` = NULL WHERE `id` = ? 
;")) { updateStatement.setLong(1, userVmId); updateStatement.executeUpdate(); } catch (SQLException e) { - LOG.error("Failed to update cloud.user_vm user_data for id:" + userVmId + " to NULL with exception: " + e.getMessage()); + logger.error("Failed to update cloud.user_vm user_data for id:" + userVmId + " to NULL with exception: " + e.getMessage()); throw new CloudRuntimeException("Exception while updating cloud.user_vm for id " + userVmId + " to NULL", e); } } @@ -116,8 +114,8 @@ private void validateUserDataInBase64(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Exception while validating existing user_vm table's user_data column to be base64 valid with padding", e); } - if (LOG.isDebugEnabled()) { - LOG.debug("Done validating base64 content of user data"); + if (logger.isDebugEnabled()) { + logger.debug("Done validating base64 content of user data"); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java index 2e7eee13a5ad..b78aed3119a4 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java @@ -33,7 +33,6 @@ import java.util.Map; import java.util.UUID; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; @@ -44,8 +43,7 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade410to420 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade410to420.class); +public class Upgrade410to420 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -194,15 +192,15 @@ private void persistVswitchConfiguration(Connection conn) { detailsMap.put(clusterId, detailsList); updateClusterDetails(conn, 
detailsMap); - s_logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster " + clusterId); + logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster " + clusterId); } else { - s_logger.debug("Persist vSwitch Configuration: Ignoring cluster " + clusterId + " with hypervisor type " + clusterHypervisorType); + logger.debug("Persist vSwitch Configuration: Ignoring cluster " + clusterId + " with hypervisor type " + clusterHypervisorType); continue; } } // End cluster iteration }catch (SQLException e) { String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } @@ -212,7 +210,7 @@ private void persistVswitchConfiguration(Connection conn) { } } catch (SQLException e) { String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } } @@ -238,7 +236,7 @@ private void updateClusterDetails(Connection conn, Map indexList = new ArrayList(); - s_logger.debug("Dropping index i_alert__last_sent if it exists"); + logger.debug("Dropping index i_alert__last_sent if it exists"); indexList.add("last_sent"); // in 4.1, we created this index that is not in convention. 
indexList.add("i_alert__last_sent"); DbUpgradeUtils.dropKeysIfExist(conn, "alert", indexList, false); @@ -453,7 +451,7 @@ private void addIndexForAlert(Connection conn) { try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)");) { pstmt.executeUpdate(); - s_logger.debug("Added index i_alert__last_sent for table alert"); + logger.debug("Added index i_alert__last_sent for table alert"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to add index i_alert__last_sent to alert table for the column last_sent", e); } @@ -462,7 +460,7 @@ private void addIndexForAlert(Connection conn) { private void dropUploadTable(Connection conn) { try(PreparedStatement pstmt0 = conn.prepareStatement("SELECT url, created, type_id, host_id from upload where type=?");) { // Read upload table - Templates - s_logger.debug("Populating template_store_ref table"); + logger.debug("Populating template_store_ref table"); pstmt0.setString(1, "TEMPLATE"); try(ResultSet rs0 = pstmt0.executeQuery();) { @@ -476,7 +474,7 @@ private void dropUploadTable(Connection conn) { pstmt1.executeUpdate(); } // Read upload table - Volumes - s_logger.debug("Populating volume store ref table"); + logger.debug("Populating volume store ref table"); try(PreparedStatement pstmt2 = conn.prepareStatement("SELECT url, created, type_id, host_id, install_path from upload where type=?");) { pstmt2.setString(1, "VOLUME"); try(ResultSet rs2 = pstmt2.executeQuery();) { @@ -518,7 +516,7 @@ private void dropUploadTable(Connection conn) { //KVM snapshot flag: only turn on if Customers is using snapshot; private void setKVMSnapshotFlag(Connection conn) { - s_logger.debug("Verify and set the KVM snapshot flag if snapshot was used. "); + logger.debug("Verify and set the KVM snapshot flag if snapshot was used. 
"); try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'");) { int numRows = 0; @@ -541,7 +539,7 @@ private void setKVMSnapshotFlag(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Failed to read the snapshot table for KVM upgrade. ", e); } - s_logger.debug("Done set KVM snapshot flag. "); + logger.debug("Done set KVM snapshot flag. "); } private void updatePrimaryStore(Connection conn) { @@ -633,7 +631,7 @@ private String getNewLabel(ResultSet rs, String oldParamValue) { } } } catch (SQLException e) { - s_logger.error(new CloudRuntimeException("Failed to read vmware_network_label : " + e)); + logger.error(new CloudRuntimeException("Failed to read vmware_network_label : " + e)); } return newGuestLabel; } @@ -667,7 +665,7 @@ private void upgradeVmwareLabels(Connection conn) { newLabel = getNewLabel(rsLabel, trafficTypeVswitchParamValue); try(PreparedStatement update_pstmt = conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;");) { - s_logger.debug("Updating vmware label for " + trafficType + " traffic. Update SQL statement is " + pstmt); + logger.debug("Updating vmware label for " + trafficType + " traffic. 
Update SQL statement is " + pstmt); pstmt.setString(1, newLabel); pstmt.setString(2, trafficType); update_pstmt.executeUpdate(); @@ -748,7 +746,7 @@ private void persistLegacyZones(Connection conn) { if (count > 0) { if (!dcOfPreviousCluster.equalsIgnoreCase(dcOfCurrentCluster)) { legacyZone = true; - s_logger.debug("Marking the zone " + zoneId + " as legacy zone."); + logger.debug("Marking the zone " + zoneId + " as legacy zone."); } } } catch (SQLException e) { @@ -758,7 +756,7 @@ private void persistLegacyZones(Connection conn) { throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e); } } else { - s_logger.debug("Ignoring zone " + zoneId + " with hypervisor type " + clusterHypervisorType); + logger.debug("Ignoring zone " + zoneId + " with hypervisor type " + clusterHypervisorType); break; } count++; @@ -798,11 +796,11 @@ private void persistLegacyZones(Connection conn) { updateLegacyZones(conn, listOfLegacyZones); updateNonLegacyZones(conn, listOfNonLegacyZones); } catch (SQLException e) { - s_logger.error("Unable to discover legacy zones." + e.getMessage(),e); + logger.error("Unable to discover legacy zones." + e.getMessage(),e); throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e); } }catch (SQLException e) { - s_logger.error("Unable to discover legacy zones." + e.getMessage(),e); + logger.error("Unable to discover legacy zones." + e.getMessage(),e); throw new CloudRuntimeException("Unable to discover legacy zones." 
+ e.getMessage(), e); } } @@ -813,7 +811,7 @@ private void updateLegacyZones(Connection conn, List zones) { for (Long zoneId : zones) { legacyZonesQuery.setLong(1, zoneId); legacyZonesQuery.executeUpdate(); - s_logger.debug("Inserted zone " + zoneId + " into cloud.legacyzones table"); + logger.debug("Inserted zone " + zoneId + " into cloud.legacyzones table"); } } catch (SQLException e) { throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e); @@ -823,7 +821,7 @@ private void updateLegacyZones(Connection conn, List zones) { private void updateNonLegacyZones(Connection conn, List zones) { try { for (Long zoneId : zones) { - s_logger.debug("Discovered non-legacy zone " + zoneId + ". Processing the zone to associate with VMware datacenter."); + logger.debug("Discovered non-legacy zone " + zoneId + ". Processing the zone to associate with VMware datacenter."); // All clusters in a non legacy zone will belong to the same VMware DC, hence pick the first cluster try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) { @@ -888,7 +886,7 @@ private void updateNonLegacyZones(Connection conn, List zones) { } } catch (SQLException e) { String msg = "Unable to update non legacy zones." 
+ e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } } @@ -914,7 +912,7 @@ private void createPlaceHolderNics(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("Unable to create placeholder nics", e); } - s_logger.debug("Created placeholder nic for the ipAddress " + ip + " and network " + networkId); + logger.debug("Created placeholder nic for the ipAddress " + ip + " and network " + networkId); } }catch (SQLException e) { throw new CloudRuntimeException("Unable to create placeholder nics", e); @@ -959,7 +957,7 @@ private void addEgressFwRulesForSRXGuestNw(Connection conn) { try(PreparedStatement sel_net_pstmt = conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? ");) { sel_net_pstmt.setLong(1, netId); - s_logger.debug("Getting account_id, domain_id from networks table: "); + logger.debug("Getting account_id, domain_id from networks table: "); try(ResultSet rsNw = pstmt.executeQuery();) { if (rsNw.next()) { @@ -967,7 +965,7 @@ private void addEgressFwRulesForSRXGuestNw(Connection conn) { long domainId = rsNw.getLong(2); //Add new rule for the existing networks - s_logger.debug("Adding default egress firewall rule for network " + netId); + logger.debug("Adding default egress firewall rule for network " + netId); try (PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')");) { insert_pstmt.setString(1, UUID.randomUUID().toString()); @@ -975,7 +973,7 @@ private void addEgressFwRulesForSRXGuestNw(Connection conn) { insert_pstmt.setLong(3, domainId); insert_pstmt.setLong(4, netId); insert_pstmt.setString(5, 
UUID.randomUUID().toString()); - s_logger.debug("Inserting default egress firewall rule " + insert_pstmt); + logger.debug("Inserting default egress firewall rule " + insert_pstmt); insert_pstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Unable to set egress firewall rules ", e); @@ -988,7 +986,7 @@ private void addEgressFwRulesForSRXGuestNw(Connection conn) { firewallRuleId = rsId.getLong(1); try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')");) { insert_pstmt.setLong(1, firewallRuleId); - s_logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + insert_pstmt); + logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + insert_pstmt); insert_pstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Unable to set egress firewall rules ", e); @@ -1040,7 +1038,7 @@ private void updateNetworkACLs(Connection conn) { //For each tier create a network ACL and move all the acl_items to network_acl_item table // If there are no acl_items for a tier, associate it with default ACL - s_logger.debug("Updating network ACLs"); + logger.debug("Updating network ACLs"); //1,2 are default acl Ids, start acl Ids from 3 long nextAclId = 3; @@ -1066,7 +1064,7 @@ private void updateNetworkACLs(Connection conn) { //Get all VPC tiers while (rsNetworkIds.next()) { Long networkId = rsNetworkIds.getLong(1); - s_logger.debug("Updating network ACLs for network: " + networkId); + logger.debug("Updating network ACLs for network: " + networkId); Long vpcId = rsNetworkIds.getLong(2); String tierUuid = rsNetworkIds.getString(3); pstmtSelectFirewallRules.setLong(1, networkId); @@ -1079,7 +1077,7 @@ private void updateNetworkACLs(Connection conn) { hasAcls = true; aclId = nextAclId++; //create ACL for the tier - 
s_logger.debug("Creating network ACL for tier: " + tierUuid); + logger.debug("Creating network ACL for tier: " + tierUuid); pstmtInsertNetworkAcl.setLong(1, aclId); pstmtInsertNetworkAcl.setLong(2, vpcId); pstmtInsertNetworkAcl.setString(3, "ACL for tier " + tierUuid); @@ -1107,7 +1105,7 @@ private void updateNetworkACLs(Connection conn) { } String aclItemUuid = rsAcls.getString(2); //Move acl to network_acl_item table - s_logger.debug("Moving firewall rule: " + aclItemUuid); + logger.debug("Moving firewall rule: " + aclItemUuid); //uuid pstmtInsertNetworkAclItem.setString(1, aclItemUuid); //aclId @@ -1178,7 +1176,7 @@ private void updateNetworkACLs(Connection conn) { pstmtUpdate.setLong(2, networkId); pstmtUpdate.executeUpdate(); } - s_logger.debug("Done updating network ACLs "); + logger.debug("Done updating network ACLs "); } catch (SQLException e) { throw new CloudRuntimeException("Unable to move network acls from firewall rules table to network_acl_item table", e); } @@ -1292,17 +1290,17 @@ private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) { } private void addHostDetailsIndex(Connection conn) { - s_logger.debug("Checking if host_details index exists, if not we will add it"); + logger.debug("Checking if host_details index exists, if not we will add it"); try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'");) { try(ResultSet rs = pstmt.executeQuery();) { if (rs.next()) { - s_logger.debug("Index already exists on host_details - not adding new one"); + logger.debug("Index already exists on host_details - not adding new one"); } else { // add the index try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)");) { pstmtUpdate.executeUpdate(); - s_logger.debug("Index did not exist on host_details - added new one"); + logger.debug("Index did not exist on host_details - added 
new one"); }catch (SQLException e) { throw new CloudRuntimeException("Failed to check/update the host_details index ", e); } @@ -1363,7 +1361,7 @@ private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connect } private void fix22xKVMSnapshots(Connection conn) { - s_logger.debug("Updating KVM snapshots"); + logger.debug("Updating KVM snapshots"); try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null");) { try(ResultSet rs = pstmt.executeQuery();) { @@ -1376,7 +1374,7 @@ private void fix22xKVMSnapshots(Connection conn) { int index = backUpPath.indexOf("snapshots" + File.separator); if (index > 1) { String correctedPath = backUpPath.substring(index); - s_logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath); + logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath); try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? 
where id = ?");) { update_pstmt.setString(1, correctedPath); update_pstmt.setLong(2, id); @@ -1386,7 +1384,7 @@ private void fix22xKVMSnapshots(Connection conn) { } } } - s_logger.debug("Done updating KVM snapshots"); + logger.debug("Done updating KVM snapshots"); }catch (SQLException e) { throw new CloudRuntimeException("Unable to update backup id for KVM snapshots", e); } @@ -1480,7 +1478,7 @@ private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetwor "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5);) { - s_logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); + logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); pstmtUpdate.setLong(1, physicalNetworkId); pstmtUpdate.setLong(2, hostId); pstmtUpdate.setString(3, "F5BigIp"); @@ -1502,7 +1500,7 @@ private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)"; try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx);) { - s_logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); + logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); pstmtUpdate.setLong(1, physicalNetworkId); pstmtUpdate.setLong(2, hostId); pstmtUpdate.setString(3, "JuniperSRX"); @@ -1526,7 +1524,7 @@ private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long + 
"`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,0,0,0,1,0,0,0,0)"; try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) { // add physical network service provider - F5BigIp - s_logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); + logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); pstmtUpdate.setString(1, UUID.randomUUID().toString()); pstmtUpdate.setLong(2, physicalNetworkId); pstmtUpdate.setString(3, "F5BigIp"); @@ -1545,7 +1543,7 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,1,1,1,0,1,1,0,0)"; try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) { // add physical network service provider - JuniperSRX - s_logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); + logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); pstmtUpdate.setString(1, UUID.randomUUID().toString()); pstmtUpdate.setLong(2, physicalNetworkId); pstmtUpdate.setString(3, "JuniperSRX"); @@ -1690,7 +1688,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } - s_logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); + logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); // add mapping for the network in network_external_firewall_device_map String insertFwMapping = @@ -1703,7 +1701,7 @@ private void 
fixZoneUsingExternalDevices(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } - s_logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); + logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); } }catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); @@ -1712,7 +1710,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } // update host details for F5 and SRX devices - s_logger.debug("Updating the host details for F5 and SRX devices"); + logger.debug("Updating the host details for F5 and SRX devices"); try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? 
OR host_id=?");) { sel_pstmt.setLong(1, f5HostId); sel_pstmt.setLong(2, srxHostId); @@ -1740,11 +1738,11 @@ private void fixZoneUsingExternalDevices(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } - s_logger.debug("Successfully updated host details for F5 and SRX devices"); + logger.debug("Successfully updated host details for F5 and SRX devices"); } catch (RuntimeException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } - s_logger.info("Successfully upgraded network using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); + logger.info("Successfully upgraded network using F5 and SRX devices to have a entry in the network_external_lb_device_map and network_external_firewall_device_map"); } } @@ -1755,7 +1753,7 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { String sqlInsertStoreDetail = "INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)"; String sqlUpdateHostAsRemoved = "UPDATE `cloud`.`host` SET removed = now() WHERE type = 'SecondaryStorage' and removed is null"; - s_logger.debug("Migrating secondary storage to image store"); + logger.debug("Migrating secondary storage to image store"); boolean hasS3orSwift = false; try ( PreparedStatement pstmtSelectS3Count = conn.prepareStatement(sqlSelectS3Count); @@ -1770,7 +1768,7 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { ResultSet rsSelectSwiftCount = pstmtSelectSwiftCount.executeQuery(); ResultSet rsNfs = nfsQuery.executeQuery(); ) { - s_logger.debug("Checking if we need to migrate NFS secondary storage to image store or staging store"); + logger.debug("Checking if we need to migrate NFS secondary storage to 
image store or staging store"); int numRows = 0; if (rsSelectS3Count.next()) { numRows = rsSelectS3Count.getInt(1); @@ -1788,7 +1786,7 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { store_role = "ImageCache"; } - s_logger.debug("Migrating NFS secondary storage to " + store_role + " store"); + logger.debug("Migrating NFS secondary storage to " + store_role + " store"); // migrate NFS secondary storage, for nfs, keep previous host_id as the store_id while (rsNfs.next()) { @@ -1820,84 +1818,84 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { storeInsert.executeUpdate(); } - s_logger.debug("Marking NFS secondary storage in host table as removed"); + logger.debug("Marking NFS secondary storage in host table as removed"); pstmtUpdateHostAsRemoved.executeUpdate(); } catch (SQLException e) { String msg = "Unable to migrate secondary storages." + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } - s_logger.debug("Completed migrating secondary storage to image store"); + logger.debug("Completed migrating secondary storage to image store"); } // migrate volume_host_ref to volume_store_ref private void migrateVolumeHostRef(Connection conn) { - s_logger.debug("Updating volume_store_ref table from volume_host_ref table"); + logger.debug("Updating volume_store_ref table from volume_host_ref table"); try(PreparedStatement volStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`");) { int rowCount = volStoreInsert.executeUpdate(); - 
s_logger.debug("Insert modified " + rowCount + " rows"); + logger.debug("Insert modified " + rowCount + " rows"); try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) { rowCount = volStoreUpdate.executeUpdate(); - s_logger.debug("Update modified " + rowCount + " rows"); + logger.debug("Update modified " + rowCount + " rows"); }catch (SQLException e) { - s_logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e); } } catch (SQLException e) { - s_logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e); } - s_logger.debug("Completed updating volume_store_ref table from volume_host_ref table"); + logger.debug("Completed updating volume_store_ref table from volume_host_ref table"); } // migrate template_host_ref to template_store_ref private void migrateTemplateHostRef(Connection conn) { - s_logger.debug("Updating template_store_ref table from template_host_ref table"); + logger.debug("Updating template_store_ref table from template_host_ref table"); try (PreparedStatement tmplStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`");) { int rowCount = 
tmplStoreInsert.executeUpdate(); - s_logger.debug("Insert modified " + rowCount + " rows"); + logger.debug("Insert modified " + rowCount + " rows"); try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) { rowCount = tmplStoreUpdate.executeUpdate(); }catch (SQLException e) { - s_logger.error("Unable to migrate template_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_host_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e); } - s_logger.debug("Update modified " + rowCount + " rows"); + logger.debug("Update modified " + rowCount + " rows"); } catch (SQLException e) { - s_logger.error("Unable to migrate template_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_host_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e); } - s_logger.debug("Completed updating template_store_ref table from template_host_ref table"); + logger.debug("Completed updating template_store_ref table from template_host_ref table"); } // migrate some entry contents of snapshots to snapshot_store_ref private void migrateSnapshotStoreRef(Connection conn) { - s_logger.debug("Updating snapshot_store_ref table from snapshots table"); + logger.debug("Updating snapshot_store_ref table from snapshots table"); try(PreparedStatement snapshotStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is 
null"); ) { //Update all snapshots except KVM snapshots int rowCount = snapshotStoreInsert.executeUpdate(); - s_logger.debug("Inserted " + rowCount + " snapshots into snapshot_store_ref"); + logger.debug("Inserted " + rowCount + " snapshots into snapshot_store_ref"); //backsnap_id for KVM snapshots is complete path. CONCAT is not required try(PreparedStatement snapshotStoreInsert_2 = conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null");) { rowCount = snapshotStoreInsert_2.executeUpdate(); - s_logger.debug("Inserted " + rowCount + " KVM snapshots into snapshot_store_ref"); + logger.debug("Inserted " + rowCount + " KVM snapshots into snapshot_store_ref"); }catch (SQLException e) { - s_logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e); + logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." + e.getMessage(),e); } } catch (SQLException e) { - s_logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e); + logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." 
+ e.getMessage(),e); } - s_logger.debug("Completed updating snapshot_store_ref table from snapshots table"); + logger.debug("Completed updating snapshot_store_ref table from snapshots table"); } // migrate secondary storages S3 from s3 tables to image_store table @@ -1905,7 +1903,7 @@ private void migrateS3ToImageStore(Connection conn) { Long storeId = null; Map s3_store_id_map = new HashMap(); - s_logger.debug("Migrating S3 to image store"); + logger.debug("Migrating S3 to image store"); try ( PreparedStatement storeQuery = conn.prepareStatement("select id from `cloud`.`image_store` where uuid = ?"); PreparedStatement storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)"); @@ -1976,22 +1974,22 @@ private void migrateS3ToImageStore(Connection conn) { } } catch (SQLException e) { String msg = "Unable to migrate S3 secondary storages." + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } - s_logger.debug("Migrating template_s3_ref to template_store_ref"); + logger.debug("Migrating template_s3_ref to template_store_ref"); migrateTemplateS3Ref(conn, s3_store_id_map); - s_logger.debug("Migrating s3 backedup snapshots to snapshot_store_ref"); + logger.debug("Migrating s3 backedup snapshots to snapshot_store_ref"); migrateSnapshotS3Ref(conn, s3_store_id_map); - s_logger.debug("Completed migrating S3 secondary storage to image store"); + logger.debug("Completed migrating S3 secondary storage to image store"); } // migrate template_s3_ref to template_store_ref private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) { - s_logger.debug("Updating template_store_ref table from template_s3_ref table"); + logger.debug("Updating template_store_ref table from template_s3_ref table"); try(PreparedStatement tmplStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, 
physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')"); ) { @@ -2024,23 +2022,23 @@ private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) { tmplStoreInsert.executeUpdate(); } }catch (SQLException e) { - s_logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e); } }catch (SQLException e) { - s_logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e); } } catch (SQLException e) { - s_logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); throw new CloudRuntimeException("Unable to migrate template_s3_ref." 
+ e.getMessage(),e); } - s_logger.debug("Completed migrating template_s3_ref table."); + logger.debug("Completed migrating template_s3_ref table."); } // migrate some entry contents of snapshots to snapshot_store_ref private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) { - s_logger.debug("Updating snapshot_store_ref table from snapshots table for s3"); + logger.debug("Updating snapshot_store_ref table from snapshots table for s3"); try(PreparedStatement snapshotStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')"); ) { @@ -2074,18 +2072,18 @@ private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) { snapshotStoreInsert.executeUpdate(); } }catch (SQLException e) { - s_logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); } }catch (SQLException e) { - s_logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); } } catch (SQLException e) { - s_logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage()); + logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage()); throw new CloudRuntimeException("Unable to migrate s3 backedup snapshots to snapshot_store_ref." 
+ e.getMessage(), e); } - s_logger.debug("Completed updating snapshot_store_ref table from s3 snapshots entries"); + logger.debug("Completed updating snapshot_store_ref table from s3 snapshots entries"); } // migrate secondary storages Swift from swift tables to image_store table @@ -2093,7 +2091,7 @@ private void migrateSwiftToImageStore(Connection conn) { Long storeId = null; Map swift_store_id_map = new HashMap(); - s_logger.debug("Migrating Swift to image store"); + logger.debug("Migrating Swift to image store"); try ( PreparedStatement storeQuery = conn.prepareStatement("select id from `cloud`.`image_store` where uuid = ?"); PreparedStatement storeDetailInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)"); @@ -2146,22 +2144,22 @@ private void migrateSwiftToImageStore(Connection conn) { } } catch (SQLException e) { String msg = "Unable to migrate swift secondary storages." + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } - s_logger.debug("Migrating template_swift_ref to template_store_ref"); + logger.debug("Migrating template_swift_ref to template_store_ref"); migrateTemplateSwiftRef(conn, swift_store_id_map); - s_logger.debug("Migrating swift backedup snapshots to snapshot_store_ref"); + logger.debug("Migrating swift backedup snapshots to snapshot_store_ref"); migrateSnapshotSwiftRef(conn, swift_store_id_map); - s_logger.debug("Completed migrating Swift secondary storage to image store"); + logger.debug("Completed migrating Swift secondary storage to image store"); } // migrate template_s3_ref to template_store_ref private void migrateTemplateSwiftRef(Connection conn, Map swiftStoreMap) { - s_logger.debug("Updating template_store_ref table from template_swift_ref table"); + logger.debug("Updating template_store_ref table from template_swift_ref table"); try ( PreparedStatement tmplStoreInsert = conn.prepareStatement("INSERT INTO 
`cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')"); @@ -2195,15 +2193,15 @@ private void migrateTemplateSwiftRef(Connection conn, Map swiftStore } } catch (SQLException e) { String msg = "Unable to migrate template_swift_ref." + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } - s_logger.debug("Completed migrating template_swift_ref table."); + logger.debug("Completed migrating template_swift_ref table."); } // migrate some entry contents of snapshots to snapshot_store_ref private void migrateSnapshotSwiftRef(Connection conn, Map swiftStoreMap) { - s_logger.debug("Updating snapshot_store_ref table from snapshots table for swift"); + logger.debug("Updating snapshot_store_ref table from snapshots table for swift"); try (PreparedStatement snapshotStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')"); ){ @@ -2229,31 +2227,31 @@ private void migrateSnapshotSwiftRef(Connection conn, Map swiftStore snapshotStoreInsert.executeUpdate(); } }catch (SQLException e) { - s_logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); } }catch (SQLException e) { - s_logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); } } catch (SQLException e) { - 
s_logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); } - s_logger.debug("Completed updating snapshot_store_ref table from swift snapshots entries"); + logger.debug("Completed updating snapshot_store_ref table from swift snapshots entries"); } private void fixNiciraKeys(Connection conn) { //First drop the key if it exists. List keys = new ArrayList(); - s_logger.debug("Dropping foreign key fk_nicira_nvp_nic_map__nic from the table nicira_nvp_nic_map if it exists"); + logger.debug("Dropping foreign key fk_nicira_nvp_nic_map__nic from the table nicira_nvp_nic_map if it exists"); keys.add("fk_nicira_nvp_nic_map__nic"); DbUpgradeUtils.dropKeysIfExist(conn, "nicira_nvp_nic_map", keys, true); //Now add foreign key. try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE");) { pstmt.executeUpdate(); - s_logger.debug("Added foreign key fk_nicira_nvp_nic_map__nic to the table nicira_nvp_nic_map"); + logger.debug("Added foreign key fk_nicira_nvp_nic_map__nic to the table nicira_nvp_nic_map"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to add foreign key fk_nicira_nvp_nic_map__nic to the table nicira_nvp_nic_map", e); } @@ -2262,7 +2260,7 @@ private void fixNiciraKeys(Connection conn) { private void fixRouterKeys(Connection conn) { //First drop the key if it exists. 
List keys = new ArrayList(); - s_logger.debug("Dropping foreign key fk_router_network_ref__router_id from the table router_network_ref if it exists"); + logger.debug("Dropping foreign key fk_router_network_ref__router_id from the table router_network_ref if it exists"); keys.add("fk_router_network_ref__router_id"); DbUpgradeUtils.dropKeysIfExist(conn, "router_network_ref", keys, true); //Now add foreign key. @@ -2270,14 +2268,14 @@ private void fixRouterKeys(Connection conn) { conn.prepareStatement("ALTER TABLE `cloud`.`router_network_ref` ADD CONSTRAINT `fk_router_network_ref__router_id` FOREIGN KEY (`router_id`) REFERENCES `domain_router` (`id`) ON DELETE CASCADE");) { pstmt.executeUpdate(); - s_logger.debug("Added foreign key fk_router_network_ref__router_id to the table router_network_ref"); + logger.debug("Added foreign key fk_router_network_ref__router_id to the table router_network_ref"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to add foreign key fk_router_network_ref__router_id to the table router_network_ref", e); } } private void encryptSite2SitePSK(Connection conn) { - s_logger.debug("Encrypting Site2Site Customer Gateway pre-shared key"); + logger.debug("Encrypting Site2Site Customer Gateway pre-shared key"); try (PreparedStatement select_pstmt = conn.prepareStatement("select id, ipsec_psk from `cloud`.`s2s_customer_gateway`");){ try(ResultSet rs = select_pstmt.executeQuery();) { @@ -2304,7 +2302,7 @@ private void encryptSite2SitePSK(Connection conn) { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable to encrypt Site2Site Customer Gateway pre-shared key ", e); } - s_logger.debug("Done encrypting Site2Site Customer Gateway pre-shared key"); + logger.debug("Done encrypting Site2Site Customer Gateway pre-shared key"); } protected void updateConcurrentConnectionsInNetworkOfferings(Connection conn) { @@ -2388,31 +2386,31 @@ private void migrateDatafromIsoIdInVolumesTable(Connection conn) { 
conn.prepareStatement("ALTER TABLE `cloud`.`volumes` CHANGE COLUMN `iso_id1` `iso_id` bigint(20) unsigned COMMENT 'The id of the iso from which the volume was created'");) { alter_iso_pstmt.executeUpdate(); }catch (SQLException e) { - s_logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } //implies iso_id1 is not present, so do nothing. } }catch (SQLException e) { - s_logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } //implies iso_id1 is not present, so do nothing. } } }catch (SQLException e) { - s_logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } //implies iso_id1 is not present, so do nothing. 
} } catch (SQLException e) { - s_logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } //implies iso_id1 is not present, so do nothing. } @@ -2421,7 +2419,7 @@ private void migrateDatafromIsoIdInVolumesTable(Connection conn) { protected void setRAWformatForRBDVolumes(Connection conn) { try(PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes SET format = 'RAW' WHERE pool_id IN(SELECT id FROM storage_pool WHERE pool_type = 'RBD')");) { - s_logger.debug("Setting format to RAW for all volumes on RBD primary storage pools"); + logger.debug("Setting format to RAW for all volumes on RBD primary storage pools"); pstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Failed to update volume format to RAW for volumes on RBD pools due to exception ", e); @@ -2429,7 +2427,7 @@ protected void setRAWformatForRBDVolumes(Connection conn) { } private void upgradeVpcServiceMap(Connection conn) { - s_logger.debug("Upgrading VPC service Map"); + logger.debug("Upgrading VPC service Map"); try(PreparedStatement listVpc = conn.prepareStatement("SELECT id, vpc_offering_id FROM `cloud`.`vpc` where removed is NULL");) { //Get all vpc Ids along with vpc offering Id @@ -2461,7 +2459,7 @@ private void upgradeVpcServiceMap(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("Error during VPC service map upgrade", e); } - s_logger.debug("Upgraded service map for VPC: " + vpc_id); + logger.debug("Upgraded service map for VPC: " + vpc_id); } } } catch (SQLException e) { @@ -2470,7 +2468,7 @@ private void upgradeVpcServiceMap(Connection conn) { } private void upgradeResourceCount(Connection 
conn) { - s_logger.debug("upgradeResourceCount start"); + logger.debug("upgradeResourceCount start"); try( PreparedStatement sel_dom_pstmt = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL "); ResultSet rsAccount = sel_dom_pstmt.executeQuery(); @@ -2599,7 +2597,7 @@ private void upgradeResourceCount(Connection conn) { throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); } } - s_logger.debug("upgradeResourceCount finish"); + logger.debug("upgradeResourceCount finish"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java index 3703040771bd..1df197b4fcf5 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41100to41110.java @@ -25,13 +25,11 @@ import java.sql.ResultSet; import java.sql.SQLException; -import org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41100to41110 implements DbUpgrade { - final static Logger LOG = Logger.getLogger(Upgrade41000to41100.class); +public class Upgrade41100to41110 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -105,8 +103,8 @@ private void uncrypt(Connection conn, String name) try ( ResultSet resultSet = prepSelStmt.executeQuery(); ) { - if (LOG.isInfoEnabled()) { - LOG.info("updating setting '" + name + "'"); + if (logger.isInfoEnabled()) { + logger.info("updating setting '" + name + "'"); } if (resultSet.next()) { if ("Secure".equals(resultSet.getString(1))) { @@ -118,10 +116,10 @@ private void uncrypt(Connection conn, String name) 
prepUpdStmt.setString(2, name); prepUpdStmt.execute(); } catch (SQLException e) { - if (LOG.isInfoEnabled()) { - LOG.info("failed to update configuration item '" + name + "' with value '" + value + "'"); - if (LOG.isDebugEnabled()) { - LOG.debug("no update because ", e); + if (logger.isInfoEnabled()) { + logger.info("failed to update configuration item '" + name + "' with value '" + value + "'"); + if (logger.isDebugEnabled()) { + logger.debug("no update because ", e); } } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41110to41120.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41110to41120.java index f7782cebf464..85be41fc6f2e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41110to41120.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41110to41120.java @@ -20,12 +20,10 @@ import java.io.InputStream; import java.sql.Connection; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41110to41120 implements DbUpgrade { - final static Logger LOG = Logger.getLogger(Upgrade41110to41120.class); +public class Upgrade41110to41120 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41130.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41130.java index d9eec476628d..d011f4fb0fc4 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41130.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41130.java @@ -20,7 +20,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade41120to41130 implements DbUpgrade { +public class Upgrade41120to41130 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java index f68f04a53aa8..ce0e1e39c2ef 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41120to41200.java @@ -23,11 +23,9 @@ import java.sql.SQLException; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; -public class Upgrade41120to41200 implements DbUpgrade { +public class Upgrade41120to41200 extends DbUpgradeAbstractImpl { - final static Logger LOG = Logger.getLogger(Upgrade41120to41200.class); @Override public String[] getUpgradableVersionRange() { @@ -64,7 +62,7 @@ private void updateManagementServerHostUuid(Connection conn) { try (final PreparedStatement updateStatement = conn.prepareStatement("UPDATE cloud.mshost SET uuid=UUID()")) { updateStatement.executeUpdate(); } catch (SQLException e) { - LOG.error("Failed to add an UUID to each management server.", e); + logger.error("Failed to add an UUID to each management server.", e); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java index 2de8dc983587..dd6f2cf30367 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41200to41300.java @@ -22,7 +22,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41200to41300 implements DbUpgrade { +public class Upgrade41200to41300 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41300to41310.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41300to41310.java index ac6149f78447..4cae3d445c7c 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41300to41310.java +++ 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41300to41310.java @@ -19,7 +19,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade41300to41310 implements DbUpgrade { +public class Upgrade41300to41310 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { return new String[] {"4.13.0.0", "4.13.1.0"}; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41310to41400.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41310to41400.java index f1a333e7d48b..91fd5b6d57f0 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41310to41400.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41310to41400.java @@ -20,13 +20,11 @@ import java.io.InputStream; import java.sql.Connection; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41310to41400 implements DbUpgrade { +public class Upgrade41310to41400 extends DbUpgradeAbstractImpl { - final static Logger LOG = Logger.getLogger(Upgrade41310to41400.class); @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41400to41500.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41400to41500.java index ba969ae879e2..4750915baa60 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41400to41500.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41400to41500.java @@ -27,13 +27,10 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41400to41500 implements DbUpgrade { - - final static Logger LOG = Logger.getLogger(Upgrade41400to41500.class); +public class Upgrade41400to41500 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -74,7 +71,7 @@ private void 
addRolePermissionsForNewReadOnlyAndSupportRoles(final Connection co } private void addRolePermissionsForReadOnlyAdmin(final Connection conn) { - LOG.debug("Adding role permissions for new read-only admin role"); + logger.debug("Adding role permissions for new read-only admin role"); try { PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`roles` WHERE name = 'Read-Only Admin - Default' AND is_default = 1"); ResultSet rs = pstmt.executeQuery(); @@ -108,15 +105,15 @@ private void addRolePermissionsForReadOnlyAdmin(final Connection conn) { if (pstmt != null && !pstmt.isClosed()) { pstmt.close(); } - LOG.debug("Successfully added role permissions for new read-only admin role"); + logger.debug("Successfully added role permissions for new read-only admin role"); } catch (final SQLException e) { - LOG.error("Exception while adding role permissions for read-only admin role: " + e.getMessage()); + logger.error("Exception while adding role permissions for read-only admin role: " + e.getMessage()); throw new CloudRuntimeException("Exception while adding role permissions for read-only admin role: " + e.getMessage(), e); } } private void addRolePermissionsForReadOnlyUser(final Connection conn) { - LOG.debug("Adding role permissions for new read-only user role"); + logger.debug("Adding role permissions for new read-only user role"); try { PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`roles` WHERE name = 'Read-Only User - Default' AND is_default = 1"); ResultSet rs = pstmt.executeQuery(); @@ -179,15 +176,15 @@ private void addRolePermissionsForReadOnlyUser(final Connection conn) { if (pstmt != null && !pstmt.isClosed()) { pstmt.close(); } - LOG.debug("Successfully added role permissions for new read-only user role"); + logger.debug("Successfully added role permissions for new read-only user role"); } catch (final SQLException e) { - LOG.error("Exception while adding role permissions for read-only user role: " + e.getMessage()); + 
logger.error("Exception while adding role permissions for read-only user role: " + e.getMessage()); throw new CloudRuntimeException("Exception while adding role permissions for read-only user role: " + e.getMessage(), e); } } private void addRolePermissionsForSupportAdmin(final Connection conn) { - LOG.debug("Adding role permissions for new support admin role"); + logger.debug("Adding role permissions for new support admin role"); try { PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`roles` WHERE name = 'Support Admin - Default' AND is_default = 1"); ResultSet rs = pstmt.executeQuery(); @@ -264,15 +261,15 @@ private void addRolePermissionsForSupportAdmin(final Connection conn) { if (pstmt != null && !pstmt.isClosed()) { pstmt.close(); } - LOG.debug("Successfully added role permissions for new support admin role"); + logger.debug("Successfully added role permissions for new support admin role"); } catch (final SQLException e) { - LOG.error("Exception while adding role permissions for support admin role: " + e.getMessage()); + logger.error("Exception while adding role permissions for support admin role: " + e.getMessage()); throw new CloudRuntimeException("Exception while adding role permissions for support admin role: " + e.getMessage(), e); } } private void addRolePermissionsForSupportUser(final Connection conn) { - LOG.debug("Adding role permissions for new support user role"); + logger.debug("Adding role permissions for new support user role"); try { PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`roles` WHERE name = 'Support User - Default' AND is_default = 1"); ResultSet rs = pstmt.executeQuery(); @@ -341,9 +338,9 @@ private void addRolePermissionsForSupportUser(final Connection conn) { if (pstmt != null && !pstmt.isClosed()) { pstmt.close(); } - LOG.debug("Successfully added role permissions for new support user role"); + logger.debug("Successfully added role permissions for new support user role"); } catch 
(final SQLException e) { - LOG.error("Exception while adding role permissions for support user role: " + e.getMessage()); + logger.error("Exception while adding role permissions for support user role: " + e.getMessage()); throw new CloudRuntimeException("Exception while adding role permissions for support user role: " + e.getMessage(), e); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java index 344bbccc95b2..0ca81be3936f 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java @@ -27,14 +27,11 @@ import java.util.Map; import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41500to41510 implements DbUpgrade, DbUpgradeSystemVmTemplate { - - final static Logger LOG = Logger.getLogger(Upgrade41500to41510.class); +public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { @Override public String[] getUpgradableVersionRange() { @@ -70,7 +67,7 @@ public void performDataMigration(Connection conn) { @Override @SuppressWarnings("serial") public void updateSystemVmTemplates(final Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); final Set hypervisorsListInUse = new HashSet(); try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { @@ -98,7 +95,7 @@ public void updateSystemVmTemplates(final Connection conn) { } } } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); + 
logger.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); } @@ -147,7 +144,7 @@ public void updateSystemVmTemplates(final Connection conn) { }; for (final Map.Entry hypervisorAndTemplateName : NewTemplateNameList.entrySet()) { - LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); + logger.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null and account_id in (select id from account where type = 1 and removed is NULL) order by id desc limit 1")) { // Get systemvm template id for corresponding hypervisor long templateId = -1; @@ -157,7 +154,7 @@ public void updateSystemVmTemplates(final Connection conn) { templateId = rs.getLong(1); } } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage()); + logger.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of templates", e); } @@ -167,7 +164,7 @@ public void updateSystemVmTemplates(final Connection conn) { templ_type_pstmt.setLong(1, templateId); templ_type_pstmt.executeUpdate(); } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); + logger.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); } // 
update template ID of system Vms @@ -177,7 +174,7 @@ public void updateSystemVmTemplates(final Connection conn) { update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.getKey().toString()); update_templ_id_pstmt.executeUpdate(); } catch (final Exception e) { - LOG.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId + logger.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId + ": " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId, e); @@ -190,7 +187,7 @@ public void updateSystemVmTemplates(final Connection conn) { update_pstmt.setString(2, routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey())); update_pstmt.executeUpdate(); } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + logger.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue() + ": " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e); @@ -203,14 +200,14 @@ public void updateSystemVmTemplates(final Connection conn) { update_pstmt.setString(2, "minreq.sysvmtemplate.version"); update_pstmt.executeUpdate(); } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.15.1: " + e.getMessage()); + logger.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' 
to 4.15.1: " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.15.1", e); } } else { if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); } else { - LOG.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey() + logger.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey() + " hypervisor is not used, so not failing upgrade"); // Update the latest template URLs for corresponding // hypervisor @@ -221,7 +218,7 @@ public void updateSystemVmTemplates(final Connection conn) { update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString()); update_templ_url_pstmt.executeUpdate(); } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + logger.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + hypervisorAndTemplateName.getKey().toString() + ": " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " + hypervisorAndTemplateName.getKey().toString(), e); @@ -229,11 +226,11 @@ public void updateSystemVmTemplates(final Connection conn) { } } } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage()); + logger.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage()); throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting ids of templates", e); } } - LOG.debug("Updating System Vm Template IDs 
Complete"); + logger.debug("Updating System Vm Template IDs Complete"); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41520.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41520.java index bf91c8f7aceb..41b362c4bf7d 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41520.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41510to41520.java @@ -21,11 +21,8 @@ import java.io.InputStream; import java.sql.Connection; -import org.apache.log4j.Logger; -public class Upgrade41510to41520 implements DbUpgrade { - - final static Logger LOG = Logger.getLogger(Upgrade41510to41520.class); +public class Upgrade41510to41520 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java index 107742076fd2..76227d434173 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java @@ -28,14 +28,12 @@ import com.cloud.upgrade.RolePermissionChecker; import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.cloudstack.acl.RoleType; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41520to41600 implements DbUpgrade, DbUpgradeSystemVmTemplate { +public class Upgrade41520to41600 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41520to41600.class); private SystemVmTemplateRegistration systemVmTemplateRegistration; private RolePermissionChecker rolePermissionChecker = new RolePermissionChecker(); @@ -82,21 +80,21 @@ private void populateAnnotationPermissions(Connection conn) { } private void checkAndPersistAnnotationPermissions(Connection conn, 
RoleType roleType, List rules) { - LOG.debug("Checking the annotation permissions for the role: " + roleType.getId()); + logger.debug("Checking the annotation permissions for the role: " + roleType.getId()); for (String rule : rules) { - LOG.debug("Checking the annotation permissions for the role: " + roleType.getId() + " and rule: " + rule); + logger.debug("Checking the annotation permissions for the role: " + roleType.getId() + " and rule: " + rule); if (!rolePermissionChecker.existsRolePermissionByRoleIdAndRule(conn, roleType.getId(), rule)) { - LOG.debug("Inserting role permission for role: " + roleType.getId() + " and rule: " + rule); + logger.debug("Inserting role permission for role: " + roleType.getId() + " and rule: " + rule); rolePermissionChecker.insertAnnotationRulePermission(conn, roleType.getId(), rule); } else { - LOG.debug("Found existing role permission for role: " + roleType.getId() + " and rule: " + rule + + logger.debug("Found existing role permission for role: " + roleType.getId() + " and rule: " + rule + ", not updating it"); } } } private void generateUuidForExistingSshKeyPairs(Connection conn) { - LOG.debug("Generating uuid for existing ssh key-pairs"); + logger.debug("Generating uuid for existing ssh key-pairs"); try { PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`ssh_keypairs` WHERE uuid is null"); ResultSet rs = pstmt.executeQuery(); @@ -112,10 +110,10 @@ private void generateUuidForExistingSshKeyPairs(Connection conn) { if (!pstmt.isClosed()) { pstmt.close(); } - LOG.debug("Successfully generated uuid for existing ssh key-pairs"); + logger.debug("Successfully generated uuid for existing ssh key-pairs"); } catch (SQLException e) { String errMsg = "Exception while generating uuid for existing ssh key-pairs: " + e.getMessage(); - LOG.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg, e); } } @@ -127,7 +125,7 @@ private void initSystemVmTemplateRegistration() { @Override 
@SuppressWarnings("serial") public void updateSystemVmTemplates(final Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41600to41610.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41600to41610.java index 8094a2e9a3c1..3208b4ad8f97 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41600to41610.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41600to41610.java @@ -19,14 +19,12 @@ import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.io.InputStream; import java.sql.Connection; -public class Upgrade41600to41610 implements DbUpgrade, DbUpgradeSystemVmTemplate { +public class Upgrade41600to41610 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41600to41610.class); private SystemVmTemplateRegistration systemVmTemplateRegistration; @Override @@ -77,7 +75,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41610to41700.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41610to41700.java index bb4e70567c62..0a0ab0b9f5a9 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41610to41700.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41610to41700.java @@ -24,14 +24,12 @@ import java.sql.SQLException; import java.util.UUID; -import 
org.apache.log4j.Logger; import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41610to41700 implements DbUpgrade, DbUpgradeSystemVmTemplate { +public class Upgrade41610to41700 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41700to41710.class); private SystemVmTemplateRegistration systemVmTemplateRegistration; @Override @@ -82,7 +80,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); @@ -92,7 +90,7 @@ public void updateSystemVmTemplates(Connection conn) { } public void fixWrongDatastoreClusterPoolUuid(Connection conn) { - LOG.debug("Replacement of faulty pool uuids on datastorecluster"); + logger.debug("Replacement of faulty pool uuids on datastorecluster"); try (PreparedStatement pstmt = conn.prepareStatement("SELECT id,uuid FROM storage_pool " + "WHERE uuid NOT LIKE \"%-%-%-%\" AND removed IS NULL " + "AND pool_type = 'DatastoreCluster';"); ResultSet rs = pstmt.executeQuery()) { @@ -109,7 +107,7 @@ public void fixWrongDatastoreClusterPoolUuid(Connection conn) { updateStmt.executeBatch(); } catch (SQLException ex) { String errorMsg = "fixWrongPoolUuid:Exception while updating faulty pool uuids"; - LOG.error(errorMsg,ex); + logger.error(errorMsg,ex); throw new CloudRuntimeException(errorMsg, ex); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java index a228a01b9e80..e3eb2bf514df 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java +++ 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41700to41710.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.VolumeVO; @@ -32,9 +31,8 @@ import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41700to41710 implements DbUpgrade, DbUpgradeSystemVmTemplate { +public class Upgrade41700to41710 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41700to41710.class); private SystemVmTemplateRegistration systemVmTemplateRegistration; private PrimaryDataStoreDao storageDao; @@ -88,7 +86,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41710to41720.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41710to41720.java index 91b7cfe978dc..9854268c1ff1 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41710to41720.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41710to41720.java @@ -18,14 +18,12 @@ import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.io.InputStream; import java.sql.Connection; -public class Upgrade41710to41720 implements DbUpgrade, DbUpgradeSystemVmTemplate { +public class Upgrade41710to41720 extends DbUpgradeAbstractImpl implements 
DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41710to41720.class); private SystemVmTemplateRegistration systemVmTemplateRegistration; @@ -64,7 +62,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41720to41800.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41720to41800.java index 77fffb17ddd0..6a90396deb0b 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41720to41800.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41720to41800.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.usage.UsageTypes; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.time.DateUtils; -import org.apache.log4j.Logger; import java.io.InputStream; import java.sql.Connection; @@ -39,9 +38,8 @@ import java.util.List; import java.util.Map; -public class Upgrade41720to41800 implements DbUpgrade, DbUpgradeSystemVmTemplate { +public class Upgrade41720to41800 extends DbUpgradeAbstractImpl implements DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41720to41800.class); private GuestOsMapper guestOsMapper = new GuestOsMapper(); @@ -101,7 +99,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); @@ -111,7 +109,7 @@ public void updateSystemVmTemplates(Connection conn) { } protected void 
convertQuotaTariffsToNewParadigm(Connection conn) { - LOG.info("Converting quota tariffs to new paradigm."); + logger.info("Converting quota tariffs to new paradigm."); List usageTypeResponses = UsageTypes.listUsageTypes(); @@ -120,14 +118,14 @@ protected void convertQuotaTariffsToNewParadigm(Connection conn) { String tariffTypeDescription = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(usageTypeResponse, "description", "usageType"); - LOG.info(String.format("Converting quota tariffs of type %s to new paradigm.", tariffTypeDescription)); + logger.info(String.format("Converting quota tariffs of type %s to new paradigm.", tariffTypeDescription)); for (boolean previousTariff : Arrays.asList(true, false)) { Map tariffs = selectTariffs(conn, usageType, previousTariff, tariffTypeDescription); int tariffsSize = tariffs.size(); if (tariffsSize < 2) { - LOG.info(String.format("Quota tariff of type %s has [%s] %s register(s). Tariffs with less than 2 register do not need to be converted to new paradigm.", + logger.info(String.format("Quota tariff of type %s has [%s] %s register(s). Tariffs with less than 2 register do not need to be converted to new paradigm.", tariffTypeDescription, tariffsSize, previousTariff ? "previous of current" : "next to current")); continue; } @@ -143,7 +141,7 @@ protected Map selectTariffs(Connection conn, Integer usageType, bool String selectQuotaTariffs = String.format("SELECT id, effective_on FROM cloud_usage.quota_tariff WHERE %s AND usage_type = ? ORDER BY effective_on, updated_on;", previousTariff ? "usage_name = name" : "removed is null"); - LOG.info(String.format("Selecting %s quota tariffs of type [%s] according to SQL [%s].", previousTariff ? "previous of current" : "next to current", + logger.info(String.format("Selecting %s quota tariffs of type [%s] according to SQL [%s].", previousTariff ? 
"previous of current" : "next to current", tariffTypeDescription, selectQuotaTariffs)); try (PreparedStatement pstmt = conn.prepareStatement(selectQuotaTariffs)) { @@ -158,7 +156,7 @@ protected Map selectTariffs(Connection conn, Integer usageType, bool } catch (SQLException e) { String message = String.format("Unable to retrieve %s quota tariffs of type [%s] due to [%s].", previousTariff ? "previous" : "next", tariffTypeDescription, e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } @@ -168,7 +166,7 @@ protected void executeUpdateQuotaTariffSetEndDateAndRemoved(Connection conn, Int Object[] ids = tariffs.keySet().toArray(); - LOG.info(String.format("Updating %s registers of %s quota tariffs of type [%s] with SQL [%s].", tariffs.size() - 1, setRemoved ? "previous of current" : + logger.info(String.format("Updating %s registers of %s quota tariffs of type [%s] with SQL [%s].", tariffs.size() - 1, setRemoved ? "previous of current" : "next to current", tariffTypeDescription, updateQuotaTariff)); for (int i = 0; i < tariffs.size() - 1; i++) { @@ -195,19 +193,19 @@ protected void executeUpdateQuotaTariffSetEndDateAndRemoved(Connection conn, Int pstmt.setLong(2, id); } - LOG.info(String.format("Updating \"end_date\" to [%s] %sof quota tariff with ID [%s].", sqlEndDate, updateRemoved, id)); + logger.info(String.format("Updating \"end_date\" to [%s] %sof quota tariff with ID [%s].", sqlEndDate, updateRemoved, id)); pstmt.executeUpdate(); } catch (SQLException e) { String message = String.format("Unable to update \"end_date\" %s of quota tariffs of usage type [%s] due to [%s].", setRemoved ? 
"and \"removed\"" : "", usageType, e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } } protected void convertVmResourcesQuotaTypesToRunningVmQuotaType(Connection conn) { - LOG.info("Converting quota tariffs of type \"vCPU\", \"CPU_SPEED\" and \"MEMORY\" to \"RUNNING_VM\"."); + logger.info("Converting quota tariffs of type \"vCPU\", \"CPU_SPEED\" and \"MEMORY\" to \"RUNNING_VM\"."); String insertSql = "INSERT INTO cloud_usage.quota_tariff (usage_type, usage_name, usage_unit, usage_discriminator, currency_value, effective_on, updated_on," + " updated_by, uuid, name, description, removed, end_date, activation_rule)\n" @@ -225,11 +223,11 @@ protected void convertVmResourcesQuotaTypesToRunningVmQuotaType(Connection conn) pstmt.executeUpdate(); } catch (SQLException e) { String message = String.format("Failed to convert quota tariffs of type \"vCPU\", \"CPU_SPEED\" and \"MEMORY\" to \"RUNNING_VM\" due to [%s].", e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } - LOG.info("Disabling unused quota tariffs of type \"vCPU\", \"CPU_SPEED\" and \"MEMORY\"."); + logger.info("Disabling unused quota tariffs of type \"vCPU\", \"CPU_SPEED\" and \"MEMORY\"."); String updateSql = "UPDATE cloud_usage.quota_tariff SET removed = now() WHERE usage_type in (15, 16, 17) and removed is null;"; @@ -237,7 +235,7 @@ protected void convertVmResourcesQuotaTypesToRunningVmQuotaType(Connection conn) pstmt.executeUpdate(); } catch (SQLException e) { String message = String.format("Failed disable quota tariffs of type \"vCPU\", \"CPU_SPEED\" and \"MEMORY\" due to [%s].", e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } @@ -251,7 +249,7 @@ private void correctGuestOsNames() { } private void updateGuestOsMappings() { - LOG.debug("Updating guest OS mappings"); + logger.debug("Updating guest OS 
mappings"); // Add support for SUSE Linux Enterprise Desktop 12 SP3 (64-bit) for Xenserver 8.1.0 List mappings = new ArrayList(); @@ -708,7 +706,7 @@ private void updateGuestOsMappings() { } private void correctGuestOsIdsInHypervisorMapping(final Connection conn) { - LOG.debug("Correcting guest OS ids in hypervisor mappings"); + logger.debug("Correcting guest OS ids in hypervisor mappings"); guestOsMapper.updateGuestOsIdInHypervisorMapping(conn, 10, "Ubuntu 20.04 LTS", new GuestOSHypervisorMapping("Xenserver", "8.2.0", "Ubuntu Focal Fossa 20.04")); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41800to41810.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41800to41810.java index a58d9965259a..b8d2e6180681 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41800to41810.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41800to41810.java @@ -22,7 +22,6 @@ import com.cloud.storage.GuestOSVO; import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.io.InputStream; import java.sql.Connection; @@ -30,8 +29,7 @@ import java.util.HashSet; import java.util.List; -public class Upgrade41800to41810 implements DbUpgrade, DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41800to41810.class); +public class Upgrade41800to41810 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { private GuestOsMapper guestOsMapper = new GuestOsMapper(); private SystemVmTemplateRegistration systemVmTemplateRegistration; @@ -96,7 +94,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); @@ -106,12 +104,12 @@ 
public void updateSystemVmTemplates(Connection conn) { } private void updateGuestOsMappings(Connection conn) { - LOG.debug("Updating guest OS mappings"); + logger.debug("Updating guest OS mappings"); GuestOsMapper guestOsMapper = new GuestOsMapper(); List mappings = new ArrayList<>(); - LOG.debug("Adding Ubuntu 20.04 support for VMware 6.5+"); + logger.debug("Adding Ubuntu 20.04 support for VMware 6.5+"); guestOsMapper.addGuestOsHypervisorMapping(new GuestOSHypervisorMapping("VMware", "6.5", "ubuntu64Guest"), 10, "Ubuntu 20.04 LTS"); guestOsMapper.addGuestOsHypervisorMapping(new GuestOSHypervisorMapping("VMware", "6.7", "ubuntu64Guest"), 10, "Ubuntu 20.04 LTS"); guestOsMapper.addGuestOsHypervisorMapping(new GuestOSHypervisorMapping("VMware", "6.7.1", "ubuntu64Guest"), 10, "Ubuntu 20.04 LTS"); @@ -123,7 +121,7 @@ private void updateGuestOsMappings(Connection conn) { guestOsMapper.addGuestOsHypervisorMapping(new GuestOSHypervisorMapping("VMware", "7.0.3.0", "ubuntu64Guest"), 10, "Ubuntu 20.04 LTS"); guestOsMapper.addGuestOsHypervisorMapping(new GuestOSHypervisorMapping("VMware", "8.0", "ubuntu64Guest"), 10, "Ubuntu 20.04 LTS"); - LOG.debug("Adding Ubuntu 22.04 support for KVM and VMware 6.5+"); + logger.debug("Adding Ubuntu 22.04 support for KVM and VMware 6.5+"); mappings.add(new GuestOSHypervisorMapping("KVM", "default", "Ubuntu 22.04 LTS")); mappings.add(new GuestOSHypervisorMapping("VMware", "6.5", "ubuntu64Guest")); mappings.add(new GuestOSHypervisorMapping("VMware", "6.7", "ubuntu64Guest")); @@ -138,7 +136,7 @@ private void updateGuestOsMappings(Connection conn) { guestOsMapper.addGuestOsAndHypervisorMappings(10, "Ubuntu 22.04 LTS", mappings); mappings.clear(); - LOG.debug("Correcting guest OS names in hypervisor mappings for VMware 8.0 ad 8.0.0.1"); + logger.debug("Correcting guest OS names in hypervisor mappings for VMware 8.0 ad 8.0.0.1"); final String hypervisorVMware = Hypervisor.HypervisorType.VMware.name(); final String hypervisorVersionVmware8 = "8.0"; 
guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "AlmaLinux 9", new GuestOSHypervisorMapping(hypervisorVMware, hypervisorVersionVmware8, "almalinux_64Guest")); @@ -148,7 +146,7 @@ private void updateGuestOsMappings(Connection conn) { guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Oracle Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, "8.0.0.1", "oracleLinux9_64Guest")); guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Rocky Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, "8.0.0.1", "rockylinux_64Guest")); - LOG.debug("Correcting guest OS names in hypervisor mappings for Red Hat Enterprise Linux 9"); + logger.debug("Correcting guest OS names in hypervisor mappings for Red Hat Enterprise Linux 9"); guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Red Hat Enterprise Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, "7.0", "rhel9_64Guest")); guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Red Hat Enterprise Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, "7.0.1.0", "rhel9_64Guest")); guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Red Hat Enterprise Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, "7.0.2.0", "rhel9_64Guest")); @@ -156,7 +154,7 @@ private void updateGuestOsMappings(Connection conn) { guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Red Hat Enterprise Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, hypervisorVersionVmware8, "rhel9_64Guest")); guestOsMapper.updateGuestOsNameInHypervisorMapping(1, "Red Hat Enterprise Linux 9", new GuestOSHypervisorMapping(hypervisorVMware, "8.0.0.1", "rhel9_64Guest")); - LOG.debug("Adding new guest OS ids in hypervisor mappings for VMware 8.0"); + logger.debug("Adding new guest OS ids in hypervisor mappings for VMware 8.0"); // Add support for darwin22_64Guest from VMware 8.0 mappings.add(new GuestOSHypervisorMapping(hypervisorVMware, hypervisorVersionVmware8, "darwin22_64Guest")); 
guestOsMapper.addGuestOsAndHypervisorMappings(7, "macOS 13 (64-bit)", mappings); @@ -209,7 +207,7 @@ private void updateGuestOsMappings(Connection conn) { } private void copyGuestOsMappingsToVMware80u1() { - LOG.debug("Copying guest OS mappings from VMware 8.0 to VMware 8.0.1"); + logger.debug("Copying guest OS mappings from VMware 8.0 to VMware 8.0.1"); GuestOsMapper guestOsMapper = new GuestOsMapper(); guestOsMapper.copyGuestOSHypervisorMappings(Hypervisor.HypervisorType.VMware, "8.0", "8.0.1"); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java index 13e30c0f6e2d..e2b1ae1399b6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java @@ -20,7 +20,6 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.DateUtil; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import org.jasypt.exceptions.EncryptionOperationNotPossibleException; import java.io.InputStream; @@ -34,8 +33,7 @@ import java.text.SimpleDateFormat; import java.util.Date; -public class Upgrade41810to41900 implements DbUpgrade, DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41810to41900.class); +public class Upgrade41810to41900 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { private SystemVmTemplateRegistration systemVmTemplateRegistration; private static final String ACCOUNT_DETAILS = "account_details"; @@ -97,7 +95,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { systemVmTemplateRegistration.updateSystemVmTemplates(conn); @@ -107,15 +105,15 @@ public 
void updateSystemVmTemplates(Connection conn) { } protected void decryptConfigurationValuesFromAccountAndDomainScopesNotInSecureHiddenCategories(Connection conn) { - LOG.info("Decrypting global configuration values from the following tables: account_details and domain_details."); + logger.info("Decrypting global configuration values from the following tables: account_details and domain_details."); Map accountsMap = getConfigsWithScope(conn, ACCOUNT_DETAILS); updateConfigValuesWithScope(conn, accountsMap, ACCOUNT_DETAILS); - LOG.info("Successfully decrypted configurations from account_details table."); + logger.info("Successfully decrypted configurations from account_details table."); Map domainsMap = getConfigsWithScope(conn, DOMAIN_DETAILS); updateConfigValuesWithScope(conn, domainsMap, DOMAIN_DETAILS); - LOG.info("Successfully decrypted configurations from domain_details table."); + logger.info("Successfully decrypted configurations from domain_details table."); } protected Map getConfigsWithScope(Connection conn, String table) { @@ -132,19 +130,19 @@ protected Map getConfigsWithScope(Connection conn, String table) { return configsToBeUpdated; } catch (SQLException e) { String message = String.format("Unable to retrieve data from table [%s] due to [%s].", table, e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } public void migrateBackupDates(Connection conn) { - LOG.info("Trying to convert backups' date column from varchar(255) to datetime type."); + logger.info("Trying to convert backups' date column from varchar(255) to datetime type."); modifyDateColumnNameAndCreateNewOne(conn); fetchDatesAndMigrateToNewColumn(conn); dropOldColumn(conn); - LOG.info("Finished converting backups' date column from varchar(255) to datetime."); + logger.info("Finished converting backups' date column from varchar(255) to datetime."); } private void modifyDateColumnNameAndCreateNewOne(Connection conn) { @@ -153,7 
+151,7 @@ private void modifyDateColumnNameAndCreateNewOne(Connection conn) { pstmt.execute(); } catch (SQLException e) { String message = String.format("Unable to alter backups' date column name due to [%s].", e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } @@ -162,7 +160,7 @@ private void modifyDateColumnNameAndCreateNewOne(Connection conn) { pstmt.execute(); } catch (SQLException e) { String message = String.format("Unable to crate new backups' column date due to [%s].", e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } @@ -177,12 +175,12 @@ protected void updateConfigValuesWithScope(Connection conn, Map co pstmt.setString(1, decryptedValue); pstmt.setLong(2, config.getKey()); - LOG.info(String.format("Updating config with ID [%s] to value [%s].", config.getKey(), decryptedValue)); + logger.info(String.format("Updating config with ID [%s] to value [%s].", config.getKey(), decryptedValue)); pstmt.executeUpdate(); } catch (SQLException | EncryptionOperationNotPossibleException e) { String message = String.format("Unable to update config value with ID [%s] on table [%s] due to [%s]. The config value may already be decrypted.", config.getKey(), table, e); - LOG.error(message); + logger.error(message); throw new CloudRuntimeException(message, e); } } @@ -203,7 +201,7 @@ private void fetchDatesAndMigrateToNewColumn(Connection conn) { } } catch (SQLException e) { String message = String.format("Unable to retrieve backup dates due to [%s].", e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } @@ -224,7 +222,7 @@ private java.sql.Date tryToTransformStringToDate(String date) { } if (parsedDate == null) { String msg = String.format("Unable to parse date [%s]. 
Will change backup date to null.", date); - LOG.error(msg); + logger.error(msg); return null; } @@ -240,7 +238,7 @@ private void updateBackupDate(Connection conn, long id, java.sql.Date date) { pstmt.executeUpdate(); } catch (SQLException e) { String message = String.format("Unable to update backup date with id [%s] to date [%s] due to [%s].", id, date, e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } @@ -251,7 +249,7 @@ private void dropOldColumn(Connection conn) { pstmt.execute(); } catch (SQLException e) { String message = String.format("Unable to drop old_date column due to [%s].", e.getMessage()); - LOG.error(message, e); + logger.error(message, e); throw new CloudRuntimeException(message, e); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41900to42000.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41900to42000.java index 71cc6f074a24..200c5fda2326 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41900to42000.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41900to42000.java @@ -19,13 +19,10 @@ import java.io.InputStream; import java.sql.Connection; -import org.apache.log4j.Logger; - import com.cloud.upgrade.SystemVmTemplateRegistration; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade41900to42000 implements DbUpgrade, DbUpgradeSystemVmTemplate { - final static Logger LOG = Logger.getLogger(Upgrade41900to42000.class); +public class Upgrade41900to42000 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { private SystemVmTemplateRegistration systemVmTemplateRegistration; @Override @@ -75,7 +72,7 @@ private void initSystemVmTemplateRegistration() { @Override public void updateSystemVmTemplates(Connection conn) { - LOG.debug("Updating System Vm template IDs"); + logger.debug("Updating System Vm template IDs"); initSystemVmTemplateRegistration(); try { 
systemVmTemplateRegistration.updateSystemVmTemplates(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade420to421.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade420to421.java index d7ba2ed20da3..9ca342d7f4ad 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade420to421.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade420to421.java @@ -23,13 +23,11 @@ import java.sql.ResultSet; import java.sql.SQLException; -import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade420to421 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade420to421.class); +public class Upgrade420to421 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -89,7 +87,7 @@ private void updateOverprovisioningPerVm(Connection conn) { } } // Need to populate only when overprovisioning factor doesn't pre exist. 
- s_logger.debug("Starting updating user_vm_details with cpu/memory overprovisioning factors"); + logger.debug("Starting updating user_vm_details with cpu/memory overprovisioning factors"); try ( PreparedStatement pstmt2 = conn .prepareStatement("select id, hypervisor_type from `cloud`.`vm_instance` where removed is null and id not in (select vm_id from `cloud`.`user_vm_details` where name='cpuOvercommitRatio')"); @@ -123,14 +121,14 @@ private void updateOverprovisioningPerVm(Connection conn) { } } } - s_logger.debug("Done updating user_vm_details with cpu/memory overprovisioning factors"); + logger.debug("Done updating user_vm_details with cpu/memory overprovisioning factors"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to update cpu/memory overprovisioning factors", e); } } private void upgradeResourceCount(Connection conn) { - s_logger.debug("upgradeResourceCount start"); + logger.debug("upgradeResourceCount start"); String sqlSelectAccountIds = "select id, domain_id FROM `cloud`.`account` where removed is NULL "; String sqlSelectOfferingTotals = "SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)" + " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`" @@ -236,7 +234,7 @@ private void upgradeResourceCount(Connection conn) { } } } - s_logger.debug("upgradeResourceCount finish"); + logger.debug("upgradeResourceCount finish"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade421to430.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade421to430.java index 55e7d3bdac22..88428f455f3b 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade421to430.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade421to430.java @@ -24,13 +24,11 @@ import java.sql.ResultSet; import java.sql.SQLException; -import 
org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade421to430 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade421to430.class); +public class Upgrade421to430 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -88,11 +86,11 @@ private void upgradeMemoryOfSsvmOffering(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade ram_size of service offering for secondary storage vm. ", e); } - s_logger.debug("Done upgrading RAM for service offering of Secondary Storage VM to " + newRamSize); + logger.debug("Done upgrading RAM for service offering of Secondary Storage VM to " + newRamSize); } private void encryptImageStoreDetails(Connection conn) { - s_logger.debug("Encrypting image store details"); + logger.debug("Encrypting image store details"); try ( PreparedStatement selectPstmt = conn.prepareStatement("select id, value from `cloud`.`image_store_details` where name = 'key' or name = 'secretkey'"); ResultSet rs = selectPstmt.executeQuery(); @@ -115,7 +113,7 @@ private void encryptImageStoreDetails(Connection conn) { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt image_store_details values ", e); } - s_logger.debug("Done encrypting image_store_details"); + logger.debug("Done encrypting image_store_details"); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade430to440.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade430to440.java index 43d4d877361e..9a2077471e62 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade430to440.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade430to440.java @@ -23,14 +23,12 @@ import java.sql.ResultSet; import java.sql.SQLException; -import org.apache.log4j.Logger; import com.cloud.network.Network; import 
com.cloud.network.Networks.BroadcastDomainType; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade430to440 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade430to440.class); +public class Upgrade430to440 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -163,12 +161,12 @@ private void secondaryIpsAccountAndDomainIdsUpdate(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Exception while Moving private zone information to dedicated resources", e); } - s_logger.debug("Done updating vm nic secondary ip account and domain ids"); + logger.debug("Done updating vm nic secondary ip account and domain ids"); } private void moveCidrsToTheirOwnTable(Connection conn) { - s_logger.debug("Moving network acl item cidrs to a row per cidr"); + logger.debug("Moving network acl item cidrs to a row per cidr"); String networkAclItemSql = "SELECT id, cidr FROM `cloud`.`network_acl_item`"; String networkAclItemCidrSql = "INSERT INTO `cloud`.`network_acl_item_cidrs` (network_acl_item_id, cidr) VALUES (?,?)"; @@ -184,7 +182,7 @@ private void moveCidrsToTheirOwnTable(Connection conn) { long itemId = rsItems.getLong(1); // get the source cidr list String cidrList = rsItems.getString(2); - s_logger.debug("Moving '" + cidrList + "' to a row per cidr"); + logger.debug("Moving '" + cidrList + "' to a row per cidr"); // split it String[] cidrArray = cidrList.split(","); // insert a record per cidr @@ -197,11 +195,11 @@ private void moveCidrsToTheirOwnTable(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Exception while Moving network acl item cidrs to a row per cidr", e); } - s_logger.debug("Done moving network acl item cidrs to a row per cidr"); + logger.debug("Done moving network acl item cidrs to a row per cidr"); } private void updateVlanUris(Connection conn) { - s_logger.debug("updating vlan URIs"); + logger.debug("updating vlan URIs"); 
try(PreparedStatement selectstatement = conn.prepareStatement("SELECT id, vlan_id FROM `cloud`.`vlan` where vlan_id not like '%:%'"); ResultSet results = selectstatement.executeQuery()) { @@ -224,7 +222,7 @@ private void updateVlanUris(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable to update vlan URIs ", e); } - s_logger.debug("Done updating vlan URIs"); + logger.debug("Done updating vlan URIs"); } @Override diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade431to440.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade431to440.java index 98b52ac0aa96..54aa3b76d6f9 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade431to440.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade431to440.java @@ -17,10 +17,8 @@ package com.cloud.upgrade.dao; -import org.apache.log4j.Logger; -public class Upgrade431to440 extends Upgrade430to440 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade431to440.class); +public class Upgrade431to440 extends Upgrade430to440 { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade432to440.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade432to440.java index ded0db474d5e..3b934c1f3447 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade432to440.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade432to440.java @@ -17,10 +17,8 @@ package com.cloud.upgrade.dao; -import org.apache.log4j.Logger; -public class Upgrade432to440 extends Upgrade431to440 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade432to440.class); +public class Upgrade432to440 extends Upgrade431to440 { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade440to441.java 
b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade440to441.java index a51f464a797f..4309a1a53c5e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade440to441.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade440to441.java @@ -22,7 +22,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade440to441 implements DbUpgrade { +public class Upgrade440to441 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade441to442.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade441to442.java index 4234428f72b0..1993b15b6403 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade441to442.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade441to442.java @@ -23,7 +23,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade441to442 implements DbUpgrade { +public class Upgrade441to442 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade442to450.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade442to450.java index 54e8da5f6e9e..803d521ac24c 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade442to450.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade442to450.java @@ -28,13 +28,11 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade442to450 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade442to450.class); +public class Upgrade442to450 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -82,7 +80,7 @@ private void updateMaxRouterSizeConfig(Connection conn) { } catch 
(UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt configuration values ", e); } - s_logger.debug("Done updating router.ram.size config to 256"); + logger.debug("Done updating router.ram.size config to 256"); } private void upgradeMemoryOfVirtualRoutervmOffering(Connection conn) { @@ -109,7 +107,7 @@ private void upgradeMemoryOfVirtualRoutervmOffering(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade ram_size of service offering for domain router. ", e); } - s_logger.debug("Done upgrading RAM for service offering of domain router to " + newRamSize); + logger.debug("Done upgrading RAM for service offering of domain router to " + newRamSize); } private void upgradeMemoryOfInternalLoadBalancervmOffering(Connection conn) { @@ -134,7 +132,7 @@ private void upgradeMemoryOfInternalLoadBalancervmOffering(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade ram_size of service offering for internal loadbalancer vm. 
", e); } - s_logger.debug("Done upgrading RAM for service offering of internal loadbalancer vm to " + newRamSize); + logger.debug("Done upgrading RAM for service offering of internal loadbalancer vm to " + newRamSize); } @Override @@ -155,7 +153,7 @@ private void dropInvalidKeyFromStoragePoolTable(Connection conn) { keys.add("id_2"); uniqueKeys.put("storage_pool", keys); - s_logger.debug("Dropping id_2 key from storage_pool table"); + logger.debug("Dropping id_2 key from storage_pool table"); for (Map.Entry> entry: uniqueKeys.entrySet()) { DbUpgradeUtils.dropKeysIfExist(conn,entry.getKey(), entry.getValue(), false); } @@ -168,7 +166,7 @@ private void dropDuplicatedForeignKeyFromAsyncJobTable(Connection conn) { keys.add("fk_async_job_join_map__join_job_id"); foreignKeys.put("async_job_join_map", keys); - s_logger.debug("Dropping fk_async_job_join_map__join_job_id key from async_job_join_map table"); + logger.debug("Dropping fk_async_job_join_map__join_job_id key from async_job_join_map table"); for (Map.Entry> entry: foreignKeys.entrySet()) { DbUpgradeUtils.dropKeysIfExist(conn,entry.getKey(), entry.getValue(), true); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to444.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to444.java index b8110546a13c..61674d9870f6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to444.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to444.java @@ -22,7 +22,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade443to444 implements DbUpgrade { +public class Upgrade443to444 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to450.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to450.java index 80b2c14e00c7..9f571fedc2c8 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to450.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade443to450.java @@ -17,7 +17,7 @@ package com.cloud.upgrade.dao; -public class Upgrade443to450 extends Upgrade442to450 implements DbUpgrade { +public class Upgrade443to450 extends Upgrade442to450 { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade444to450.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade444to450.java index 52fc7299810a..d393e73f59a9 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade444to450.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade444to450.java @@ -17,7 +17,7 @@ package com.cloud.upgrade.dao; -public class Upgrade444to450 extends Upgrade442to450 implements DbUpgrade { +public class Upgrade444to450 extends Upgrade442to450 { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade450to451.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade450to451.java index 015d463347ae..ffdf2cc12738 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade450to451.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade450to451.java @@ -26,13 +26,11 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade450to451 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade450to451.class); +public class Upgrade450to451 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -97,7 +95,7 @@ private void encryptKeyInKeyStore(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Exception while encrypting key column in keystore table", e); } - s_logger.debug("Done 
encrypting keystore's key column"); + logger.debug("Done encrypting keystore's key column"); } private void encryptIpSecPresharedKeysOfRemoteAccessVpn(Connection conn) { @@ -111,7 +109,7 @@ private void encryptIpSecPresharedKeysOfRemoteAccessVpn(Connection conn) { try { preSharedKey = DBEncryptionUtil.decrypt(preSharedKey); } catch (CloudRuntimeException ignored) { - s_logger.debug("The ipsec_psk preshared key id=" + rowId + "in remote_access_vpn is not encrypted, encrypting it."); + logger.debug("The ipsec_psk preshared key id=" + rowId + "in remote_access_vpn is not encrypted, encrypting it."); } try (PreparedStatement updateStatement = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` SET ipsec_psk=? WHERE id=?");) { updateStatement.setString(1, DBEncryptionUtil.encrypt(preSharedKey)); @@ -122,7 +120,7 @@ private void encryptIpSecPresharedKeysOfRemoteAccessVpn(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable to update the remote_access_vpn's preshared key ipsec_psk column", e); } - s_logger.debug("Done encrypting remote_access_vpn's ipsec_psk column"); + logger.debug("Done encrypting remote_access_vpn's ipsec_psk column"); } private void encryptStoragePoolUserInfo(Connection conn) { @@ -151,7 +149,7 @@ private void encryptStoragePoolUserInfo(Connection conn) { } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable encrypt storage pool user info ", e); } - s_logger.debug("Done encrypting storage_pool's user_info column"); + logger.debug("Done encrypting storage_pool's user_info column"); } private void updateUserVmDetailsWithNicAdapterType(Connection conn) { @@ -160,13 +158,13 @@ private void updateUserVmDetailsWithNicAdapterType(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Failed to update user_vm_details table with nicAdapter entries by copying from vm_template_detail table", e); } - s_logger.debug("Done. 
Updated user_vm_details table with nicAdapter entries by copying from vm_template_detail table. This affects only VM/templates with hypervisor_type as VMware."); + logger.debug("Done. Updated user_vm_details table with nicAdapter entries by copying from vm_template_detail table. This affects only VM/templates with hypervisor_type as VMware."); } private void upgradeVMWareLocalStorage(Connection conn) { try (PreparedStatement updatePstmt = conn.prepareStatement("UPDATE storage_pool SET pool_type='VMFS',host_address=@newaddress WHERE (@newaddress:=concat('VMFS datastore: ', path)) IS NOT NULL AND scope = 'HOST' AND pool_type = 'LVM' AND id IN (SELECT * FROM (SELECT storage_pool.id FROM storage_pool,cluster WHERE storage_pool.cluster_id = cluster.id AND cluster.hypervisor_type='VMware') AS t);");) { updatePstmt.executeUpdate(); - s_logger.debug("Done, upgraded VMWare local storage pool type to VMFS and host_address to the VMFS format"); + logger.debug("Done, upgraded VMWare local storage pool type to VMFS and host_address to the VMFS format"); } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade VMWare local storage pool type", e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade451to452.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade451to452.java index 788b6f28ef1e..d019558a33ca 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade451to452.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade451to452.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade451to452 implements DbUpgrade { +public class Upgrade451to452 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to453.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to453.java index 3bc39ebc68fd..17ec3414896b 100644 --- 
a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to453.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to453.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade452to453 implements DbUpgrade { +public class Upgrade452to453 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to460.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to460.java index 91fe345f4a12..d14d6c9e5435 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to460.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade452to460.java @@ -25,12 +25,10 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade452to460 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade452to460.class); +public class Upgrade452to460 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -67,7 +65,7 @@ public void performDataMigration(final Connection conn) { public void updateVMInstanceUserId(final Connection conn) { // For schemas before this, copy first user from an account_id which // deployed already running VMs - s_logger.debug("Updating vm_instance column user_id using first user in vm_instance's account_id"); + logger.debug("Updating vm_instance column user_id using first user in vm_instance's account_id"); final String vmInstanceSql = "SELECT id, account_id FROM `cloud`.`vm_instance`"; final String userSql = "SELECT id FROM `cloud`.`user` where account_id=?"; final String userIdUpdateSql = "update `cloud`.`vm_instance` set user_id=? 
where id=?"; @@ -97,7 +95,7 @@ public void updateVMInstanceUserId(final Connection conn) { } catch (final SQLException e) { throw new CloudRuntimeException("Unable to update user Ids for previously deployed VMs", e); } - s_logger.debug("Done updating user Ids for previously deployed VMs"); + logger.debug("Done updating user Ids for previously deployed VMs"); addRedundancyForNwAndVpc(conn); removeBumPriorityColumn(conn); } @@ -142,14 +140,14 @@ private void removeBumPriorityColumn(final Connection conn) { private void addIndexForVMInstance(final Connection conn) { // Drop index if it exists final List indexList = new ArrayList(); - s_logger.debug("Dropping index i_vm_instance__instance_name from vm_instance table if it exists"); + logger.debug("Dropping index i_vm_instance__instance_name from vm_instance table if it exists"); indexList.add("i_vm_instance__instance_name"); DbUpgradeUtils.dropKeysIfExist(conn, "vm_instance", indexList, false); // Now add index try (PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`vm_instance` ADD INDEX `i_vm_instance__instance_name`(`instance_name`)");) { pstmt.executeUpdate(); - s_logger.debug("Added index i_vm_instance__instance_name to vm_instance table"); + logger.debug("Added index i_vm_instance__instance_name to vm_instance table"); } catch (final SQLException e) { throw new CloudRuntimeException("Unable to add index i_vm_instance__instance_name to vm_instance table for the column instance_name", e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade453to460.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade453to460.java index 2dd4b0aad27b..321d0304f9d0 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade453to460.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade453to460.java @@ -17,7 +17,7 @@ package com.cloud.upgrade.dao; -public class Upgrade453to460 extends Upgrade452to460 implements DbUpgrade { +public class Upgrade453to460 extends 
Upgrade452to460 { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade460to461.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade460to461.java index 88bda4616c80..3642a59e516c 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade460to461.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade460to461.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade460to461 implements DbUpgrade { +public class Upgrade460to461 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade461to470.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade461to470.java index e7922cebf0ac..d2241e0498c8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade461to470.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade461to470.java @@ -18,15 +18,13 @@ package com.cloud.upgrade.dao; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.io.InputStream; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; -public class Upgrade461to470 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade461to470.class); +public class Upgrade461to470 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -58,10 +56,10 @@ public void alterAddColumnToCloudUsage(final Connection conn) { final String alterTableSql = "ALTER TABLE `cloud_usage`.`cloud_usage` ADD COLUMN `quota_calculated` tinyint(1) DEFAULT 0 NOT NULL COMMENT 'quota calculation status'"; try (PreparedStatement pstmt = conn.prepareStatement(alterTableSql)) { pstmt.executeUpdate(); - s_logger.info("Altered cloud_usage.cloud_usage table and added column quota_calculated"); + logger.info("Altered 
cloud_usage.cloud_usage table and added column quota_calculated"); } catch (SQLException e) { if (e.getMessage().contains("quota_calculated")) { - s_logger.warn("cloud_usage.cloud_usage table already has a column called quota_calculated"); + logger.warn("cloud_usage.cloud_usage table already has a column called quota_calculated"); } else { throw new CloudRuntimeException("Unable to create column quota_calculated in table cloud_usage.cloud_usage", e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade470to471.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade470to471.java index 08cdfdd53f55..0464381dd602 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade470to471.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade470to471.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade470to471 implements DbUpgrade { +public class Upgrade470to471 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade471to480.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade471to480.java index 3b3a0bba03ec..614e1d8779ec 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade471to480.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade471to480.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade471to480 implements DbUpgrade { +public class Upgrade471to480 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade480to481.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade480to481.java index be33709dd921..d1aa4214837f 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade480to481.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade480to481.java 
@@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade480to481 implements DbUpgrade { +public class Upgrade480to481 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade481to490.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade481to490.java index 2165d809f852..5c950a86b0e3 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade481to490.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade481to490.java @@ -27,13 +27,11 @@ import com.cloud.user.Account; import org.apache.cloudstack.acl.RoleType; -import org.apache.log4j.Logger; import com.cloud.utils.db.ScriptRunner; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade481to490 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade481to490.class); +public class Upgrade481to490 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { @@ -74,7 +72,7 @@ private void migrateAccountsToDefaultRoles(final Connection conn) { final Integer accountType = selectResultSet.getInt(2); final Long roleId = RoleType.getByAccountType(Account.Type.getFromValue(accountType)).getId(); if (roleId < 1L || roleId > 4L) { - s_logger.warn("Skipping role ID migration due to invalid role_id resolved for account id=" + accountId); + logger.warn("Skipping role ID migration due to invalid role_id resolved for account id=" + accountId); continue; } try (final PreparedStatement updateStatement = conn.prepareStatement("UPDATE `cloud`.`account` SET account.role_id = ? WHERE account.id = ? 
;")) { @@ -82,14 +80,14 @@ private void migrateAccountsToDefaultRoles(final Connection conn) { updateStatement.setLong(2, accountId); updateStatement.executeUpdate(); } catch (SQLException e) { - s_logger.error("Failed to update cloud.account role_id for account id:" + accountId + " with exception: " + e.getMessage()); + logger.error("Failed to update cloud.account role_id for account id:" + accountId + " with exception: " + e.getMessage()); throw new CloudRuntimeException("Exception while updating cloud.account role_id", e); } } } catch (SQLException e) { throw new CloudRuntimeException("Exception while migrating existing account table's role_id column to a role based on account type", e); } - s_logger.debug("Done migrating existing accounts to use one of default roles based on account type"); + logger.debug("Done migrating existing accounts to use one of default roles based on account type"); } private void setupRolesAndPermissionsForDynamicChecker(final Connection conn) { @@ -101,7 +99,7 @@ private void setupRolesAndPermissionsForDynamicChecker(final Connection conn) { pstmt.executeUpdate(); } catch (SQLException e) { if (e.getMessage().contains("role_id")) { - s_logger.warn("cloud.account table already has the role_id column, skipping altering table and migration of accounts"); + logger.warn("cloud.account table already has the role_id column, skipping altering table and migration of accounts"); return; } else { throw new CloudRuntimeException("Unable to create column role_id in table cloud.account", e); @@ -116,20 +114,20 @@ private void setupRolesAndPermissionsForDynamicChecker(final Connection conn) { migrateAccountsToDefaultRoles(conn); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Configuring default role-api mappings, use migrate-dynamicroles.py instead if you want to migrate rules from an existing commands.properties file"); + if (logger.isDebugEnabled()) { + logger.debug("Configuring default role-api mappings, use migrate-dynamicroles.py instead 
if you want to migrate rules from an existing commands.properties file"); } final String scriptFile = "META-INF/db/create-default-role-api-mappings.sql"; final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); if (script == null) { - s_logger.error("Unable to find default role-api mapping sql file, please configure api per role manually"); + logger.error("Unable to find default role-api mapping sql file, please configure api per role manually"); return; } try(final InputStreamReader reader = new InputStreamReader(script)) { ScriptRunner runner = new ScriptRunner(conn, false, true); runner.runScript(reader); } catch (SQLException | IOException e) { - s_logger.error("Unable to insert default api-role mappings from file: " + script + ". Please configure api per role manually, giving up!", e); + logger.error("Unable to insert default api-role mappings from file: " + script + ". Please configure api per role manually, giving up!", e); } } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade490to4910.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade490to4910.java index 8757d7fa5395..fdb5fd261b91 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade490to4910.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade490to4910.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade490to4910 implements DbUpgrade { +public class Upgrade490to4910 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4910to4920.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4910to4920.java index 1950c8f28b88..69cd5b796a8e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4910to4920.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4910to4920.java @@ -22,7 +22,7 @@ import 
java.io.InputStream; import java.sql.Connection; -public class Upgrade4910to4920 implements DbUpgrade { +public class Upgrade4910to4920 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4920to4930.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4920to4930.java index bc02c95064c8..9f5437e8d87e 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4920to4930.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4920to4930.java @@ -22,7 +22,7 @@ import java.io.InputStream; import java.sql.Connection; -public class Upgrade4920to4930 implements DbUpgrade { +public class Upgrade4920to4930 extends DbUpgradeAbstractImpl { @Override public String[] getUpgradableVersionRange() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4930to41000.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4930to41000.java index 46abd44aa6a0..631308a975ea 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4930to41000.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade4930to41000.java @@ -23,12 +23,10 @@ import java.sql.SQLException; import java.util.HashMap; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; -public class Upgrade4930to41000 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade4930to41000.class); +public class Upgrade4930to41000 extends DbUpgradeAbstractImpl { public static class MemoryValues { long max; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot217to224.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot217to224.java index 3e39f81f761c..4c856ab154e0 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot217to224.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot217to224.java @@ -21,7 
+21,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class UpgradeSnapshot217to224 implements DbUpgrade { +public class UpgradeSnapshot217to224 extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot223to224.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot223to224.java index 8e546e7dd4d5..7a5b7de17b5d 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot223to224.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/UpgradeSnapshot223to224.java @@ -21,7 +21,7 @@ import com.cloud.utils.exception.CloudRuntimeException; -public class UpgradeSnapshot223to224 implements DbUpgrade { +public class UpgradeSnapshot223to224 extends DbUpgradeAbstractImpl { @Override public InputStream[] getPrepareScripts() { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/VersionDaoImpl.java index 67fe70fdb29e..90e1912408c8 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/VersionDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/VersionDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.upgrade.dao.VersionVO.Step; @@ -40,7 +39,6 @@ @Component @DB() public class VersionDaoImpl extends GenericDaoBase implements VersionDao { - private static final Logger s_logger = Logger.getLogger(VersionDaoImpl.class); final GenericSearchBuilder CurrentVersionSearch; final SearchBuilder AllFieldsSearch; @@ -74,7 +72,7 @@ public VersionVO findByVersion(final String version, final Step step) { @DB public String getCurrentVersion() { try (Connection conn = TransactionLegacy.getStandaloneConnection();) { - s_logger.debug("Checking to see if the database is at a version before it was the version table is created"); + 
logger.debug("Checking to see if the database is at a version before it was the version table is created"); try ( PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'version'"); @@ -89,8 +87,8 @@ public String getCurrentVersion() { pstmt_domain.executeQuery(); return "2.1.8"; } catch (final SQLException e) { - s_logger.debug("Assuming the exception means domain_id is not there."); - s_logger.debug("No version table and no nics table, returning 2.1.7"); + logger.debug("Assuming the exception means domain_id is not there."); + logger.debug("No version table and no nics table, returning 2.1.7"); return "2.1.7"; } } else { @@ -98,7 +96,7 @@ public String getCurrentVersion() { ResultSet rs_static_nat = pstmt_static_nat.executeQuery();){ return "2.2.1"; } catch (final SQLException e) { - s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); + logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); return "2.2.2"; } } @@ -125,7 +123,7 @@ public String getCurrentVersion() { } // Use nics table information and is_static_nat field from firewall_rules table to determine version information - s_logger.debug("Version table exists, but it's empty; have to confirm that version is 2.2.2"); + logger.debug("Version table exists, but it's empty; have to confirm that version is 2.2.2"); try (PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'nics'"); ResultSet rs = pstmt.executeQuery();){ if (!rs.next()) { @@ -136,7 +134,7 @@ public String getCurrentVersion() { throw new CloudRuntimeException("Unable to determine the current version, version table exists and empty, " + "nics table doesn't exist, is_static_nat field exists in firewall_rules table"); } catch (final SQLException e) { - s_logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); + 
logger.debug("Assuming the exception means static_nat field doesn't exist in firewall_rules table, returning version 2.2.2"); return "2.2.2"; } } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/BucketStatisticsDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/BucketStatisticsDaoImpl.java index 2261389eab6b..1df24079cee1 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/BucketStatisticsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/BucketStatisticsDaoImpl.java @@ -20,14 +20,12 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.util.List; @Component public class BucketStatisticsDaoImpl extends GenericDaoBase implements BucketStatisticsDao { - private static final Logger s_logger = Logger.getLogger(BucketStatisticsDaoImpl.class); private final SearchBuilder AllFieldsSearch; private final SearchBuilder AccountSearch; diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java index 712f81807c72..3403a8dfe5bb 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageBackupDaoImpl.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.TimeZone; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.exception.CloudException; @@ -37,7 +36,6 @@ @Component public class UsageBackupDaoImpl extends GenericDaoBase implements UsageBackupDao { - public static final Logger LOGGER = Logger.getLogger(UsageBackupDaoImpl.class); protected static final String UPDATE_DELETED = "UPDATE usage_backup SET removed = ? WHERE account_id = ? AND vm_id = ? 
and removed IS NULL"; protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT id, zone_id, account_id, domain_id, vm_id, backup_offering_id, size, protected_size, created, removed FROM usage_backup WHERE " + " account_id = ? AND ((removed IS NULL AND created <= ?) OR (created BETWEEN ? AND ?) OR (removed BETWEEN ? AND ?) " + @@ -55,7 +53,7 @@ public void updateMetrics(final Long vmId, final Long size, final Long virtualSi update(vo.getId(), vo); } } catch (final Exception e) { - LOGGER.error("Error updating backup metrics: " + e.getMessage(), e); + logger.error("Error updating backup metrics: " + e.getMessage(), e); } } @@ -72,13 +70,13 @@ public void removeUsage(Long accountId, Long vmId, Date eventDate) { pstmt.executeUpdate(); } } catch (SQLException e) { - LOGGER.error("Error removing UsageBackupVO: " + e.getMessage(), e); + logger.error("Error removing UsageBackupVO: " + e.getMessage(), e); throw new CloudException("Remove backup usage exception: " + e.getMessage(), e); } txn.commit(); } catch (Exception e) { txn.rollback(); - LOGGER.error("Exception caught while removing UsageBackupVO: " + e.getMessage(), e); + logger.error("Exception caught while removing UsageBackupVO: " + e.getMessage(), e); } finally { txn.close(); } @@ -128,7 +126,7 @@ public List getUsageRecords(Long accountId, Date startDate, Date } } catch (Exception e) { txn.rollback(); - LOGGER.warn("Error getting VM backup usage records", e); + logger.warn("Error getting VM backup usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java index 0d9e727abe21..2335043b7c5f 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageDaoImpl.java @@ -34,7 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.acl.RoleType; -import 
org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.sql.PreparedStatement; @@ -49,7 +48,6 @@ @Component public class UsageDaoImpl extends GenericDaoBase implements UsageDao { - public static final Logger s_logger = Logger.getLogger(UsageDaoImpl.class.getName()); private static final String DELETE_ALL = "DELETE FROM cloud_usage"; private static final String DELETE_ALL_BY_ACCOUNTID = "DELETE FROM cloud_usage WHERE account_id = ?"; private static final String DELETE_ALL_BY_INTERVAL = "DELETE FROM cloud_usage WHERE end_date < DATE_SUB(CURRENT_DATE(), INTERVAL ? DAY)"; @@ -108,7 +106,7 @@ public void deleteRecordsForAccount(Long accountId) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error retrieving usage vm instances for account id: " + accountId, ex); + logger.error("error retrieving usage vm instances for account id: " + accountId, ex); } finally { txn.close(); } @@ -156,7 +154,7 @@ public void saveAccounts(List accounts) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving account to cloud_usage db", ex); + logger.error("error saving account to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -186,7 +184,7 @@ public void updateAccounts(List accounts) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error updating account to cloud_usage db", ex); + logger.error("error updating account to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -227,7 +225,7 @@ public void saveUserStats(List userStats) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving user stats to cloud_usage db", ex); + logger.error("error saving user stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -254,7 +252,7 @@ public void updateUserStats(List userStats) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error updating 
user stats to cloud_usage db", ex); + logger.error("error updating user stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -271,7 +269,7 @@ public Long getLastAccountId() { return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last account id", ex); + logger.error("error getting last account id", ex); } return null; } @@ -288,7 +286,7 @@ public Long getLastUserStatsId() { return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last user stats id", ex); + logger.error("error getting last user stats id", ex); } return null; } @@ -305,7 +303,7 @@ public Long getLastBucketStatsId() { return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last bucket stats id", ex); + logger.error("error getting last bucket stats id", ex); } return null; } @@ -329,7 +327,7 @@ public void saveBucketStats(List bucketStats) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving bucket stats to cloud_usage db", ex); + logger.error("error saving bucket stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -351,7 +349,7 @@ public void updateBucketStats(List bucketStats) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error updating bucket stats to cloud_usage db", ex); + logger.error("error updating bucket stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -370,7 +368,7 @@ public List listPublicTemplatesByAccount(long accountId) { templateList.add(Long.valueOf(rs.getLong(1))); } } catch (Exception ex) { - s_logger.error("error listing public templates", ex); + logger.error("error listing public templates", ex); } return templateList; } @@ -387,7 +385,7 @@ public Long getLastVmDiskStatsId() { return Long.valueOf(rs.getLong(1)); } } catch (Exception ex) { - s_logger.error("error getting last vm disk stats id", 
ex); + logger.error("error getting last vm disk stats id", ex); } return null; } @@ -420,7 +418,7 @@ public void updateVmDiskStats(List vmDiskStats) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error updating vm disk stats to cloud_usage db", ex); + logger.error("error updating vm disk stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } @@ -466,7 +464,7 @@ public void saveVmDiskStats(List vmDiskStats) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving vm disk stats to cloud_usage db", ex); + logger.error("error saving vm disk stats to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } @@ -533,7 +531,7 @@ public void saveUsageRecords(List usageRecords) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving usage records to cloud_usage db", ex); + logger.error("error saving usage records to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -551,7 +549,7 @@ public void removeOldUsageRecords(int days) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error removing old cloud_usage records for interval: " + days); + logger.error("error removing old cloud_usage records for interval: " + days); } finally { txn.close(); } @@ -568,7 +566,7 @@ public UsageVO doInTransaction(final TransactionStatus status) { @Override public Pair, Integer> listUsageRecordsPendingForQuotaAggregation(long accountId, long domainId) { - s_logger.debug(String.format("Retrieving pending usage records for accountId [%s] and domainId [%s].", accountId, domainId)); + logger.debug(String.format("Retrieving pending usage records for accountId [%s] and domainId [%s].", accountId, domainId)); return Transaction.execute(TransactionLegacy.USAGE_DB, (TransactionCallback, Integer>>) status -> { Filter usageFilter = new Filter(UsageVO.class, "startDate", true, null, null); @@ -594,7 +592,7 @@ public List> 
listAccountResourcesInThePeriod(long accountId String startDateString = DateUtil.getOutputString(startDate); String endDateString = DateUtil.getOutputString(endDate); - s_logger.debug(String.format("Retrieving account resources between [%s] and [%s] for accountId [%s] and usageType [%s].", startDateString, endDateString, accountId, + logger.debug(String.format("Retrieving account resources between [%s] and [%s] for accountId [%s] and usageType [%s].", startDateString, endDateString, accountId, usageType)); TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -617,7 +615,7 @@ public List> listAccountResourcesInThePeriod(long accountId return accountResourcesOfTheLastDay; } catch (SQLException e) { - s_logger.error(String.format("Failed to retrieve account resources between [%s] and [%s] for accountId [%s] and usageType [%s] due to [%s]. Returning an empty list of" + logger.error(String.format("Failed to retrieve account resources between [%s] and [%s] for accountId [%s] and usageType [%s] due to [%s]. 
Returning an empty list of" + " resources.", startDateString, endDateString, accountId, usageType, e.getMessage()), e); return new ArrayList<>(); diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageIPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageIPAddressDaoImpl.java index 2dcb181c6c77..9c0b8f87ab0e 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageIPAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageIPAddressDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageIPAddressVO; @@ -36,7 +35,6 @@ @Component public class UsageIPAddressDaoImpl extends GenericDaoBase implements UsageIPAddressDao { - public static final Logger s_logger = Logger.getLogger(UsageIPAddressDaoImpl.class.getName()); protected static final String UPDATE_RELEASED = "UPDATE usage_ip_address SET released = ? WHERE account_id = ? AND public_ip_address = ? 
and released IS NULL"; protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = @@ -79,7 +77,7 @@ public void update(UsageIPAddressVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error updating usageIPAddressVO:"+e.getMessage(), e); + logger.error("Error updating usageIPAddressVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -145,7 +143,7 @@ public List getUsageRecords(Long accountId, Long domainId, Dat } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java index 065dc309ebea..6d460aadd093 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageJobDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageJobVO; @@ -34,7 +33,6 @@ @Component public class UsageJobDaoImpl extends GenericDaoBase implements UsageJobDao { - private static final Logger s_logger = Logger.getLogger(UsageJobDaoImpl.class.getName()); private static final String GET_LAST_JOB_SUCCESS_DATE_MILLIS = "SELECT end_millis FROM cloud_usage.usage_job WHERE end_millis > 0 and success = 1 ORDER BY end_millis DESC LIMIT 1"; @@ -51,7 +49,7 @@ public long getLastJobSuccessDateMillis() { return rs.getLong(1); } } catch (Exception ex) { - s_logger.error("error getting last usage job success date", ex); + logger.error("error getting last usage job success date", ex); } finally { txn.close(); } @@ -77,7 +75,7 @@ public void updateJobSuccess(Long jobId, long startMillis, long endMillis, long txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error updating job success date", ex); + 
logger.error("error updating job success date", ex); throw new CloudRuntimeException(ex.getMessage()); } finally { txn.close(); diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java index 7260caecd1aa..ba5c70fbc32e 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageLoadBalancerPolicyVO; @@ -36,7 +35,6 @@ @Component public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase implements UsageLoadBalancerPolicyDao { - public static final Logger s_logger = Logger.getLogger(UsageLoadBalancerPolicyDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_LBID = "DELETE FROM usage_load_balancer_policy WHERE account_id = ? AND lb_id = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_load_balancer_policy SET deleted = ? WHERE account_id = ? AND lb_id = ? 
and deleted IS NULL"; @@ -64,7 +62,7 @@ public void removeBy(long accountId, long lbId) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error removing UsageLoadBalancerPolicyVO", e); + logger.warn("Error removing UsageLoadBalancerPolicyVO", e); } finally { txn.close(); } @@ -90,7 +88,7 @@ public void update(UsageLoadBalancerPolicyVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageLoadBalancerPolicyVO"+e.getMessage(), e); + logger.warn("Error updating UsageLoadBalancerPolicyVO"+e.getMessage(), e); } finally { txn.close(); } @@ -159,7 +157,7 @@ public List getUsageRecords(Long accountId, Long doma } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkDaoImpl.java index c4c5076878e8..27060cfe672d 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkDaoImpl.java @@ -23,7 +23,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageNetworkVO; @@ -33,7 +32,6 @@ @Component public class UsageNetworkDaoImpl extends GenericDaoBase implements UsageNetworkDao { - private static final Logger s_logger = Logger.getLogger(UsageNetworkDaoImpl.class.getName()); private static final String SELECT_LATEST_STATS = "SELECT u.account_id, u.zone_id, u.host_id, u.host_type, u.network_id, u.bytes_sent, u.bytes_received, u.agg_bytes_received, u.agg_bytes_sent, u.event_time_millis " + "FROM cloud_usage.usage_network u INNER JOIN (SELECT netusage.account_id as acct_id, netusage.zone_id as z_id, max(netusage.event_time_millis) as max_date " @@ -77,7 +75,7 @@ public Map 
getRecentNetworkStats() { } return returnMap; } catch (Exception ex) { - s_logger.error("error getting recent usage network stats", ex); + logger.error("error getting recent usage network stats", ex); } finally { txn.close(); } @@ -97,7 +95,7 @@ public void deleteOldStats(long maxEventTime) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error deleting old usage network stats", ex); + logger.error("error deleting old usage network stats", ex); } } @@ -126,7 +124,7 @@ public void saveUsageNetworks(List usageNetworks) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving usage_network to cloud_usage db", ex); + logger.error("error saving usage_network to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java index 23931f0a84e3..b3bc06e8af40 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageNetworkOfferingVO; @@ -36,7 +35,6 @@ @Component public class UsageNetworkOfferingDaoImpl extends GenericDaoBase implements UsageNetworkOfferingDao { - public static final Logger s_logger = Logger.getLogger(UsageNetworkOfferingDaoImpl.class.getName()); protected static final String UPDATE_DELETED = "UPDATE usage_network_offering SET deleted = ? WHERE account_id = ? AND vm_instance_id = ? AND network_offering_id = ? 
and deleted IS NULL"; @@ -74,7 +72,7 @@ public void update(UsageNetworkOfferingVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageNetworkOfferingVO:"+e.getMessage(), e); + logger.warn("Error updating UsageNetworkOfferingVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -146,7 +144,7 @@ public List getUsageRecords(Long accountId, Long domainI } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsagePortForwardingRuleDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsagePortForwardingRuleDaoImpl.java index 99216420a0df..e66b47f74fa9 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsagePortForwardingRuleDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsagePortForwardingRuleDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsagePortForwardingRuleVO; @@ -36,7 +35,6 @@ @Component public class UsagePortForwardingRuleDaoImpl extends GenericDaoBase implements UsagePortForwardingRuleDao { - public static final Logger s_logger = Logger.getLogger(UsagePortForwardingRuleDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_PFID = "DELETE FROM usage_port_forwarding WHERE account_id = ? AND pf_id = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_port_forwarding SET deleted = ? WHERE account_id = ? AND pf_id = ? 
and deleted IS NULL"; @@ -64,7 +62,7 @@ public void removeBy(long accountId, long pfId) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error removing UsagePortForwardingRuleVO", e); + logger.warn("Error removing UsagePortForwardingRuleVO", e); } finally { txn.close(); } @@ -90,7 +88,7 @@ public void update(UsagePortForwardingRuleVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsagePortForwardingRuleVO:"+e.getMessage(), e); + logger.warn("Error updating UsagePortForwardingRuleVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -159,7 +157,7 @@ public List getUsageRecords(Long accountId, Long doma } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageSecurityGroupDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageSecurityGroupDaoImpl.java index f98133f54390..43224918f0c4 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageSecurityGroupDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageSecurityGroupDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageSecurityGroupVO; @@ -36,7 +35,6 @@ @Component public class UsageSecurityGroupDaoImpl extends GenericDaoBase implements UsageSecurityGroupDao { - public static final Logger s_logger = Logger.getLogger(UsageSecurityGroupDaoImpl.class.getName()); protected static final String UPDATE_DELETED = "UPDATE usage_security_group SET deleted = ? WHERE account_id = ? AND vm_instance_id = ? AND security_group_id = ? 
and deleted IS NULL"; @@ -74,7 +72,7 @@ public void update(UsageSecurityGroupVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageSecurityGroupVO:"+e.getMessage(), e); + logger.warn("Error updating UsageSecurityGroupVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -142,7 +140,7 @@ public List getUsageRecords(Long accountId, Long domainId, } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records:"+e.getMessage(), e); + logger.warn("Error getting usage records:"+e.getMessage(), e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java index 680429b315b1..1da533493997 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageStorageVO; @@ -38,7 +37,6 @@ @Component public class UsageStorageDaoImpl extends GenericDaoBase implements UsageStorageDao { - public static final Logger s_logger = Logger.getLogger(UsageStorageDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_STORAGEID = "DELETE FROM usage_storage WHERE account_id = ? AND entity_id = ? AND storage_type = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_storage SET deleted = ? WHERE account_id = ? AND entity_id = ? AND storage_type = ? AND zone_id = ? 
and deleted IS NULL"; @@ -108,7 +106,7 @@ public void removeBy(long accountId, long volId, int storageType) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error removing usageStorageVO", e); + logger.error("Error removing usageStorageVO", e); } finally { txn.close(); } @@ -137,7 +135,7 @@ public void update(UsageStorageVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error updating UsageStorageVO:"+e.getMessage(), e); + logger.error("Error updating UsageStorageVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -211,7 +209,7 @@ public List getUsageRecords(Long accountId, Long domainId, Date } }catch (Exception e) { txn.rollback(); - s_logger.error("getUsageRecords:Exception:"+e.getMessage(), e); + logger.error("getUsageRecords:Exception:"+e.getMessage(), e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java index d330267d0b47..2fd453013bd1 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java @@ -24,7 +24,6 @@ import java.util.TimeZone; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVMInstanceVO; @@ -34,7 +33,6 @@ @Component public class UsageVMInstanceDaoImpl extends GenericDaoBase implements UsageVMInstanceDao { - public static final Logger s_logger = Logger.getLogger(UsageVMInstanceDaoImpl.class.getName()); protected static final String UPDATE_USAGE_INSTANCE_SQL = "UPDATE usage_vm_instance SET end_date = ? " + "WHERE account_id = ? and vm_instance_id = ? and usage_type = ? 
and end_date IS NULL"; @@ -62,7 +60,7 @@ public void update(UsageVMInstanceVO instance) { pstmt.executeUpdate(); txn.commit(); } catch (Exception e) { - s_logger.warn(e); + logger.warn(e); } finally { txn.close(); } @@ -83,7 +81,7 @@ public void delete(UsageVMInstanceVO instance) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error deleting usage vm instance with vmId: " + instance.getVmInstanceId() + ", for account with id: " + instance.getAccountId()); + logger.error("error deleting usage vm instance with vmId: " + instance.getVmInstanceId() + ", for account with id: " + instance.getAccountId()); } finally { txn.close(); } @@ -141,7 +139,7 @@ public List getUsageRecords(long accountId, Date startDate, D usageInstances.add(usageInstance); } } catch (Exception ex) { - s_logger.error("error retrieving usage vm instances for account id: " + accountId, ex); + logger.error("error retrieving usage vm instances for account id: " + accountId, ex); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java index fdd852d706b5..fbb5f7fae722 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java @@ -26,7 +26,6 @@ import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVMSnapshotVO; @@ -36,7 +35,6 @@ @Component public class UsageVMSnapshotDaoImpl extends GenericDaoBase implements UsageVMSnapshotDao { - public static final Logger s_logger = Logger.getLogger(UsageVMSnapshotDaoImpl.class.getName()); protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT volume_id, zone_id, account_id, domain_id, vm_id, disk_offering_id, size, created, processed, vm_snapshot_id " + " FROM usage_vmsnapshot" + " 
WHERE account_id = ? " + " AND ( (created BETWEEN ? AND ?) OR " + " (created < ? AND processed is NULL) ) ORDER BY created asc"; @@ -61,7 +59,7 @@ public void update(UsageVMSnapshotVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageVMSnapshotVO", e); + logger.warn("Error updating UsageVMSnapshotVO", e); } finally { txn.close(); } @@ -115,7 +113,7 @@ public List getUsageRecords(Long accountId, Long domainId, Da } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } @@ -170,7 +168,7 @@ public UsageVMSnapshotVO getPreviousUsageRecord(UsageVMSnapshotVO rec) { } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotOnPrimaryDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotOnPrimaryDaoImpl.java index f6628511564a..34a8af4da63d 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotOnPrimaryDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVMSnapshotOnPrimaryDaoImpl.java @@ -26,7 +26,6 @@ import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageSnapshotOnPrimaryVO; @@ -36,7 +35,6 @@ @Component public class UsageVMSnapshotOnPrimaryDaoImpl extends GenericDaoBase implements UsageVMSnapshotOnPrimaryDao { - public static final Logger s_logger = Logger.getLogger(UsageVMSnapshotOnPrimaryDaoImpl.class.getName()); protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT volume_id, zone_id, account_id, domain_id, vm_id, name, type, physicalsize, virtualsize, created, deleted, vm_snapshot_id " + " FROM usage_snapshot_on_primary" + " WHERE account_id = ? 
" + " AND ( (created < ? AND deleted is NULL)" + " OR ( deleted BETWEEN ? AND ?)) ORDER BY created asc"; @@ -58,7 +56,7 @@ public void updateDeleted(UsageSnapshotOnPrimaryVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageSnapshotOnPrimaryVO", e); + logger.warn("Error updating UsageSnapshotOnPrimaryVO", e); } finally { txn.close(); } @@ -79,7 +77,7 @@ public List getUsageRecords(Long accountId, Long domai pstmt.setString(i++, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), endDate)); pstmt.setString(i++, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), startDate)); pstmt.setString(i++, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), endDate)); - s_logger.debug("GET_USAGE_RECORDS_BY_ACCOUNT " + pstmt); + logger.debug("GET_USAGE_RECORDS_BY_ACCOUNT " + pstmt); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { //id, zone_id, account_id, domain_iVMSnapshotVOd, vm_id, disk_offering_id, size, created, deleted @@ -111,7 +109,7 @@ public List getUsageRecords(Long accountId, Long domai } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVPNUserDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVPNUserDaoImpl.java index 9be0ca5d59a6..fa6f896df4a4 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVPNUserDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVPNUserDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVPNUserVO; @@ -36,7 +35,6 @@ @Component public class UsageVPNUserDaoImpl extends GenericDaoBase implements UsageVPNUserDao { - public static final Logger s_logger = 
Logger.getLogger(UsageVPNUserDaoImpl.class.getName()); protected static final String UPDATE_DELETED = "UPDATE usage_vpn_user SET deleted = ? WHERE account_id = ? AND user_id = ? and deleted IS NULL"; protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT zone_id, account_id, domain_id, user_id, user_name, created, deleted " + "FROM usage_vpn_user " @@ -69,7 +67,7 @@ public void update(UsageVPNUserVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.error("Error updating UsageVPNUserVO:"+e.getMessage(), e); + logger.error("Error updating UsageVPNUserVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -139,7 +137,7 @@ public List getUsageRecords(Long accountId, Long domainId, Date } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVmDiskDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVmDiskDaoImpl.java index 2b934770b7de..bc1cb06cfec0 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVmDiskDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVmDiskDaoImpl.java @@ -23,7 +23,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVmDiskVO; @@ -33,7 +32,6 @@ @Component public class UsageVmDiskDaoImpl extends GenericDaoBase implements UsageVmDiskDao { - private static final Logger s_logger = Logger.getLogger(UsageVmDiskDaoImpl.class.getName()); private static final String SELECT_LATEST_STATS = "SELECT uvd.account_id, uvd.zone_id, uvd.vm_id, uvd.volume_id, uvd.io_read, uvd.io_write, uvd.agg_io_read, uvd.agg_io_write, " + "uvd.bytes_read, uvd.bytes_write, uvd.agg_bytes_read, uvd.agg_bytes_write, uvd.event_time_millis " @@ -81,7 +79,7 @@ public Map getRecentVmDiskStats() { } return returnMap; } catch (Exception ex) { - 
s_logger.error("error getting recent usage disk stats", ex); + logger.error("error getting recent usage disk stats", ex); } finally { txn.close(); } @@ -101,7 +99,7 @@ public void deleteOldStats(long maxEventTime) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error deleting old usage disk stats", ex); + logger.error("error deleting old usage disk stats", ex); } } @@ -133,7 +131,7 @@ public void saveUsageVmDisks(List usageVmDisks) { txn.commit(); } catch (Exception ex) { txn.rollback(); - s_logger.error("error saving usage_vm_disk to cloud_usage db", ex); + logger.error("error saving usage_vm_disk to cloud_usage db", ex); throw new CloudRuntimeException(ex.getMessage()); } } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java index 0c35c118006b..4662a6f26ce8 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java @@ -26,7 +26,6 @@ import com.cloud.exception.CloudException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVolumeVO; @@ -36,7 +35,6 @@ @Component public class UsageVolumeDaoImpl extends GenericDaoBase implements UsageVolumeDao { - public static final Logger s_logger = Logger.getLogger(UsageVolumeDaoImpl.class.getName()); protected static final String REMOVE_BY_USERID_VOLID = "DELETE FROM usage_volume WHERE account_id = ? AND volume_id = ?"; protected static final String UPDATE_DELETED = "UPDATE usage_volume SET deleted = ? WHERE account_id = ? AND volume_id = ? 
and deleted IS NULL"; @@ -69,7 +67,7 @@ public void removeBy(long accountId, long volId) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e); + logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e); } finally { txn.close(); } @@ -91,7 +89,7 @@ public void update(UsageVolumeVO usage) { txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Error updating UsageVolumeVO", e); + logger.warn("Error updating UsageVolumeVO", e); } finally { txn.close(); } @@ -169,7 +167,7 @@ public List getUsageRecords(Long accountId, Long domainId, Date s } } catch (Exception e) { txn.rollback(); - s_logger.warn("Error getting usage records", e); + logger.warn("Error getting usage records", e); } finally { txn.close(); } diff --git a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java index 3dacbb70f394..eed5572a0b24 100644 --- a/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/dao/AccountDaoImpl.java @@ -32,7 +32,6 @@ import com.cloud.utils.db.SearchCriteria.Op; import org.apache.commons.lang3.StringUtils; import com.cloud.utils.db.TransactionLegacy; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.sql.PreparedStatement; @@ -42,7 +41,6 @@ @Component public class AccountDaoImpl extends GenericDaoBase implements AccountDao { - private static final Logger s_logger = Logger.getLogger(AccountDaoImpl.class); private static final String FIND_USER_ACCOUNT_BY_API_KEY = "SELECT u.id, u.username, u.account_id, u.secret_key, u.state, " + "a.id, a.account_name, a.type, a.role_id, a.domain_id, a.state " + "FROM `cloud`.`user` u, `cloud`.`account` a " + "WHERE u.account_id = a.id AND u.api_key = ? 
and u.removed IS NULL"; @@ -161,7 +159,7 @@ public Pair findUserAccountByApiKey(String apiKey) { userAcctPair = new Pair(u, a); } } catch (Exception e) { - s_logger.warn("Exception finding user/acct by api key: " + apiKey, e); + logger.warn("Exception finding user/acct by api key: " + apiKey, e); } return userAcctPair; } @@ -300,7 +298,7 @@ public void markForCleanup(long accountId) { if (!account.getNeedsCleanup()) { account.setNeedsCleanup(true); if (!update(accountId, account)) { - s_logger.warn("Failed to mark account id=" + accountId + " for cleanup"); + logger.warn("Failed to mark account id=" + accountId + " for cleanup"); } } } @@ -320,7 +318,7 @@ public long getDomainIdForGivenAccountId(long id) { domain_id = account_vo.getDomainId(); } catch (Exception e) { - s_logger.warn("getDomainIdForGivenAccountId: Exception :" + e.getMessage()); + logger.warn("getDomainIdForGivenAccountId: Exception :" + e.getMessage()); } finally { return domain_id; diff --git a/engine/schema/src/main/java/com/cloud/user/dao/UserStatisticsDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/dao/UserStatisticsDaoImpl.java index acadc2f09086..6f10c5d2a1b2 100644 --- a/engine/schema/src/main/java/com/cloud/user/dao/UserStatisticsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/dao/UserStatisticsDaoImpl.java @@ -24,7 +24,6 @@ import java.util.TimeZone; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.user.UserStatisticsVO; @@ -36,7 +35,6 @@ @Component public class UserStatisticsDaoImpl extends GenericDaoBase implements UserStatisticsDao { - private static final Logger s_logger = Logger.getLogger(UserStatisticsDaoImpl.class); private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH = "SELECT us.id, us.data_center_id, us.account_id, us.public_ip_address, us.device_id, us.device_type, us.network_id, us.agg_bytes_received, us.agg_bytes_sent " + "FROM user_statistics us, account a " + "WHERE us.account_id = a.id AND 
(a.removed IS NULL OR a.removed >= ?) " + "ORDER BY us.id"; @@ -109,7 +107,7 @@ public List listActiveAndRecentlyDeleted(Date minRemovedDate, userStats.add(toEntityBean(rs, false)); } } catch (Exception ex) { - s_logger.error("error saving user stats to cloud_usage db", ex); + logger.error("error saving user stats to cloud_usage db", ex); } return userStats; } @@ -127,7 +125,7 @@ public List listUpdatedStats() { userStats.add(toEntityBean(rs, false)); } } catch (Exception ex) { - s_logger.error("error lisitng updated user stats", ex); + logger.error("error lisitng updated user stats", ex); } return userStats; } diff --git a/engine/schema/src/main/java/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java b/engine/schema/src/main/java/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java index 34fa2e79e98b..3f2a239a157c 100644 --- a/engine/schema/src/main/java/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java @@ -24,7 +24,6 @@ import java.util.TimeZone; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.user.VmDiskStatisticsVO; @@ -36,7 +35,6 @@ @Component public class VmDiskStatisticsDaoImpl extends GenericDaoBase implements VmDiskStatisticsDao { - private static final Logger s_logger = Logger.getLogger(VmDiskStatisticsDaoImpl.class); private static final String ACTIVE_AND_RECENTLY_DELETED_SEARCH = "SELECT bcf.id, bcf.data_center_id, bcf.account_id, bcf.vm_id, bcf.volume_id, bcf.agg_io_read, bcf.agg_io_write, bcf.agg_bytes_read, bcf.agg_bytes_write " + "FROM vm_disk_statistics bcf, account a " + "WHERE bcf.account_id = a.id AND (a.removed IS NULL OR a.removed >= ?) 
" + "ORDER BY bcf.id"; @@ -104,7 +102,7 @@ public List listActiveAndRecentlyDeleted(Date minRemovedDate vmDiskStats.add(toEntityBean(rs, false)); } } catch (Exception ex) { - s_logger.error("error saving vm disk stats to cloud_usage db", ex); + logger.error("error saving vm disk stats to cloud_usage db", ex); } return vmDiskStats; } @@ -122,7 +120,7 @@ public List listUpdatedStats() { vmDiskStats.add(toEntityBean(rs, false)); } } catch (Exception ex) { - s_logger.error("error lisitng updated vm disk stats", ex); + logger.error("error lisitng updated vm disk stats", ex); } return vmDiskStats; } diff --git a/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java b/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java index f3560d68f495..adcf28a29b38 100644 --- a/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/VMInstanceVO.java @@ -43,7 +43,8 @@ import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.Encrypt; @@ -59,7 +60,7 @@ @Inheritance(strategy = InheritanceType.JOINED) @DiscriminatorColumn(name = "type", discriminatorType = DiscriminatorType.STRING, length = 32) public class VMInstanceVO implements VirtualMachine, FiniteStateObject { - private static final Logger s_logger = Logger.getLogger(VMInstanceVO.class); + protected transient Logger logger = LogManager.getLogger(getClass()); @Id @TableGenerator(name = "vm_instance_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_instance_seq", allocationSize = 1) @Column(name = "id", updatable = false, nullable = false) @@ -225,7 +226,7 @@ public VMInstanceVO(long id, long 
serviceOfferingId, String name, String instanc random.nextBytes(randomBytes); vncPassword = Base64.encodeBase64URLSafeString(randomBytes); } catch (NoSuchAlgorithmException e) { - s_logger.error("Unexpected exception in SecureRandom Algorithm selection ", e); + logger.error("Unexpected exception in SecureRandom Algorithm selection ", e); } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java index 5b5c3505cb32..ef94a4d9f72e 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.info.ConsoleProxyLoadInfo; @@ -40,7 +39,6 @@ @Component public class ConsoleProxyDaoImpl extends GenericDaoBase implements ConsoleProxyDao { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyDaoImpl.class); // // query SQL for returning console proxy assignment info as following @@ -215,7 +213,7 @@ public List> getProxyLoadMatrix() { l.add(new Pair(rs.getLong(1), rs.getInt(2))); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } @@ -240,7 +238,7 @@ public List> getDatacenterStoragePoolHostInfo(long dcId, boo l.add(new Pair(rs.getLong(1), rs.getInt(2))); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } @@ -259,7 +257,7 @@ public int getProxyStaticLoad(long proxyVmId) { return rs.getInt(1); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return 0; } @@ -277,7 +275,7 @@ public int getProxyActiveLoad(long proxyVmId) { return rs.getInt(1); } } catch (SQLException e) { - s_logger.debug("Caught 
SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return 0; } @@ -299,7 +297,7 @@ private List getDatacenterLoadMatrix(String sql) { l.add(info); } } catch (SQLException e) { - s_logger.debug("Exception: ", e); + logger.debug("Exception: ", e); } return l; } @@ -321,7 +319,7 @@ public List getRunningProxyListByMsid(long msid) { l.add(rs.getLong(1)); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java index 2b3c0289b23e..b2b719cd12ed 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Attribute; @@ -38,7 +37,6 @@ @Component public class SecondaryStorageVmDaoImpl extends GenericDaoBase implements SecondaryStorageVmDao { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageVmDaoImpl.class); protected SearchBuilder DataCenterStatusSearch; protected SearchBuilder StateSearch; @@ -193,7 +191,7 @@ public List getRunningSecStorageVmListByMsid(SecondaryStorageVm.Role role, l.add(rs.getLong(1)); } } catch (SQLException e) { - s_logger.debug("Caught SQLException: ", e); + logger.debug("Caught SQLException: ", e); } return l; } @@ -263,7 +261,7 @@ public List listRunningSecStorageOrderByLoad(SecondaryStorageVm.Role role, l.add(rs.getLong(1)); } } catch (SQLException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); } return l; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java 
b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java index 0761f56917b3..344f4e86fed5 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java @@ -20,7 +20,6 @@ import javax.annotation.PostConstruct; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; @@ -33,7 +32,6 @@ @Component @DB() public class UserVmCloneSettingDaoImpl extends GenericDaoBase implements UserVmCloneSettingDao { - public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class); protected SearchBuilder vmIdSearch; protected SearchBuilder cloneTypeSearch; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java index 80fabf6a7f7b..f4ce01afef34 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java @@ -31,7 +31,6 @@ import javax.inject.Inject; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.network.Network; import com.cloud.network.dao.NetworkDao; @@ -60,7 +59,6 @@ import com.cloud.vm.dao.UserVmData.SecurityGroupData; public class UserVmDaoImpl extends GenericDaoBase implements UserVmDao { - public static final Logger s_logger = Logger.getLogger(UserVmDaoImpl.class); protected SearchBuilder AccountPodSearch; protected SearchBuilder AccountDataCenterSearch; @@ -459,13 +457,13 @@ public List listPodIdsHavingVmsforAccount(long zoneId, long accountId) { } } catch (Exception e) { - s_logger.error("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage()); + logger.error("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage()); throw new CloudRuntimeException("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage(), e); } txn.commit(); return 
result; } catch (Exception e) { - s_logger.error("listPodIdsHavingVmsforAccount:Exception : " + e.getMessage()); + logger.error("listPodIdsHavingVmsforAccount:Exception : " + e.getMessage()); throw new CloudRuntimeException("listPodIdsHavingVmsforAccount:Exception: " + e.getMessage(), e); } finally { @@ -477,7 +475,7 @@ public List listPodIdsHavingVmsforAccount(long zoneId, long accountId) { } catch (Exception e) { - s_logger.error("listPodIdsHavingVmsforAccount:Exception:" + e.getMessage()); + logger.error("listPodIdsHavingVmsforAccount:Exception:" + e.getMessage()); } } @@ -514,7 +512,7 @@ public Hashtable listVmDetails(Hashtable use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } curr_index += VM_DETAILS_BATCH_SIZE; @@ -522,7 +520,7 @@ public Hashtable listVmDetails(Hashtable use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } } @@ -550,20 +548,20 @@ public Hashtable listVmDetails(Hashtable use } catch (Exception e) { - s_logger.error("listVmDetails: Exception:" + e.getMessage()); + logger.error("listVmDetails: Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails: Exception:" + e.getMessage(),e); } } txn.commit(); return userVmDataHash; } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); throw new CloudRuntimeException("listVmDetails:Exception : ", e); } 
finally { @@ -575,7 +573,7 @@ public Hashtable listVmDetails(Hashtable use } catch (Exception e) { - s_logger.error("listVmDetails:Exception:" + e.getMessage()); + logger.error("listVmDetails:Exception:" + e.getMessage()); } } @@ -740,7 +738,7 @@ public List, Pair>> getVmsD } } } catch (SQLException e) { - s_logger.error("GetVmsDetailsByNames: Exception in sql: " + e.getMessage()); + logger.error("GetVmsDetailsByNames: Exception in sql: " + e.getMessage()); throw new CloudRuntimeException("GetVmsDetailsByNames: Exception: " + e.getMessage()); } @@ -763,7 +761,7 @@ public List> countVmsBySize(long dcId, int li result.add(new Ternary(rs.getInt(1), rs.getInt(2), rs.getInt(3))); } } catch (Exception e) { - s_logger.warn("Error counting vms by size for dcId= " + dcId, e); + logger.warn("Error counting vms by size for dcId= " + dcId, e); } return result; } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 916687baeb4d..cc82813b412e 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -28,7 +28,6 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; @@ -65,7 +64,6 @@ @Component public class VMInstanceDaoImpl extends GenericDaoBase implements VMInstanceDao { - public static final Logger s_logger = Logger.getLogger(VMInstanceDaoImpl.class); private static final int MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT = 3; protected SearchBuilder VMClusterSearch; @@ -504,8 +502,8 @@ public void updateProxyId(long id, Long proxyId, Date time) { @Override public boolean updateState(State oldState, Event event, State newState, VirtualMachine vm, Object opaque) { if (newState == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("There's no way to 
transition from old state: " + oldState.toString() + " event: " + event.toString()); + if (logger.isDebugEnabled()) { + logger.debug("There's no way to transition from old state: " + oldState.toString() + " event: " + event.toString()); } return false; } @@ -547,7 +545,7 @@ public boolean updateState(State oldState, Event event, State newState, VirtualM if (result == 0) { VMInstanceVO vo = findByIdIncludingRemoved(vm.getId()); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (vo != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()) @@ -556,16 +554,16 @@ public boolean updateState(State oldState, Event event, State newState, VirtualM .append("; time=").append(vo.getUpdateTime()); str.append("} Stale Data: {Host=").append(oldHostId).append("; State=").append(oldState).append("; updated=").append(oldUpdated).append("; time=") .append(oldUpdateDate).append("}"); - s_logger.debug(str.toString()); + logger.debug(str.toString()); } else { - s_logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); + logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); } } if (vo != null && vo.getState() == newState) { // allow for concurrent update if target state has already been matched - s_logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState); + logger.debug("VM " + vo.getInstanceName() + " state has been already been updated to " + newState); return true; } } @@ -827,7 +825,7 @@ public Long countByZoneAndStateAndHostTag(long dcId, State state, String hostTag return rs.getLong(1); } } catch (Exception e) { - s_logger.warn(String.format("Error counting vms by host tag for dcId= %s, hostTag= %s", dcId, hostTag), e); + 
logger.warn(String.format("Error counting vms by host tag for dcId= %s, hostTag= %s", dcId, hostTag), e); } return 0L; } diff --git a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java index 1b1842dfd894..062960130aca 100644 --- a/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/snapshot/dao/VMSnapshotDaoImpl.java @@ -21,7 +21,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -36,7 +35,6 @@ @Component public class VMSnapshotDaoImpl extends GenericDaoBase implements VMSnapshotDao { - private static final Logger s_logger = Logger.getLogger(VMSnapshotDaoImpl.class); private final SearchBuilder SnapshotSearch; private final SearchBuilder ExpungingSnapshotSearch; private final SearchBuilder SnapshotStatusSearch; @@ -143,7 +141,7 @@ public boolean updateState(State currentState, Event event, State nextState, VMS builder.set(vo, "updated", new Date()); int rows = update((VMSnapshotVO)vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VMSnapshotVO dbVol = findByIdIncludingRemoved(vo.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -176,7 +174,7 @@ public boolean updateState(State currentState, Event event, State nextState, VMS .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); + logger.debug("Unable to update VM snapshot: id=" + vo.getId() + ", as there is no such snapshot exists in the database anymore"); } } return rows > 0; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/ProjectRolePermissionsDaoImpl.java 
b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/ProjectRolePermissionsDaoImpl.java index d88a6f5453dc..d028ca58b32d 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/ProjectRolePermissionsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/ProjectRolePermissionsDaoImpl.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.acl.RolePermissionEntity.Permission; import org.apache.cloudstack.acl.ProjectRolePermission; import org.apache.cloudstack.acl.ProjectRolePermissionVO; -import org.apache.log4j.Logger; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.Filter; @@ -42,7 +41,6 @@ public class ProjectRolePermissionsDaoImpl extends GenericDaoBase implements ProjectRolePermissionsDao{ - private static final Logger LOGGER = Logger.getLogger(ProjectRolePermissionsDaoImpl.class); private final SearchBuilder ProjectRolePermissionsSearch; private Attribute sortOrderAttribute; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RolePermissionsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RolePermissionsDaoImpl.java index b63dd502ee7b..7802265928ed 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RolePermissionsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/acl/dao/RolePermissionsDaoImpl.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.acl.RolePermission; import org.apache.cloudstack.acl.RolePermissionEntity.Permission; import org.apache.cloudstack.acl.RolePermissionVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Attribute; @@ -43,7 +42,6 @@ @Component public class RolePermissionsDaoImpl extends GenericDaoBase implements RolePermissionsDao { - protected static final Logger LOGGER = Logger.getLogger(RolePermissionsDaoImpl.class); private final SearchBuilder RolePermissionsSearchByRoleAndRule; private final SearchBuilder RolePermissionsSearch; @@ -90,7 +88,7 
@@ private boolean updateSortOrder(final RolePermissionVO permissionBeingMoved, fin for (final RolePermissionVO permission : newOrderedPermissionsList) { permission.setSortOrder(sortOrder++); if (!update(permission.getId(), permission)) { - LOGGER.warn("Failed to update item's sort order with id:" + permission.getId() + " while moving permission with id:" + permissionBeingMoved.getId() + " to a new position"); + logger.warn("Failed to update item's sort order with id:" + permission.getId() + " while moving permission with id:" + permissionBeingMoved.getId() + " to a new position"); return false; } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java index 1dd22df46d71..3efedd826dca 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/affinity/dao/AffinityGroupDomainMapDaoImpl.java @@ -27,9 +27,7 @@ import javax.annotation.PostConstruct; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; -import org.apache.log4j.Logger; -import com.cloud.network.dao.NetworkDomainDaoImpl; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -37,7 +35,6 @@ import com.cloud.utils.db.TransactionLegacy; public class AffinityGroupDomainMapDaoImpl extends GenericDaoBase implements AffinityGroupDomainMapDao { - public static Logger logger = Logger.getLogger(NetworkDomainDaoImpl.class.getName()); private SearchBuilder ListByAffinityGroup; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java index 13278a75dd4f..a894e87bd6da 100644 --- 
a/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java @@ -22,7 +22,6 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.cloud.entity.api.db.VMEntityVO; @@ -37,7 +36,6 @@ @Component public class VMEntityDaoImpl extends GenericDaoBase implements VMEntityDao { - public static final Logger s_logger = Logger.getLogger(VMEntityDaoImpl.class); @Inject protected VMReservationDao _vmReservationDao; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/ha/dao/HAConfigDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/ha/dao/HAConfigDaoImpl.java index 4d74e2e66566..1a27bb1de67f 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/ha/dao/HAConfigDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/ha/dao/HAConfigDaoImpl.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.ha.HAConfig; import org.apache.cloudstack.ha.HAConfigVO; import org.apache.cloudstack.ha.HAResource; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.sql.PreparedStatement; @@ -42,7 +41,6 @@ @DB @Component public class HAConfigDaoImpl extends GenericDaoBase implements HAConfigDao { - private static final Logger LOG = Logger.getLogger(HAConfigDaoImpl.class); private static final String EXPIRE_OWNERSHIP = "UPDATE ha_config set mgmt_server_id=NULL where mgmt_server_id=?"; @@ -77,8 +75,8 @@ public HAConfigDaoImpl() { public boolean updateState(HAConfig.HAState currentState, HAConfig.Event event, HAConfig.HAState nextState, HAConfig vo, Object data) { HAConfigVO haConfig = (HAConfigVO) vo; if (haConfig == null) { - if (LOG.isTraceEnabled()) { - LOG.trace("Invalid ha config view object provided"); + if (logger.isTraceEnabled()) { + 
logger.trace("Invalid ha config view object provided"); } return false; } @@ -104,8 +102,8 @@ public boolean updateState(HAConfig.HAState currentState, HAConfig.Event event, ub.set(haConfig, MsIdAttr, newManagementServerId); int result = update(ub, sc, null); - if (LOG.isTraceEnabled() && result <= 0) { - LOG.trace(String.format("Failed to update HA state from:%s to:%s due to event:%s for the ha_config id:%d", currentState, nextState, event, haConfig.getId())); + if (logger.isTraceEnabled() && result <= 0) { + logger.trace(String.format("Failed to update HA state from:%s to:%s due to event:%s for the ha_config id:%d", currentState, nextState, event, haConfig.getId())); } return result > 0; } @@ -141,7 +139,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { pstmt.executeUpdate(); } catch (SQLException e) { txn.rollback(); - LOG.warn("Failed to expire HA ownership of management server id: " + serverId); + logger.warn("Failed to expire HA ownership of management server id: " + serverId); } } }); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDaoImpl.java index ffc62b15dc2e..c4214e8dfce9 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/network/dao/NetworkPermissionDaoImpl.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.network.NetworkPermissionVO; @@ -31,7 +30,6 @@ @Component public class NetworkPermissionDaoImpl extends GenericDaoBase implements NetworkPermissionDao { - private static final Logger s_logger = Logger.getLogger(NetworkPermissionDaoImpl.class); private SearchBuilder NetworkAndAccountSearch; private SearchBuilder NetworkIdSearch; diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/outofbandmanagement/dao/OutOfBandManagementDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/outofbandmanagement/dao/OutOfBandManagementDaoImpl.java index af164326da70..375bb43e40eb 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/outofbandmanagement/dao/OutOfBandManagementDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/outofbandmanagement/dao/OutOfBandManagementDaoImpl.java @@ -31,7 +31,6 @@ import com.cloud.utils.db.UpdateBuilder; import org.apache.cloudstack.outofbandmanagement.OutOfBandManagement; import org.apache.cloudstack.outofbandmanagement.OutOfBandManagementVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.sql.PreparedStatement; @@ -41,7 +40,6 @@ @DB @Component public class OutOfBandManagementDaoImpl extends GenericDaoBase implements OutOfBandManagementDao { - private static final Logger LOG = Logger.getLogger(OutOfBandManagementDaoImpl.class); private SearchBuilder HostSearch; private SearchBuilder ManagementServerSearch; @@ -109,7 +107,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { pstmt.executeUpdate(); } catch (SQLException e) { txn.rollback(); - LOG.warn("Failed to expire ownership for out-of-band management server id: " + resource); + logger.warn("Failed to expire ownership for out-of-band management server id: " + resource); } } }); @@ -119,8 +117,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { public void expireServerOwnership(long serverId) { final String resetOwnerSql = "UPDATE oobm set mgmt_server_id=NULL, power_state=NULL where mgmt_server_id=?"; executeExpireOwnershipSql(resetOwnerSql, serverId); - if (LOG.isDebugEnabled()) { - LOG.debug("Expired out-of-band management ownership for hosts owned by management server id:" + serverId); + if (logger.isDebugEnabled()) { + logger.debug("Expired out-of-band management ownership for hosts owned by 
management server id:" + serverId); } } @@ -128,8 +126,8 @@ public void expireServerOwnership(long serverId) { public boolean updateState(OutOfBandManagement.PowerState oldStatus, OutOfBandManagement.PowerState.Event event, OutOfBandManagement.PowerState newStatus, OutOfBandManagement vo, Object data) { OutOfBandManagementVO oobmHost = (OutOfBandManagementVO) vo; if (oobmHost == null) { - if (LOG.isTraceEnabled()) { - LOG.trace("Invalid out-of-band management host view object provided"); + if (logger.isTraceEnabled()) { + logger.trace("Invalid out-of-band management host view object provided"); } return false; } @@ -156,8 +154,8 @@ public boolean updateState(OutOfBandManagement.PowerState oldStatus, OutOfBandMa ub.set(oobmHost, MsIdAttr, newManagementServerId); int result = update(ub, sc, null); - if (LOG.isDebugEnabled() && result <= 0) { - LOG.debug(String.format("Failed to update out-of-band management power state from:%s to:%s due to event:%s for the host id:%d", oldStatus, newStatus, event, oobmHost.getHostId())); + if (logger.isDebugEnabled() && result <= 0) { + logger.debug(String.format("Failed to update out-of-band management power state from:%s to:%s due to event:%s for the host id:%d", oldStatus, newStatus, event, oobmHost.getHostId())); } return result > 0; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/region/dao/RegionDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/region/dao/RegionDaoImpl.java index e2ad5d9ed239..1e53c925e082 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/region/dao/RegionDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/region/dao/RegionDaoImpl.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.region.dao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.region.RegionVO; @@ -28,7 +27,6 @@ @Component public class RegionDaoImpl extends GenericDaoBase implements RegionDao { - private static final Logger 
s_logger = Logger.getLogger(RegionDaoImpl.class); protected SearchBuilder NameSearch; protected SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java index 98cb6ca5b42d..c095f4222e76 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.Hypervisor; @@ -50,7 +49,6 @@ @Component public class SnapshotDataStoreDaoImpl extends GenericDaoBase implements SnapshotDataStoreDao { - private static final Logger s_logger = Logger.getLogger(SnapshotDataStoreDaoImpl.class); private static final String STORE_ID = "store_id"; private static final String STORE_ROLE = "store_role"; private static final String STATE = "state"; @@ -188,7 +186,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat message = String.format("Unable to update objectIndatastore: id=%s, as there is no such object exists in the database anymore", dataObj.getId()); } - s_logger.debug(message); + logger.debug(message); return false; } @@ -277,7 +275,7 @@ protected SnapshotDataStoreVO findOldestOrLatestSnapshotForVolume(long volumeId, } } } catch (SQLException e) { - s_logger.warn(String.format("Failed to find %s snapshot for volume [%s] in %s store due to [%s].", oldest ? 
"oldest" : "latest", volumeId, role, e.getMessage()), e); + logger.warn(String.format("Failed to find %s snapshot for volume [%s] in %s store due to [%s].", oldest ? "oldest" : "latest", volumeId, role, e.getMessage()), e); } return null; } @@ -286,7 +284,7 @@ protected SnapshotDataStoreVO findOldestOrLatestSnapshotForVolume(long volumeId, @DB public SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long volumeId) { if (!isSnapshotChainingRequired(volumeId)) { - s_logger.trace(String.format("Snapshot chaining is not required for snapshots of volume [%s]. Returning null as parent.", volumeId)); + logger.trace(String.format("Snapshot chaining is not required for snapshots of volume [%s]. Returning null as parent.", volumeId)); return null; } @@ -378,21 +376,21 @@ public void duplicateCacheRecordsOnRegionStore(long storeId) { List snapshots = listBy(sc); if (snapshots == null) { - s_logger.debug(String.format("There are no snapshots on cache store to duplicate to region store [%s].", storeId)); + logger.debug(String.format("There are no snapshots on cache store to duplicate to region store [%s].", storeId)); return; } - s_logger.info(String.format("Duplicating [%s] snapshot cache store records to region store [%s].", snapshots.size(), storeId)); + logger.info(String.format("Duplicating [%s] snapshot cache store records to region store [%s].", snapshots.size(), storeId)); for (SnapshotDataStoreVO snap : snapshots) { SnapshotDataStoreVO snapStore = findByStoreSnapshot(DataStoreRole.Image, storeId, snap.getSnapshotId()); if (snapStore != null) { - s_logger.debug(String.format("There is already an entry for snapshot [%s] on region store [%s].", snap.getSnapshotId(), storeId)); + logger.debug(String.format("There is already an entry for snapshot [%s] on region store [%s].", snap.getSnapshotId(), storeId)); continue; } - s_logger.info(String.format("Persisting an entry for snapshot [%s] on region store [%s].", snap.getSnapshotId(), storeId)); + 
logger.info(String.format("Persisting an entry for snapshot [%s] on region store [%s].", snap.getSnapshotId(), storeId)); SnapshotDataStoreVO ss = new SnapshotDataStoreVO(); ss.setSnapshotId(snap.getSnapshotId()); ss.setDataStoreId(storeId); @@ -434,9 +432,9 @@ public void updateStoreRoleToCache(long storeId) { sc.setParameters("destroyed", false); List snaps = listBy(sc); if (snaps != null) { - s_logger.info(String.format("Updating role to cache store for [%s] entries in snapshot_store_ref.", snaps.size())); + logger.info(String.format("Updating role to cache store for [%s] entries in snapshot_store_ref.", snaps.size())); for (SnapshotDataStoreVO snap : snaps) { - s_logger.debug(String.format("Updating role to cache store for entry [%s].", snap)); + logger.debug(String.format("Updating role to cache store for entry [%s].", snap)); snap.setRole(DataStoreRole.ImageCache); update(snap.getId(), snap); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java index 6f6ed4e08f27..a1dc05fce58b 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java @@ -29,7 +29,8 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -47,7 +48,7 @@ @Entity @Table(name = "snapshot_store_ref") public class SnapshotDataStoreVO implements StateObject, DataObjectInStore { - private static final Logger s_logger = Logger.getLogger(SnapshotDataStoreVO.class); + protected transient Logger 
logger = LogManager.getLogger(getClass()); @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @@ -297,7 +298,7 @@ public void decrRefCnt() { refCnt--; } else { - s_logger.warn("We should not try to decrement a zero reference count even though our code has guarded"); + logger.warn("We should not try to decrement a zero reference count even though our code has guarded"); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java index a8d1af62f531..a6e7a5a4fea7 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java @@ -29,7 +29,8 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -47,7 +48,7 @@ @Entity @Table(name = "template_store_ref") public class TemplateDataStoreVO implements StateObject, DataObjectInStore { - private static final Logger s_logger = Logger.getLogger(TemplateDataStoreVO.class); + protected transient Logger logger = LogManager.getLogger(getClass()); @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @@ -382,7 +383,7 @@ public void decrRefCnt() { refCnt--; } else{ - s_logger.warn("We should not try to decrement a zero reference count even though our code has guarded"); + logger.warn("We should not try to decrement a zero reference count even though our code has guarded"); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java 
b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java index bb21abbe44b3..d57dec8fbfd5 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java @@ -29,7 +29,8 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -46,7 +47,7 @@ @Entity @Table(name = "volume_store_ref") public class VolumeDataStoreVO implements StateObject, DataObjectInStore { - private static final Logger s_logger = Logger.getLogger(VolumeDataStoreVO.class); + protected transient Logger logger = LogManager.getLogger(getClass()); @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @@ -362,7 +363,7 @@ public void decrRefCnt() { refCnt--; } else { - s_logger.warn("We should not try to decrement a zero reference count even though our code has guarded"); + logger.warn("We should not try to decrement a zero reference count even though our code has guarded"); } } diff --git a/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java b/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java index bd05fbe3c4cd..4c07abda9389 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/dao/DatabaseAccessObjectTest.java @@ -16,7 +16,6 @@ // under the License. 
package com.cloud.upgrade.dao; - import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.contains; @@ -32,14 +31,13 @@ import java.sql.ResultSet; import java.sql.SQLException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; -import org.springframework.test.util.ReflectionTestUtils; @RunWith(MockitoJUnitRunner.class) public class DatabaseAccessObjectTest { @@ -60,8 +58,7 @@ public class DatabaseAccessObjectTest { @Before public void setup() { - ReflectionTestUtils.setField(dao, "s_logger", loggerMock); - + dao.logger = loggerMock; } @Test diff --git a/engine/service/src/main/webapp/WEB-INF/log4j.xml b/engine/service/src/main/webapp/WEB-INF/log4j.xml index 19d48b47e836..48d61a10b413 100644 --- a/engine/service/src/main/webapp/WEB-INF/log4j.xml +++ b/engine/service/src/main/webapp/WEB-INF/log4j.xml @@ -16,24 +16,41 @@ specific language governing permissions and limitations under the License. 
--> - - - - - - - - - - - - - - - - - - - - - + + + + + net.sf.cglib.proxy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/allocator/StorageCacheRandomAllocator.java b/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/allocator/StorageCacheRandomAllocator.java index 22b3f46a9463..fe3bb5cf00da 100644 --- a/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/allocator/StorageCacheRandomAllocator.java +++ b/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/allocator/StorageCacheRandomAllocator.java @@ -22,7 +22,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -39,7 +40,7 @@ @Component public class StorageCacheRandomAllocator implements StorageCacheAllocator { - private static final Logger s_logger = Logger.getLogger(StorageCacheRandomAllocator.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject DataStoreManager dataStoreMgr; @Inject @@ -52,13 +53,13 @@ public class StorageCacheRandomAllocator implements StorageCacheAllocator { @Override public DataStore getCacheStore(Scope scope) { if (scope.getScopeType() != ScopeType.ZONE) { - s_logger.debug("Can only support zone wide cache storage"); + logger.debug("Can only support zone wide cache storage"); return null; } List cacheStores = dataStoreMgr.getImageCacheStores(scope); if ((cacheStores == null) || (cacheStores.size() <= 0)) { - s_logger.debug("Can't find staging storage in zone: " + scope.getScopeId()); + logger.debug("Can't find staging storage in zone: " + scope.getScopeId()); return null; } @@ -68,13 +69,13 @@ public DataStore getCacheStore(Scope scope) { @Override public DataStore getCacheStore(DataObject 
data, Scope scope) { if (scope.getScopeType() != ScopeType.ZONE) { - s_logger.debug("Can only support zone wide cache storage"); + logger.debug("Can only support zone wide cache storage"); return null; } List cacheStores = dataStoreMgr.getImageCacheStores(scope); if (cacheStores.size() <= 0) { - s_logger.debug("Can't find staging storage in zone: " + scope.getScopeId()); + logger.debug("Can't find staging storage in zone: " + scope.getScopeId()); return null; } @@ -83,7 +84,7 @@ public DataStore getCacheStore(DataObject data, Scope scope) { for (DataStore store : cacheStores) { DataObjectInStore obj = objectInStoreMgr.findObject(data, store); if (obj != null && obj.getState() == ObjectInDataStoreStateMachine.State.Ready && statsCollector.imageStoreHasEnoughCapacity(store)) { - s_logger.debug("pick the cache store " + store.getId() + " where data is already there"); + logger.debug("pick the cache store " + store.getId() + " where data is already there"); return store; } } diff --git a/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java b/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java index a687ddfc4371..889d0ce14ccc 100644 --- a/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java +++ b/engine/storage/cache/src/main/java/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java @@ -32,7 +32,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; @@ -64,7 +65,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class StorageCacheManagerImpl implements StorageCacheManager, Manager 
{ - private static final Logger s_logger = Logger.getLogger(StorageCacheManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject List storageCacheAllocator; @Inject @@ -195,7 +196,7 @@ protected void runInContext() { } } } catch (Exception e) { - s_logger.debug("Failed to execute CacheReplacementRunner: " + e.toString()); + logger.debug("Failed to execute CacheReplacementRunner: " + e.toString()); } finally { if (replacementLock != null) { replacementLock.unlock(); @@ -245,7 +246,7 @@ public DataObject createCacheObject(DataObject data, DataStore store) { String msg = "unsupported DataObject comes, then can't acquire correct lock object"; throw new CloudRuntimeException(msg); } - s_logger.debug("check " + typeName + " cache entry(id: " + dataId + ") on store(id: " + storeId + ")"); + logger.debug("check " + typeName + " cache entry(id: " + dataId + ") on store(id: " + storeId + ")"); DataObject existingDataObj = null; synchronized (lock) { @@ -271,13 +272,13 @@ public DataObject createCacheObject(DataObject data, DataStore store) { * Threads must release lock within waiting for cache copy and * must be waken up at completion. 
*/ - s_logger.debug("waiting cache copy completion type: " + typeName + ", id: " + obj.getObjectId() + ", lock: " + lock.hashCode()); + logger.debug("waiting cache copy completion type: " + typeName + ", id: " + obj.getObjectId() + ", lock: " + lock.hashCode()); try { lock.wait(milliSeconds); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while waiting for cache copy completion."); + logger.debug("[ignored] interrupted while waiting for cache copy completion."); } - s_logger.debug("waken up"); + logger.debug("waken up"); now = new Date(); if (now.after(expiredDate)) { @@ -290,7 +291,7 @@ public DataObject createCacheObject(DataObject data, DataStore store) { } if (st == ObjectInDataStoreStateMachine.State.Ready) { - s_logger.debug("there is already one in the cache store"); + logger.debug("there is already one in the cache store"); DataObject dataObj = objectInStoreMgr.get(data, store, null); dataObj.incRefCount(); existingDataObj = dataObj; @@ -298,7 +299,7 @@ public DataObject createCacheObject(DataObject data, DataStore store) { } if(existingDataObj == null) { - s_logger.debug("create " + typeName + " cache entry(id: " + dataId + ") on store(id: " + storeId + ")"); + logger.debug("create " + typeName + " cache entry(id: " + dataId + ") on store(id: " + storeId + ")"); objOnCacheStore = store.create(data); } lock.notifyAll(); @@ -307,7 +308,7 @@ public DataObject createCacheObject(DataObject data, DataStore store) { return existingDataObj; } if (objOnCacheStore == null) { - s_logger.error("create " + typeName + " cache entry(id: " + dataId + ") on store(id: " + storeId + ") failed"); + logger.error("create " + typeName + " cache entry(id: " + dataId + ") on store(id: " + storeId + ") failed"); return null; } @@ -327,10 +328,10 @@ public DataObject createCacheObject(DataObject data, DataStore store) { return objOnCacheStore; } } catch (InterruptedException e) { - s_logger.debug("create cache storage failed: " + e.toString()); + 
logger.debug("create cache storage failed: " + e.toString()); throw new CloudRuntimeException(e); } catch (ExecutionException e) { - s_logger.debug("create cache storage failed: " + e.toString()); + logger.debug("create cache storage failed: " + e.toString()); throw new CloudRuntimeException(e); } finally { if (result == null) { @@ -340,7 +341,7 @@ public DataObject createCacheObject(DataObject data, DataStore store) { /* * Wake up all threads waiting for cache copy. */ - s_logger.debug("wake up all waiting threads(lock: " + lock.hashCode() + ")"); + logger.debug("wake up all waiting threads(lock: " + lock.hashCode() + ")"); lock.notifyAll(); } } diff --git a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java index e02c0920004d..e1d51120efa3 100644 --- a/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java +++ b/engine/storage/configdrive/src/main/java/org/apache/cloudstack/storage/configdrive/ConfigDriveBuilder.java @@ -37,7 +37,8 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.joda.time.Duration; import com.cloud.network.NetworkModel; @@ -49,7 +50,7 @@ public class ConfigDriveBuilder { - public static final Logger LOG = Logger.getLogger(ConfigDriveBuilder.class); + protected static Logger LOGGER = LogManager.getLogger(ConfigDriveBuilder.class); /** * This is for mocking the File class. We cannot mock the File class directly because Mockito uses it internally. 
@@ -98,7 +99,7 @@ public static File base64StringToFile(String encodedIsoData, String folder, Stri try { Files.createDirectories(destPath.getParent()); } catch (final IOException e) { - LOG.warn("Exception hit while trying to recreate directory: " + destPath.getParent().toString()); + LOGGER.warn("Exception hit while trying to recreate directory: " + destPath.getParent().toString()); } return Files.write(destPath, decoded).toFile(); } @@ -139,7 +140,7 @@ private static void deleteTempDir(Path tempDir) { FileUtils.deleteDirectory(tempDir.toFile()); } } catch (IOException ioe) { - LOG.warn("Failed to delete ConfigDrive temporary directory: " + tempDir.toString(), ioe); + LOGGER.warn("Failed to delete ConfigDrive temporary directory: " + tempDir.toString(), ioe); } } @@ -151,7 +152,7 @@ private static void deleteTempDir(Path tempDir) { */ static String generateAndRetrieveIsoAsBase64Iso(String isoFileName, String driveLabel, String tempDirName) throws IOException { File tmpIsoStore = getFile(tempDirName, isoFileName); - Script command = new Script(getProgramToGenerateIso(), Duration.standardSeconds(300), LOG); + Script command = new Script(getProgramToGenerateIso(), Duration.standardSeconds(300), LOGGER); command.add("-o", tmpIsoStore.getAbsolutePath()); command.add("-ldots"); command.add("-allow-lowercase"); @@ -163,11 +164,11 @@ static String generateAndRetrieveIsoAsBase64Iso(String isoFileName, String drive command.add("-r"); command.add("-V", driveLabel); command.add(tempDirName); - LOG.debug("Executing config drive creation command: " + command.toString()); + LOGGER.debug("Executing config drive creation command: " + command.toString()); String result = command.execute(); if (StringUtils.isNotBlank(result)) { String errMsg = "Unable to create iso file: " + isoFileName + " due to ge" + result; - LOG.warn(errMsg); + LOGGER.warn(errMsg); throw new CloudRuntimeException(errMsg); } File tmpIsoFile = getFile(tmpIsoStore.getAbsolutePath()); @@ -242,7 +243,7 @@ static 
JsonObject createJsonObjectWithVmData(List vmData, String tempD String dataType = item[CONFIGDATA_DIR]; String fileName = item[CONFIGDATA_FILE]; String content = item[CONFIGDATA_CONTENT]; - LOG.debug(String.format("[createConfigDriveIsoForVM] dataType=%s, filename=%s, content=%s", dataType, fileName, (PASSWORD_FILE.equals(fileName) ? "********" : content))); + LOGGER.debug(String.format("[createConfigDriveIsoForVM] dataType=%s, filename=%s, content=%s", dataType, fileName, (PASSWORD_FILE.equals(fileName) ? "********" : content))); createFileInTempDirAnAppendOpenStackMetadataToJsonObject(tempDirName, metaData, dataType, fileName, content, customUserdataParams); } @@ -299,10 +300,10 @@ static void linkUserData(String tempDirName) { String userDataFilePath = tempDirName + ConfigDrive.cloudStackConfigDriveName + "userdata/user_data.txt"; File file = getFile(userDataFilePath); if (file.exists()) { - Script hardLink = new Script("ln", Duration.standardSeconds(300), LOG); + Script hardLink = new Script("ln", Duration.standardSeconds(300), LOGGER); hardLink.add(userDataFilePath); hardLink.add(tempDirName + ConfigDrive.openStackConfigDriveName + "user_data"); - LOG.debug("execute command: " + hardLink.toString()); + LOGGER.debug("execute command: " + hardLink.toString()); String executionResult = hardLink.execute(); if (StringUtils.isNotBlank(executionResult)) { diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 370753ed9230..0b0065361d07 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -51,7 +51,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import 
org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -81,7 +82,7 @@ @Component public class AncientDataMotionStrategy implements DataMotionStrategy { - private static final Logger s_logger = Logger.getLogger(AncientDataMotionStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String NO_REMOTE_ENDPOINT_SSVM = "No remote endpoint to send command, check if host or ssvm is down?"; private static final String NO_REMOTE_ENDPOINT_WITH_ENCRYPTION = "No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s"; @@ -126,8 +127,8 @@ protected boolean needCacheStorage(DataObject srcData, DataObject destData) { if (destStoreTO instanceof NfsTO || destStoreTO.getRole() == DataStoreRole.ImageCache) { return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("needCacheStorage true, dest at " + destTO.getPath() + " dest role " + destStoreTO.getRole().toString() + srcTO.getPath() + " src role " + + if (logger.isDebugEnabled()) { + logger.debug("needCacheStorage true, dest at " + destTO.getPath() + " dest role " + destStoreTO.getRole().toString() + srcTO.getPath() + " src role " + srcStoreTO.getRole().toString()); } return true; @@ -157,7 +158,7 @@ private Scope pickCacheScopeForCopy(DataObject srcData, DataObject destData) { } else if (destScope.getScopeId() != null) { selectedScope = getZoneScope(destScope); } else { - s_logger.warn("Cannot find a zone-wide scope for movement that needs a cache storage"); + logger.warn("Cannot find a zone-wide scope for movement that needs a cache storage"); } return selectedScope; } @@ -177,7 +178,7 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host 
destHo VirtualMachineManager.ExecuteInSequence.value()); EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcForCopy, destData); if (ep == null) { - s_logger.error(NO_REMOTE_ENDPOINT_SSVM); + logger.error(NO_REMOTE_ENDPOINT_SSVM); answer = new Answer(cmd, false, NO_REMOTE_ENDPOINT_SSVM); } else { answer = ep.sendMessage(cmd); @@ -193,19 +194,19 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo destData.getType() == DataObjectType.TEMPLATE)) { // volume transfer from primary to secondary. Volume transfer between primary pools are already handled by copyVolumeBetweenPools // Delete cache in order to certainly transfer a latest image. - if (s_logger.isDebugEnabled()) s_logger.debug("Delete " + cacheType + " cache(id: " + cacheId + + if (logger.isDebugEnabled()) logger.debug("Delete " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { // for template, we want to leave it on cache for performance reason if ((answer == null || !answer.getResult()) && srcForCopy.getRefCount() < 2) { // cache object created by this copy, not already there - s_logger.warn("Copy may not be handled correctly by agent(id: " + (ep != null ? ep.getId() : "\"unspecified\"") + ")." + + logger.warn("Copy may not be handled correctly by agent(id: " + (ep != null ? ep.getId() : "\"unspecified\"") + ")." 
+ " Delete " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.deleteCacheObject(srcForCopy); } else { - if (s_logger.isDebugEnabled()) s_logger.debug("Decrease reference count of " + cacheType + + if (logger.isDebugEnabled()) logger.debug("Decrease reference count of " + cacheType + " cache(id: " + cacheId + ", uuid: " + cacheUuid + ")"); cacheMgr.releaseCacheObject(srcForCopy); } @@ -213,7 +214,7 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo } return answer; } catch (Exception e) { - if (s_logger.isDebugEnabled()) s_logger.debug("copy object failed: ", e); + if (logger.isDebugEnabled()) logger.debug("copy object failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } @@ -300,7 +301,7 @@ protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { Answer answer = null; if (ep == null) { - s_logger.error(NO_REMOTE_ENDPOINT_SSVM); + logger.error(NO_REMOTE_ENDPOINT_SSVM); answer = new Answer(cmd, false, NO_REMOTE_ENDPOINT_SSVM); } else { answer = ep.sendMessage(cmd); @@ -308,7 +309,7 @@ protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { return answer; } catch (Exception e) { - s_logger.error(basicErrMsg, e); + logger.error(basicErrMsg, e); throw new CloudRuntimeException(basicErrMsg); } finally { if (!(storTO instanceof NfsTO)) { @@ -324,14 +325,14 @@ protected Answer cloneVolume(DataObject template, DataObject volume) { EndPoint ep = selector.select(volume, anyVolumeRequiresEncryption(volume)); Answer answer = null; if (ep == null) { - s_logger.error(NO_REMOTE_ENDPOINT_SSVM); + logger.error(NO_REMOTE_ENDPOINT_SSVM); answer = new Answer(cmd, false, NO_REMOTE_ENDPOINT_SSVM); } else { answer = ep.sendMessage(cmd); } return answer; } catch (Exception e) { - if (s_logger.isDebugEnabled()) s_logger.debug("Failed to send to storage pool", e); + if (logger.isDebugEnabled()) logger.debug("Failed to send to storage pool", e); throw new 
CloudRuntimeException("Failed to send to storage pool", e); } } @@ -364,7 +365,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) Answer answer = null; if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -388,7 +389,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) if (answer == null || !answer.getResult()) { if (answer != null) { - if (s_logger.isDebugEnabled()) s_logger.debug("copy to image store failed: " + answer.getDetails()); + if (logger.isDebugEnabled()) logger.debug("copy to image store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -403,7 +404,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) EndPoint ep = selector.select(objOnImageStore, destData, encryptionRequired); if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -411,7 +412,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) if (answer == null || !answer.getResult()) { if (answer != null) { - if (s_logger.isDebugEnabled()) s_logger.debug("copy to primary store failed: " + answer.getDetails()); + if (logger.isDebugEnabled()) logger.debug("copy to primary store failed: " + answer.getDetails()); } objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); @@ -422,7 +423,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) objOnImageStore.processEvent(Event.OperationFailed); imageStore.delete(objOnImageStore); } - s_logger.error("Failed to perform 
operation: "+ e.getLocalizedMessage()); + logger.error("Failed to perform operation: "+ e.getLocalizedMessage()); throw e; } @@ -436,7 +437,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) Answer answer = null; if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -468,19 +469,19 @@ protected Answer migrateVolumeToPool(DataObject srcData, DataObject destData) { Answer answer = null; if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(command, false, errMsg); } else { - if (s_logger.isDebugEnabled()) s_logger.debug("Sending MIGRATE_COPY request to node " + ep); + if (logger.isDebugEnabled()) logger.debug("Sending MIGRATE_COPY request to node " + ep); answer = ep.sendMessage(command); - if (s_logger.isDebugEnabled()) s_logger.debug("Received MIGRATE_COPY response from node with answer: " + answer); + if (logger.isDebugEnabled()) logger.debug("Received MIGRATE_COPY response from node with answer: " + answer); } if (answer == null || !answer.getResult()) { throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool); } else { // Update the volume details after migration. 
- if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY updating volume"); + if (logger.isDebugEnabled()) logger.debug("MIGRATE_COPY updating volume"); VolumeVO volumeVo = volDao.findById(volume.getId()); Long oldPoolId = volume.getPoolId(); @@ -500,7 +501,7 @@ protected Answer migrateVolumeToPool(DataObject srcData, DataObject destData) { } volumeVo.setFolder(folder); volDao.update(volume.getId(), volumeVo); - if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE_COPY update volume data complete"); + if (logger.isDebugEnabled()) logger.debug("MIGRATE_COPY update volume data complete"); } @@ -513,7 +514,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As Answer answer = null; String errMsg = null; try { - if (s_logger.isDebugEnabled()) s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); + if (logger.isDebugEnabled()) logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString()); if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) { answer = copyVolumeFromSnapshot(srcData, destData); } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.TEMPLATE) { @@ -522,16 +523,16 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As answer = cloneVolume(srcData, destData); } else if (destData.getType() == DataObjectType.VOLUME && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary && destData.getDataStore().getRole() == DataStoreRole.Primary) { - if (s_logger.isDebugEnabled()) s_logger.debug("About to MIGRATE copy between datasources"); + if (logger.isDebugEnabled()) logger.debug("About to MIGRATE copy between datasources"); if (srcData.getId() == destData.getId()) { // The volume has to be 
migrated across storage pools. - if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool STARTING"); + if (logger.isDebugEnabled()) logger.debug("MIGRATE copy using migrateVolumeToPool STARTING"); answer = migrateVolumeToPool(srcData, destData); - if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult()); + if (logger.isDebugEnabled()) logger.debug("MIGRATE copy using migrateVolumeToPool DONE: " + answer.getResult()); } else { - if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING"); + if (logger.isDebugEnabled()) logger.debug("MIGRATE copy using copyVolumeBetweenPools STARTING"); answer = copyVolumeBetweenPools(srcData, destData); - if (s_logger.isDebugEnabled()) s_logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult()); + if (logger.isDebugEnabled()) logger.debug("MIGRATE copy using copyVolumeBetweenPools DONE: " + answer.getResult()); } } else if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) { answer = copySnapshot(srcData, destData); @@ -543,7 +544,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As errMsg = answer.getDetails(); } } catch (Exception e) { - if (s_logger.isDebugEnabled()) s_logger.debug("copy failed", e); + if (logger.isDebugEnabled()) logger.debug("copy failed", e); errMsg = e.toString(); } CopyCommandResult result = new CopyCommandResult(null, answer); @@ -574,7 +575,7 @@ protected Answer createTemplateFromSnapshot(DataObject srcData, DataObject destD CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneAndDiskprovisiongStrictnessFlagOnVMwareDest(destData.getTO()), _createprivatetemplatefromsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; if (ep == null) { - s_logger.error(NO_REMOTE_ENDPOINT_SSVM); + logger.error(NO_REMOTE_ENDPOINT_SSVM); answer = new 
Answer(cmd, false, NO_REMOTE_ENDPOINT_SSVM); } else { answer = ep.sendMessage(cmd); @@ -614,7 +615,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { cmd.setOptions(options); EndPoint ep = selector.select(srcData, destData, encryptionRequired); if (ep == null) { - s_logger.error(NO_REMOTE_ENDPOINT_SSVM); + logger.error(NO_REMOTE_ENDPOINT_SSVM); answer = new Answer(cmd, false, NO_REMOTE_ENDPOINT_SSVM); } else { answer = ep.sendMessage(cmd); @@ -625,7 +626,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { cmd.setOptions(options); EndPoint ep = selector.select(srcData, destData, StorageAction.BACKUPSNAPSHOT, encryptionRequired); if (ep == null) { - s_logger.error(NO_REMOTE_ENDPOINT_SSVM); + logger.error(NO_REMOTE_ENDPOINT_SSVM); answer = new Answer(cmd, false, NO_REMOTE_ENDPOINT_SSVM); } else { answer = ep.sendMessage(cmd); @@ -638,7 +639,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { } return answer; } catch (Exception e) { - if (s_logger.isDebugEnabled()) s_logger.debug("copy snasphot failed: ", e); + if (logger.isDebugEnabled()) logger.debug("copy snasphot failed: ", e); if (cacheData != null) { cacheMgr.deleteCacheObject(cacheData); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index c8edb7b8abcf..e55302b8044d 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -36,7 +36,8 @@ import org.apache.cloudstack.secret.dao.PassphraseDao; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.api.to.VirtualMachineTO; @@ -48,7 +49,7 @@ @Component public class DataMotionServiceImpl implements DataMotionService { - private static final Logger LOGGER = Logger.getLogger(DataMotionServiceImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject StorageStrategyFactory storageStrategyFactory; diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java index f2ccce756900..bf8fa43fe6c2 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateCommand; @@ -74,7 +73,6 @@ public class KvmNonManagedStorageDataMotionStrategy extends StorageSystemDataMot @Inject private VirtualMachineManager virtualMachineManager; - private static final Logger LOGGER = Logger.getLogger(KvmNonManagedStorageDataMotionStrategy.class); /** * Uses the canHandle from the Super class {@link StorageSystemDataMotionStrategy}. If the storage pool is of file and the internalCanHandle from {@link StorageSystemDataMotionStrategy} CANT_HANDLE, returns the StrategyPriority.HYPERVISOR strategy priority. otherwise returns CANT_HANDLE. 
@@ -212,7 +210,7 @@ protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolum TemplateInfo directDownloadTemplateInfo = templateDataFactory.getReadyBypassedTemplateOnPrimaryStore(srcVolumeInfo.getTemplateId(), destDataStore.getId(), destHost.getId()); if (directDownloadTemplateInfo != null) { - LOGGER.debug(String.format("Template %s was of direct download type and successfully staged to primary store %s", directDownloadTemplateInfo.getId(), directDownloadTemplateInfo.getDataStore().getId())); + logger.debug(String.format("Template %s was of direct download type and successfully staged to primary store %s", directDownloadTemplateInfo.getId(), directDownloadTemplateInfo.getDataStore().getId())); return; } @@ -223,7 +221,7 @@ protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolum TemplateInfo sourceTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), sourceTemplateDataStore); TemplateObjectTO sourceTemplate = new TemplateObjectTO(sourceTemplateInfo); - LOGGER.debug(String.format("Could not find template [id=%s, name=%s] on the storage pool [id=%s]; copying the template to the target storage pool.", + logger.debug(String.format("Could not find template [id=%s, name=%s] on the storage pool [id=%s]; copying the template to the target storage pool.", srcVolumeInfo.getTemplateId(), sourceTemplateInfo.getName(), destDataStore.getId())); TemplateInfo destTemplateInfo = templateDataFactory.getTemplate(srcVolumeInfo.getTemplateId(), destDataStore); @@ -236,7 +234,7 @@ protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolum return; } } - LOGGER.debug(String.format("Skipping 'copy template to target filesystem storage before migration' due to the template [%s] already exist on the storage pool [%s].", srcVolumeInfo.getTemplateId(), destStoragePool.getId())); + logger.debug(String.format("Skipping 'copy template to target filesystem storage before migration' due to the template 
[%s] already exist on the storage pool [%s].", srcVolumeInfo.getTemplateId(), destStoragePool.getId())); } /** @@ -282,7 +280,7 @@ protected void logInCaseOfTemplateCopyFailure(Answer copyCommandAnswer, Template if (copyCommandAnswer.getDetails() != null) { failureDetails = " Details: " + copyCommandAnswer.getDetails(); } - LOGGER.error(generateFailToCopyTemplateMessage(sourceTemplate, destDataStore) + failureDetails); + logger.error(generateFailToCopyTemplateMessage(sourceTemplate, destDataStore) + failureDetails); } } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index a93f624aa53c..03aa5b509888 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -71,7 +71,8 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -141,7 +142,7 @@ import org.apache.commons.collections.CollectionUtils; public class StorageSystemDataMotionStrategy implements DataMotionStrategy { - private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final Random RANDOM = new Random(System.nanoTime()); private static final int LOCK_TIME_IN_SECONDS = 300; private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported."; @@ -263,7 +264,7 @@ private boolean canHandle(DataObject dataObject) { 
Boolean supportsStorageSystemSnapshots = Boolean.valueOf(value); if (supportsStorageSystemSnapshots) { - LOGGER.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a volume or snapshot and the storage system supports snapshots)"); + logger.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a volume or snapshot and the storage system supports snapshots)"); return true; } @@ -273,7 +274,7 @@ private boolean canHandle(DataObject dataObject) { Boolean canCloneVolume = Boolean.valueOf(value); if (canCloneVolume) { - LOGGER.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a template and the storage system can create a volume from a volume)"); + logger.info("Using 'StorageSystemDataMotionStrategy' (dataObject is a template and the storage system can create a volume from a volume)"); return true; } @@ -434,7 +435,7 @@ private void handleCopyAsyncForVolumes(VolumeInfo srcVolumeInfo, VolumeInfo dest } private void handleError(String errMsg, AsyncCompletionCallback callback) { - LOGGER.warn(errMsg); + logger.warn(errMsg); invokeCallback(errMsg, callback); @@ -638,8 +639,8 @@ private boolean needCacheStorage(DataObject srcData, DataObject destData) { return false; } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("needCacheStorage true; dest at " + destTO.getPath() + ", dest role " + destStoreTO.getRole().toString() + "; src at " + + if (logger.isDebugEnabled()) { + logger.debug("needCacheStorage true; dest at " + destTO.getPath() + ", dest role " + destStoreTO.getRole().toString() + "; src at " + srcTO.getPath() + ", src role " + srcStoreTO.getRole().toString()); } @@ -657,7 +658,7 @@ private Scope pickCacheScopeForCopy(DataObject srcData, DataObject destData) { } else if (destScope.getScopeId() != null) { selectedScope = getZoneScope(destScope); } else { - LOGGER.warn("Cannot find a zone-wide scope for movement that needs a cache storage"); + logger.warn("Cannot find a zone-wide scope for movement that needs a cache storage"); } return 
selectedScope; @@ -770,7 +771,7 @@ private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeI if (ep == null) { String errMsg = "No remote endpoint to send command to; check if host or SSVM is down"; - LOGGER.error(errMsg); + logger.error(errMsg); answer = new Answer(command, false, errMsg); } else { @@ -811,7 +812,7 @@ private void handleFailedVolumeMigration(VolumeInfo srcVolumeInfo, VolumeInfo de _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); } catch (Exception ex) { - LOGGER.warn("Failed to revoke access to the volume with the following ID: " + destVolumeInfo.getId()); + logger.warn("Failed to revoke access to the volume with the following ID: " + destVolumeInfo.getId()); } try { @@ -825,7 +826,7 @@ private void handleFailedVolumeMigration(VolumeInfo srcVolumeInfo, VolumeInfo de volumeDetailsDao.removeDetails(srcVolumeInfo.getId()); } catch (Exception ex) { - LOGGER.warn(ex.getMessage()); + logger.warn(ex.getMessage()); } VolumeVO volumeVO = _volumeDao.findById(srcVolumeInfo.getId()); @@ -972,7 +973,7 @@ else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || Hyper String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); - LOGGER.warn(noSupportForResignErrMsg); + logger.warn(noSupportForResignErrMsg); throw new CloudRuntimeException(noSupportForResignErrMsg); } @@ -1053,7 +1054,7 @@ else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || Hyper if (!copyCmdAnswer.getResult()) { errMsg = copyCmdAnswer.getDetails(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -1074,7 +1075,7 @@ else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || Hyper if (ep == null) { errMsg = "No remote endpoint to send command, check if host or SSVM is down"; - LOGGER.error(errMsg); + logger.error(errMsg); copyCmdAnswer = new 
CopyCmdAnswer(errMsg); } else { @@ -1087,7 +1088,7 @@ else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || Hyper } catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; - LOGGER.warn(msg, ex); + logger.warn(msg, ex); throw new CloudRuntimeException(msg + ex.getMessage(), ex); } finally { @@ -1122,7 +1123,7 @@ else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || Hyper } } catch (Exception ex) { - LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex); + logger.warn("Error processing snapshot event: " + ex.getMessage(), ex); } } } @@ -1182,7 +1183,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + volumeStoragePoolVO.getClusterId(); - LOGGER.warn(noSupportForResignErrMsg); + logger.warn(noSupportForResignErrMsg); throw new CloudRuntimeException(noSupportForResignErrMsg); } @@ -1219,7 +1220,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh if (!copyCmdAnswer.getResult()) { errMsg = copyCmdAnswer.getDetails(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -1239,7 +1240,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh _volumeService.revokeAccess(snapshotInfo, hostVO, snapshotDataStore); } catch (Exception e) { - LOGGER.debug("Failed to revoke access from dest volume", e); + logger.debug("Failed to revoke access from dest volume", e); } if (usingBackendSnapshot) { @@ -1255,7 +1256,7 @@ private void handleCreateNonManagedVolumeFromManagedSnapshot(SnapshotInfo snapsh } } catch (Exception ex) { - LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex); + logger.warn("Error 
processing snapshot event: " + ex.getMessage(), ex); } if (copyCmdAnswer == null) { @@ -1314,7 +1315,7 @@ private void handleCreateManagedVolumeFromNonManagedSnapshot(SnapshotInfo snapsh VolumeApiResult result = future.get(); if (result.isFailed()) { - LOGGER.error("Failed to create a volume: " + result.getResult()); + logger.error("Failed to create a volume: " + result.getResult()); throw new CloudRuntimeException(result.getResult()); } @@ -1409,7 +1410,7 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); - LOGGER.warn(noSupportForResignErrMsg); + logger.warn(noSupportForResignErrMsg); throw new CloudRuntimeException(noSupportForResignErrMsg); } @@ -1442,7 +1443,7 @@ else if (volumeInfo.getFormat() == ImageFormat.OVA) { } if (result.isFailed()) { - LOGGER.warn("Failed to create a volume: " + result.getResult()); + logger.warn("Failed to create a volume: " + result.getResult()); throw new CloudRuntimeException(result.getResult()); } @@ -1486,7 +1487,7 @@ else if (volumeInfo.getFormat() == ImageFormat.OVA) { volumeInfo.getDataStore().getDriver().deleteAsync(volumeInfo.getDataStore(), volumeInfo, null); } catch (Exception exc) { - LOGGER.warn("Failed to delete volume", exc); + logger.warn("Failed to delete volume", exc); } if (templateInfo != null) { @@ -1533,7 +1534,7 @@ private void handleCreateManagedVolumeFromManagedSnapshot(SnapshotInfo snapshotI String noSupportForResignErrMsg = "Unable to locate an applicable host with which to perform a resignature operation : Cluster ID = " + hostVO.getClusterId(); - LOGGER.warn(noSupportForResignErrMsg); + logger.warn(noSupportForResignErrMsg); throw new CloudRuntimeException(noSupportForResignErrMsg); } @@ -1569,7 +1570,7 @@ private void handleCreateManagedVolumeFromManagedSnapshot(SnapshotInfo snapshotI } if (result.isFailed()) { - 
LOGGER.warn("Failed to create a volume: " + result.getResult()); + logger.warn("Failed to create a volume: " + result.getResult()); throw new CloudRuntimeException(result.getResult()); } @@ -1713,7 +1714,7 @@ private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo des catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { String msg = "Failed to copy image : "; - LOGGER.warn(msg, ex); + logger.warn(msg, ex); throw new CloudRuntimeException(msg + ex.getMessage(), ex); } @@ -1829,7 +1830,7 @@ private void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Pri ((PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver()).handleQualityOfServiceForVolumeMigration(volumeInfo, qualityOfServiceState); } catch (Exception ex) { - LOGGER.warn(ex); + logger.warn(ex); } } @@ -1933,10 +1934,10 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach } if (srcVolumeInfo.getTemplateId() != null) { - LOGGER.debug(String.format("Copying template [%s] of volume [%s] from source storage pool [%s] to target storage pool [%s].", srcVolumeInfo.getTemplateId(), srcVolumeInfo.getId(), sourceStoragePool.getId(), destStoragePool.getId())); + logger.debug(String.format("Copying template [%s] of volume [%s] from source storage pool [%s] to target storage pool [%s].", srcVolumeInfo.getTemplateId(), srcVolumeInfo.getId(), sourceStoragePool.getId(), destStoragePool.getId())); copyTemplateToTargetFilesystemStorageIfNeeded(srcVolumeInfo, sourceStoragePool, destDataStore, destStoragePool, destHost); } else { - LOGGER.debug(String.format("Skipping copy template from source storage pool [%s] to target storage pool [%s] before migration due to volume [%s] does not have a template.", sourceStoragePool.getId(), destStoragePool.getId(), srcVolumeInfo.getId())); + logger.debug(String.format("Skipping copy template from source storage pool [%s] to target storage pool [%s] before migration due to volume [%s] does not have a 
template.", sourceStoragePool.getId(), destStoragePool.getId(), srcVolumeInfo.getId())); } VolumeVO destVolume = duplicateVolumeOnAnotherStorage(srcVolume, destStoragePool); @@ -2026,7 +2027,7 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach Integer newVmCpuShares = ((PrepareForMigrationAnswer) pfma).getNewVmCpuShares(); if (newVmCpuShares != null) { - LOGGER.debug(String.format("Setting CPU shares to [%d] as part of migrate VM with volumes command for VM [%s].", newVmCpuShares, vmTO)); + logger.debug(String.format("Setting CPU shares to [%d] as part of migrate VM with volumes command for VM [%s].", newVmCpuShares, vmTO)); migrateCommand.setNewVmCpuShares(newVmCpuShares); } @@ -2052,7 +2053,7 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach String volumesAndStorages = volumeDataStoreMap.entrySet().stream().map(entry -> formatEntryOfVolumesAndStoragesAsJsonToDisplayOnLog(entry)).collect(Collectors.joining(",")); errMsg = String.format("Copy volume(s) to storage(s) [%s] and VM to host [%s] failed in StorageSystemDataMotionStrategy.copyAsync. 
Error message: [%s].", volumesAndStorages, formatMigrationElementsAsJsonToDisplayOnLog("vm", vmTO.getId(), srcHost.getId(), destHost.getId()), ex.getMessage()); - LOGGER.error(errMsg, ex); + logger.error(errMsg, ex); throw new CloudRuntimeException(errMsg); } finally { @@ -2176,7 +2177,7 @@ private void handlePostMigration(boolean success, Map sr } } catch (Exception e) { - LOGGER.debug("Failed to disconnect one or more (original) dest volumes", e); + logger.debug("Failed to disconnect one or more (original) dest volumes", e); } } @@ -2205,14 +2206,14 @@ private void handlePostMigration(boolean success, Map sr disconnectHostFromVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.get_iScsiName()); } catch (Exception e) { - LOGGER.debug("Failed to disconnect (new) dest volume", e); + logger.debug("Failed to disconnect (new) dest volume", e); } try { _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore()); } catch (Exception e) { - LOGGER.debug("Failed to revoke access from dest volume", e); + logger.debug("Failed to revoke access from dest volume", e); } destVolumeInfo.processEvent(Event.OperationFailed); @@ -2226,10 +2227,10 @@ private void handlePostMigration(boolean success, Map sr AsyncCallFuture destroyFuture = _volumeService.expungeVolumeAsync(destVolumeInfo); if (destroyFuture.get().isFailed()) { - LOGGER.debug("Failed to clean up dest volume on storage"); + logger.debug("Failed to clean up dest volume on storage"); } } catch (Exception e) { - LOGGER.debug("Failed to clean up dest volume on storage", e); + logger.debug("Failed to clean up dest volume on storage", e); } } } @@ -2342,7 +2343,7 @@ protected void postVolumeCreationActions(VolumeInfo srcVolumeInfo, VolumeInfo de */ protected void prepareDiskWithSecretConsumerDetail(VirtualMachineTO vmTO, VolumeInfo srcVolume, String destPath) { if (vmTO.getDisks() != null) { - LOGGER.debug(String.format("Preparing VM TO '%s' disks with migration data", vmTO)); + 
logger.debug(String.format("Preparing VM TO '%s' disks with migration data", vmTO)); Arrays.stream(vmTO.getDisks()).filter(diskTO -> diskTO.getData().getId() == srcVolume.getId()).forEach( diskTO -> { if (diskTO.getDetails() == null) { diskTO.setDetails(new HashMap<>()); @@ -2396,7 +2397,7 @@ protected void verifyLiveMigrationForKVM(Map volumeDataSt */ protected void addSourcePoolToPoolsMap(Map sourcePools, StoragePoolVO srcStoragePoolVO, StoragePoolVO destStoragePoolVO) { if (destStoragePoolVO.isManaged() || !StoragePoolType.NetworkFilesystem.equals(destStoragePoolVO.getPoolType())) { - LOGGER.trace(String.format("Skipping adding source pool [%s] to map due to destination pool [%s] is managed or not NFS.", srcStoragePoolVO, destStoragePoolVO)); + logger.trace(String.format("Skipping adding source pool [%s] to map due to destination pool [%s] is managed or not NFS.", srcStoragePoolVO, destStoragePoolVO)); return; } @@ -2413,7 +2414,7 @@ protected void addSourcePoolToPoolsMap(Map sour */ private void verifyDestinationStorage(Map sourcePools, Host destHost) { if (MapUtils.isNotEmpty(sourcePools)) { - LOGGER.debug("Verifying source pools are already available on destination host " + destHost.getUuid()); + logger.debug("Verifying source pools are already available on destination host " + destHost.getUuid()); CheckStorageAvailabilityCommand cmd = new CheckStorageAvailabilityCommand(sourcePools); try { Answer answer = agentManager.send(destHost.getId(), cmd); @@ -2514,7 +2515,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa if (!copyCmdAnswer.getResult()) { errMsg = copyCmdAnswer.getDetails(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -2528,7 +2529,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { String msg = "Failed to create template from volume (Volume ID 
= " + volumeInfo.getId() + ") : "; - LOGGER.warn(msg, ex); + logger.warn(msg, ex); throw new CloudRuntimeException(msg + ex.getMessage(), ex); } @@ -2538,7 +2539,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa _volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); } catch (Exception ex) { - LOGGER.warn("Error revoking access to volume (Volume ID = " + volumeInfo.getId() + "): " + ex.getMessage(), ex); + logger.warn("Error revoking access to volume (Volume ID = " + volumeInfo.getId() + "): " + ex.getMessage(), ex); } } @@ -2562,7 +2563,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa } } catch (Exception ex) { - LOGGER.warn("Error processing snapshot event: " + ex.getMessage(), ex); + logger.warn("Error processing snapshot event: " + ex.getMessage(), ex); } } } @@ -2778,7 +2779,7 @@ private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, Map< if (!lock.lock(LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB (in performResignature) on the following string: " + dataStore.getUuid(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -2793,7 +2794,7 @@ private CopyCmdAnswer performResignature(DataObject dataObj, HostVO hostVO, Map< String msg = "Failed to resign the DataObject with the following ID: " + dataObj.getId(); - LOGGER.warn(msg, ex); + logger.warn(msg, ex); throw new CloudRuntimeException(msg + ex.getMessage()); } @@ -2877,9 +2878,9 @@ private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolu _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); handleQualityOfServiceForVolumeMigration(destVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.NO_MIGRATION); } catch (Throwable e) { - LOGGER.warn("During cleanup post-migration and exception occured: " + e); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Exception during post-migration cleanup.", e); + 
logger.warn("During cleanup post-migration and exception occured: " + e); + if (logger.isDebugEnabled()) { + logger.debug("Exception during post-migration cleanup.", e); } } } @@ -2920,7 +2921,7 @@ private String copyManagedVolumeToSecondaryStorage(VolumeInfo srcVolumeInfo, Vol catch (Exception ex) { String msg = "Failed to perform volume copy to secondary storage : "; - LOGGER.warn(msg, ex); + logger.warn(msg, ex); throw new CloudRuntimeException(msg + ex.getMessage()); } @@ -2997,7 +2998,7 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps catch (CloudRuntimeException | AgentUnavailableException | OperationTimedoutException ex) { String msg = "Failed to perform VDI copy : "; - LOGGER.warn(msg, ex); + logger.warn(msg, ex); throw new CloudRuntimeException(msg + ex.getMessage(), ex); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java index 3557921a8936..730b003fcb06 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java @@ -44,7 +44,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.secstorage.CommandExecLogDao; @@ -53,7 +54,7 @@ public class SecondaryStorageServiceImpl implements SecondaryStorageService { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageServiceImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject DataMotionService motionSrv; @@ -126,11 +127,11 @@ public 
AsyncCallFuture migrateData(DataObject srcDataObject, D else { // Check if template in destination store, if yes, do not proceed if (srcDataObject instanceof TemplateInfo) { - s_logger.debug("Checking if template present at destination"); + logger.debug("Checking if template present at destination"); TemplateDataStoreVO templateStoreVO = templateStoreDao.findByStoreTemplate(destDatastore.getId(), srcDataObject.getId()); if (templateStoreVO != null) { String msg = "Template already exists in destination store"; - s_logger.debug(msg); + logger.debug(msg); res.setResult(msg); res.setSuccess(true); future.complete(res); @@ -143,9 +144,9 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D migrateJob(future, srcDataObject, destDataObject, destDatastore); } } catch (Exception e) { - s_logger.debug("Failed to copy Data", e); + logger.debug("Failed to copy Data", e); if (destDataObject != null) { - s_logger.info("Deleting data on destination store: " + destDataObject.getDataStore().getName()); + logger.info("Deleting data on destination store: " + destDataObject.getDataStore().getName()); destDataObject.getDataStore().delete(destDataObject); } if (!(srcDataObject instanceof VolumeInfo)) { @@ -178,7 +179,7 @@ protected Void migrateDataCallBack(AsyncCallbackDispatcher context = new TemplateOpContext<>(null,(TemplateObject)tmpl, null); @@ -556,7 +557,7 @@ public void handleTemplateSync(DataStore store) { caller.setContext(context); createTemplateAsync(tmpl, store, caller); } else { - s_logger.info("Skip downloading template " + tmplt.getUniqueName() + " since current data center does not have hypervisor " + + logger.info("Skip downloading template " + tmplt.getUniqueName() + " since current data center does not have hypervisor " + tmplt.getHypervisorType().toString()); } } @@ -575,17 +576,17 @@ public void handleTemplateSync(DataStore store) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is 
down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(dtCommand, false, errMsg); } else { answer = ep.sendMessage(dtCommand); } if (answer == null || !answer.getResult()) { - s_logger.info("Failed to deleted template at store: " + store.getName()); + logger.info("Failed to deleted template at store: " + store.getName()); } else { String description = "Deleted template " + tInfo.getTemplateName() + " on secondary storage " + storeId; - s_logger.info(description); + logger.info(description); } } @@ -594,7 +595,7 @@ public void handleTemplateSync(DataStore store) { syncLock.unlock(); } } else { - s_logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing template sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing template sync on data store " + storeId + " now."); } } finally { syncLock.releaseRef(); @@ -669,7 +670,7 @@ protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher listTemplate(DataStore ssStore) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -702,8 +703,8 @@ private Map listTemplate(DataStore ssStore) { ListTemplateAnswer tanswer = (ListTemplateAnswer)answer; return tanswer.getTemplateInfo(); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("can not list template for secondary storage host " + ssStore.getId()); + if (logger.isDebugEnabled()) { + logger.debug("can not list template for secondary storage host " + ssStore.getId()); } } @@ -840,7 +841,7 @@ private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTempl _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize()); } else { // Delete the Datadisk 
templates that were already created as they are now invalid - s_logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent" + logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent" + " template download"); TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore); cleanupDatadiskTemplates(parentTemplateInfo); @@ -855,7 +856,7 @@ private boolean finalizeParentTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO TemplateApiResult result = null; result = templateFuture.get(); if (!result.isSuccess()) { - s_logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent" + logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent" + " template download"); cleanupDatadiskTemplates(templateInfo); } @@ -905,18 +906,18 @@ private void cleanupDatadiskTemplates(TemplateInfo parentTemplateInfo) { DataStore imageStore = parentTemplateInfo.getDataStore(); List datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId()); for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) { - s_logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName()); AsyncCallFuture future = deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore)); try { TemplateApiResult result = future.get(); if (!result.isSuccess()) { - s_logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() 
+ " due to: " + result.getResult()); + logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult()); break; } _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId()); _resourceLimitMgr.decrementResourceCount(datadiskTemplateToDelete.getAccountId(), ResourceType.secondary_storage, datadiskTemplateToDelete.getSize()); } catch (Exception e) { - s_logger.debug("Delete datadisk template failed", e); + logger.debug("Delete datadisk template failed", e); throw new CloudRuntimeException("Delete template Failed", e); } } @@ -1012,7 +1013,7 @@ protected Void syncTemplateCallBack(AsyncCallbackDispatcher copyTemplate(TemplateInfo srcTemplate, // generate a URL from source template ssvm to download to destination data store String url = generateCopyUrl(srcTemplate); if (url == null) { - s_logger.warn("Unable to start/resume copy of template " + srcTemplate.getUniqueName() + " to " + destStore.getName() + + logger.warn("Unable to start/resume copy of template " + srcTemplate.getUniqueName() + " to " + destStore.getName() + ", no secondary storage vm in running state in source zone"); throw new CloudRuntimeException("No secondary VM in running state in source template zone "); } TemplateObject tmplForCopy = (TemplateObject)_templateFactory.getTemplate(srcTemplate, destStore, null); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Setting source template url to " + url); + if (logger.isDebugEnabled()) { + logger.debug("Setting source template url to " + url); } tmplForCopy.setUrl(url); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Mark template_store_ref entry as Creating"); + if (logger.isDebugEnabled()) { + logger.debug("Mark template_store_ref entry as Creating"); } AsyncCallFuture future = new AsyncCallFuture(); DataObject templateOnStore = destStore.create(tmplForCopy); @@ -1089,8 +1090,8 @@ public AsyncCallFuture 
copyTemplate(TemplateInfo srcTemplate, ((TemplateObject)templateOnStore).getImage().setChecksum(null); } // else we don't know what to do. - if (s_logger.isDebugEnabled()) { - s_logger.debug("Invoke datastore driver createAsync to create template on destination store"); + if (logger.isDebugEnabled()) { + logger.debug("Invoke datastore driver createAsync to create template on destination store"); } try { TemplateOpContext context = new TemplateOpContext(null, (TemplateObject)templateOnStore, future); @@ -1121,7 +1122,7 @@ private String generateCopyUrl(String ipAddress, String dir, String path) { _sslCopy = Boolean.parseBoolean(sslCfg); } if(_sslCopy && (_ssvmUrlDomain == null || _ssvmUrlDomain.isEmpty())){ - s_logger.warn("Empty secondary storage url domain, ignoring SSL"); + logger.warn("Empty secondary storage url domain, ignoring SSL"); _sslCopy = false; } if (_sslCopy) { @@ -1141,7 +1142,7 @@ private String generateCopyUrl(TemplateInfo srcTemplate) { EndPoint ep = _epSelector.select(srcTemplate); if (ep != null) { if (ep.getPublicAddr() == null) { - s_logger.warn("A running secondary storage vm has a null public ip?"); + logger.warn("A running secondary storage vm has a null public ip?"); return null; } return generateCopyUrl(ep.getPublicAddr(), ((ImageStoreEntity)srcStore).getMountPoint(), srcTemplate.getInstallPath()); @@ -1194,7 +1195,7 @@ protected Void copyTemplateCallBack(AsyncCallbackDispatcher callback, TemplateOpContext context) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Performing copy template cross zone callback after completion"); + if (logger.isDebugEnabled()) { + logger.debug("Performing copy template cross zone callback after completion"); } TemplateInfo destTemplate = context.getTemplate(); CreateCmdResult result = callback.getResult(); @@ -1219,7 +1220,7 @@ protected Void copyTemplateCrossZoneCallBack(AsyncCallbackDispatcher listImageStoreByProvider(String provider) { @Override public List listImageCacheStores(Scope scope) { if 
(scope.getScopeType() != ScopeType.ZONE) { - s_logger.debug("only support zone wide image cache stores"); + logger.debug("only support zone wide image cache stores"); return null; } List stores = dataStoreDao.findImageCacheByScope(new ZoneScope(scope.getScopeId())); @@ -200,7 +201,7 @@ public int compare(DataStore store1, DataStore store2) { } // No store with space found - s_logger.error(String.format("Can't find an image storage in zone with less than %d usage", + logger.error(String.format("Can't find an image storage in zone with less than %d usage", Math.round(_statsCollector.getImageStoreCapacityThreshold()*100))); return null; } @@ -242,7 +243,7 @@ public List listImageStoresWithFreeCapacity(List imageStor // No store with space found if (stores.isEmpty()) { - s_logger.error(String.format("Can't find image storage in zone with less than %d usage", + logger.error(String.format("Can't find image storage in zone with less than %d usage", Math.round(_statsCollector.getImageStoreCapacityThreshold() * 100))); } return stores; diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index d4e2c056763e..d59f6d4c54dd 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -26,7 +26,8 @@ import javax.inject.Inject; import com.cloud.storage.Upload; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; @@ -54,7 +55,7 @@ import com.cloud.utils.component.ComponentContext; public class ImageStoreImpl implements ImageStoreEntity { - private static final Logger 
s_logger = Logger.getLogger(ImageStoreImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject VMTemplateDao imageDao; @Inject @@ -153,10 +154,10 @@ public boolean delete(DataObject obj) { try { future.get(); } catch (InterruptedException e) { - s_logger.debug("failed delete obj", e); + logger.debug("failed delete obj", e); return false; } catch (ExecutionException e) { - s_logger.debug("failed delete obj", e); + logger.debug("failed delete obj", e); return false; } objectInStoreMgr.delete(obj); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index b688197bfb99..c04ba2207df1 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -24,7 +24,8 @@ import javax.inject.Inject; import com.cloud.user.UserData; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -59,7 +60,7 @@ @SuppressWarnings("serial") public class TemplateObject implements TemplateInfo { - private static final Logger s_logger = Logger.getLogger(TemplateObject.class); + protected Logger logger = LogManager.getLogger(getClass()); private VMTemplateVO imageVO; private DataStore dataStore; private String url; @@ -243,10 +244,10 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answe } objectInStoreMgr.update(this, event); } catch (NoTransitionException e) { - s_logger.debug("failed to update state", e); + logger.debug("failed to update state", e); throw new CloudRuntimeException("Failed to update state" + e.toString()); } catch 
(Exception ex) { - s_logger.debug("failed to process event and answer", ex); + logger.debug("failed to process event and answer", ex); objectInStoreMgr.delete(this); throw new CloudRuntimeException("Failed to process event", ex); } finally { @@ -398,7 +399,7 @@ public boolean canBeDeletedFromDataStore() { // Marking downloaded templates for deletion, but might skip any deletion handled for failed templates. // Only templates not downloaded and in error state (with no install path) cannot be deleted from the datastore, so doesn't impact last behavior for templates with other states if (downloadStatus == null || downloadStatus == Status.NOT_DOWNLOADED || (downloadStatus == Status.DOWNLOAD_ERROR && downloadPercent == 0)) { - s_logger.debug("Template: " + getId() + " cannot be deleted from the store: " + getDataStore().getId()); + logger.debug("Template: " + getId() + " cannot be deleted from the store: " + getDataStore().getId()); return false; } diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java index c6003afee578..1d072985a667 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java +++ b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -60,7 +59,6 @@ import com.cloud.utils.fsm.StateMachine2; public class DirectAgentManagerSimpleImpl extends ManagerBase implements AgentManager { - private static final Logger logger = Logger.getLogger(DirectAgentManagerSimpleImpl.class); private final Map hostResourcesMap = new HashMap(); @Inject HostDao hostDao; 
diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/MockRpcCallBack.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/MockRpcCallBack.java index 25f96c267509..8b3de65b4cca 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/MockRpcCallBack.java +++ b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/MockRpcCallBack.java @@ -18,7 +18,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -28,7 +29,7 @@ import com.cloud.utils.db.DB; public class MockRpcCallBack implements Runnable { - private static final Logger s_logger = Logger.getLogger(MockRpcCallBack.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AgentManager agentMgr; private Command cmd; @@ -54,7 +55,7 @@ public void run() { Answer answer = agentMgr.send(hostId, cmd); callback.complete(answer); } catch (Throwable e) { - s_logger.debug("send command failed:", e); + logger.debug("send command failed:", e); } } diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/ObjectStorageServiceImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/ObjectStorageServiceImpl.java index a0db89bad4e1..40edc66d4d0e 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/ObjectStorageServiceImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/ObjectStorageServiceImpl.java @@ -18,11 +18,8 @@ package org.apache.cloudstack.storage.object; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectStorageService; -import org.apache.log4j.Logger; public class ObjectStorageServiceImpl implements ObjectStorageService { - private static final Logger s_logger = 
Logger.getLogger(ObjectStorageServiceImpl.class); - } diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/manager/ObjectStoreProviderManagerImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/manager/ObjectStoreProviderManagerImpl.java index 40f503692e18..222b21e0ce84 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/manager/ObjectStoreProviderManagerImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/manager/ObjectStoreProviderManagerImpl.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.storage.object.ObjectStoreEntity; import org.apache.cloudstack.storage.object.datastore.ObjectStoreProviderManager; import org.apache.cloudstack.storage.object.store.ObjectStoreImpl; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.annotation.PostConstruct; @@ -41,7 +40,6 @@ @Component public class ObjectStoreProviderManagerImpl implements ObjectStoreProviderManager, Configurable { - private static final Logger s_logger = Logger.getLogger(ObjectStoreProviderManagerImpl.class); @Inject ObjectStoreDao objectStoreDao; diff --git a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java index 825b349bdfce..3c525ba93646 100644 --- a/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java +++ b/engine/storage/object/src/main/java/org/apache/cloudstack/storage/object/store/ObjectStoreImpl.java @@ -29,14 +29,12 @@ import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.object.ObjectStoreDriver; import org.apache.cloudstack.storage.object.ObjectStoreEntity; -import org.apache.log4j.Logger; import java.util.Date; import java.util.List; import java.util.Map; public class 
ObjectStoreImpl implements ObjectStoreEntity { - private static final Logger s_logger = Logger.getLogger(ObjectStoreImpl.class); protected ObjectStoreDriver driver; protected ObjectStoreVO objectStoreVO; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java index 19b3fc87f4e3..04cca2e8f923 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/CephSnapshotStrategy.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; @@ -44,7 +43,6 @@ public class CephSnapshotStrategy extends StorageSystemSnapshotStrategy { @Inject private VolumeDao volumeDao; - private static final Logger s_logger = Logger.getLogger(CephSnapshotStrategy.class); @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { @@ -71,7 +69,7 @@ public boolean revertSnapshot(SnapshotInfo snapshotInfo) { VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); ImageFormat imageFormat = volumeInfo.getFormat(); if (!ImageFormat.RAW.equals(imageFormat)) { - s_logger.error(String.format("Does not support revert snapshot of the image format [%s] on Ceph/RBD. Can only rollback snapshots of format RAW", imageFormat)); + logger.error(String.format("Does not support revert snapshot of the image format [%s] on Ceph/RBD. 
Can only rollback snapshots of format RAW", imageFormat)); return false; } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java index f1f073db1701..7e902bc61fe5 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java @@ -44,7 +44,6 @@ import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.DataTO; import com.cloud.event.EventTypes; @@ -79,7 +78,6 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase { - private static final Logger s_logger = Logger.getLogger(DefaultSnapshotStrategy.class); @Inject SnapshotService snapshotSvr; @@ -136,12 +134,12 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { try { snapObj.processEvent(Snapshot.Event.OperationNotPerformed); } catch (NoTransitionException e) { - s_logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString()); + logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString()); throw new CloudRuntimeException(e.toString()); } return snapshotDataFactory.getSnapshot(snapObj.getId(), store); } else { - s_logger.debug("parent snapshot hasn't been backed up yet"); + logger.debug("parent snapshot hasn't been backed up yet"); } } @@ -195,7 +193,7 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { protected boolean deleteSnapshotChain(SnapshotInfo snapshot, String storageToString) { DataTO snapshotTo = snapshot.getTO(); - s_logger.debug(String.format("Deleting %s chain of snapshots.", snapshotTo)); + 
logger.debug(String.format("Deleting %s chain of snapshots.", snapshotTo)); boolean result = false; boolean resultIsSet = false; @@ -205,11 +203,11 @@ protected boolean deleteSnapshotChain(SnapshotInfo snapshot, String storageToStr SnapshotInfo child = snapshot.getChild(); if (child != null) { - s_logger.debug(String.format("Snapshot [%s] has child [%s], not deleting it on the storage [%s]", snapshotTo, child.getTO(), storageToString)); + logger.debug(String.format("Snapshot [%s] has child [%s], not deleting it on the storage [%s]", snapshotTo, child.getTO(), storageToString)); break; } - s_logger.debug(String.format("Snapshot [%s] does not have children; therefore, we will delete it and its parents.", snapshotTo)); + logger.debug(String.format("Snapshot [%s] does not have children; therefore, we will delete it and its parents.", snapshotTo)); SnapshotInfo parent = snapshot.getParent(); boolean deleted = false; @@ -217,7 +215,7 @@ protected boolean deleteSnapshotChain(SnapshotInfo snapshot, String storageToStr if (parent.getPath() != null && parent.getPath().equalsIgnoreCase(snapshot.getPath())) { //NOTE: if both snapshots share the same path, it's for xenserver's empty delta snapshot. We can't delete the snapshot on the backend, as parent snapshot still reference to it //Instead, mark it as destroyed in the db. 
- s_logger.debug(String.format("Snapshot [%s] is an empty delta snapshot; therefore, we will only mark it as destroyed in the database.", snapshotTo)); + logger.debug(String.format("Snapshot [%s] is an empty delta snapshot; therefore, we will only mark it as destroyed in the database.", snapshotTo)); deleted = true; if (!resultIsSet) { result = true; @@ -232,7 +230,7 @@ protected boolean deleteSnapshotChain(SnapshotInfo snapshot, String storageToStr if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - s_logger.debug(String.format("Deleting snapshot %s from image cache [%s].", snapshotTo, cacheSnap.getDataStore().getName())); + logger.debug(String.format("Deleting snapshot %s from image cache [%s].", snapshotTo, cacheSnap.getDataStore().getName())); cacheSnap.delete(); } } @@ -242,14 +240,14 @@ protected boolean deleteSnapshotChain(SnapshotInfo snapshot, String storageToStr resultIsSet = true; } } catch (Exception e) { - s_logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e); + logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e); } } snapshot = parent; } } catch (Exception e) { - s_logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e); + logger.error(String.format("Failed to delete snapshot [%s] on storage [%s] due to [%s].", snapshotTo, storageToString, e.getMessage()), e); } return result; } @@ -362,9 +360,9 @@ protected Boolean deleteSnapshotInfo(SnapshotInfo snapshotInfo, SnapshotVO snaps if (!DataStoreRole.Primary.equals(dataStore.getRole())) { verifyIfTheSnapshotIsBeingUsedByAnyVolume(snapshotObject); if (deleteSnapshotChain(snapshotInfo, storageToString)) { - s_logger.debug(String.format("%s was deleted on %s. 
We will mark the snapshot as destroyed.", snapshotVo, storageToString)); + logger.debug(String.format("%s was deleted on %s. We will mark the snapshot as destroyed.", snapshotVo, storageToString)); } else { - s_logger.debug(String.format("%s was not deleted on %s; however, we will mark the snapshot as destroyed for future garbage collecting.", snapshotVo, + logger.debug(String.format("%s was not deleted on %s; however, we will mark the snapshot as destroyed for future garbage collecting.", snapshotVo, storageToString)); } snapshotStoreDao.updateDisplayForSnapshotStoreRole(snapshotVo.getId(), dataStore.getId(), dataStore.getRole(), false); @@ -376,12 +374,12 @@ protected Boolean deleteSnapshotInfo(SnapshotInfo snapshotInfo, SnapshotVO snaps snapshotStoreDao.updateDisplayForSnapshotStoreRole(snapshotVo.getId(), dataStore.getId(), dataStore.getRole(), false); return true; } - s_logger.debug(String.format("Failed to delete %s on %s.", snapshotVo, storageToString)); + logger.debug(String.format("Failed to delete %s on %s.", snapshotVo, storageToString)); if (isLastSnapshotRef) { snapshotObject.processEvent(Snapshot.Event.OperationFailed); } } catch (NoTransitionException ex) { - s_logger.warn(String.format("Failed to delete %s on %s due to %s.", snapshotVo, storageToString, ex.getMessage()), ex); + logger.warn(String.format("Failed to delete %s on %s due to %s.", snapshotVo, storageToString, ex.getMessage()), ex); } return false; } @@ -395,11 +393,11 @@ protected boolean deleteSnapshotInPrimaryStorage(SnapshotInfo snapshotInfo, Snap msg = String.format("%s We will mark the snapshot as destroyed.", msg); snapshotObject.processEvent(Snapshot.Event.OperationSucceeded); } - s_logger.debug(msg); + logger.debug(msg); return true; } } catch (CloudRuntimeException ex) { - s_logger.warn(String.format("Unable do delete snapshot %s on %s due to [%s]. 
The reference will be marked as 'Destroying' for future garbage collecting.", + logger.warn(String.format("Unable do delete snapshot %s on %s due to [%s]. The reference will be marked as 'Destroying' for future garbage collecting.", snapshotVo, storageToString, ex.getMessage()), ex); } return false; @@ -464,7 +462,7 @@ public boolean revertSnapshot(SnapshotInfo snapshot) { result = snapshotSvr.revertSnapshot(snapshot); if (!result) { - s_logger.debug("Failed to revert snapshot: " + snapshot.getId()); + logger.debug("Failed to revert snapshot: " + snapshot.getId()); throw new CloudRuntimeException("Failed to revert snapshot: " + snapshot.getId()); } @@ -509,7 +507,7 @@ public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) { try { result = snapshotSvr.takeSnapshot(snapshot); if (result.isFailed()) { - s_logger.debug("Failed to take snapshot: " + result.getResult()); + logger.debug("Failed to take snapshot: " + result.getResult()); throw new CloudRuntimeException(result.getResult()); } } finally { @@ -564,7 +562,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } } catch (Exception e) { - s_logger.debug("Failed to clean up snapshots on primary storage", e); + logger.debug("Failed to clean up snapshots on primary storage", e); } } }); @@ -583,7 +581,7 @@ public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperat return StrategyPriority.CANT_HANDLE; } if (zoneId != null && SnapshotOperation.DELETE.equals(op)) { - s_logger.debug(String.format("canHandle for zone ID: %d, operation: %s - %s", zoneId, op, StrategyPriority.DEFAULT)); + logger.debug(String.format("canHandle for zone ID: %d, operation: %s - %s", zoneId, op, StrategyPriority.DEFAULT)); } return StrategyPriority.DEFAULT; } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java index 
3dee4f4aa94f..0d48cb944aee 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; @@ -42,7 +41,6 @@ public class ScaleIOSnapshotStrategy extends StorageSystemSnapshotStrategy { @Inject private VolumeDao volumeDao; - private static final Logger LOG = Logger.getLogger(ScaleIOSnapshotStrategy.class); @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { @@ -73,7 +71,7 @@ public boolean revertSnapshot(SnapshotInfo snapshotInfo) { VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); Storage.ImageFormat imageFormat = volumeInfo.getFormat(); if (!Storage.ImageFormat.RAW.equals(imageFormat)) { - LOG.error(String.format("Does not support revert snapshot of the image format [%s] on PowerFlex. Can only rollback snapshots of format RAW", imageFormat)); + logger.error(String.format("Does not support revert snapshot of the image format [%s] on PowerFlex. 
Can only rollback snapshots of format RAW", imageFormat)); return false; } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 6cf68f64fd92..961a647d7a8c 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -41,7 +41,8 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -60,7 +61,7 @@ import com.cloud.utils.fsm.NoTransitionException; public class SnapshotObject implements SnapshotInfo { - private static final Logger s_logger = Logger.getLogger(SnapshotObject.class); + protected Logger logger = LogManager.getLogger(getClass()); private SnapshotVO snapshot; private DataStore store; private Object payload; @@ -182,7 +183,7 @@ public void markBackedUp() throws CloudRuntimeException{ try { processEvent(Event.OperationNotPerformed); } catch (NoTransitionException ex) { - s_logger.error("no transition error: ", ex); + logger.error("no transition error: ", ex); throw new CloudRuntimeException("Error marking snapshot backed up: " + this.snapshot.getId() + " " + ex.getMessage()); } @@ -235,7 +236,7 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event) { try { objectInStoreMgr.update(this, event); } catch (Exception e) { - s_logger.debug("Failed to update state:" + e.toString()); + logger.debug("Failed to update state:" + e.toString()); throw new CloudRuntimeException("Failed to update 
state: " + e.toString()); } finally { DataObjectInStore obj = objectInStoreMgr.findObject(this, this.getDataStore()); @@ -369,12 +370,12 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answe if (snapshotTO.getVolume() != null && snapshotTO.getVolume().getPath() != null) { VolumeVO vol = volumeDao.findByUuid(snapshotTO.getVolume().getUuid()); if (vol != null) { - s_logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " + vol.getPath() + "->" + + logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " + vol.getPath() + "->" + snapshotTO.getVolume().getPath()); vol.setPath(snapshotTO.getVolume().getPath()); volumeDao.update(vol.getId(), vol); } else { - s_logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); + logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); } } } else { diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 9c7ee9834749..dafc40e0674d 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -57,7 +57,8 @@ import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.configuration.Config; @@ -79,7 +80,7 @@ import com.cloud.utils.fsm.NoTransitionException; public class SnapshotServiceImpl implements 
SnapshotService { - private static final Logger s_logger = Logger.getLogger(SnapshotServiceImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected SnapshotDao _snapshotDao; @Inject @@ -176,7 +177,7 @@ private String generateCopyUrlBase(String hostname, String dir) { _sslCopy = Boolean.parseBoolean(sslCfg); } if(_sslCopy && (_ssvmUrlDomain == null || _ssvmUrlDomain.isEmpty())){ - s_logger.warn("Empty secondary storage url domain, ignoring SSL"); + logger.warn("Empty secondary storage url domain, ignoring SSL"); _sslCopy = false; } if (_sslCopy) { @@ -197,12 +198,12 @@ protected Void createSnapshotAsyncCallback(AsyncCallbackDispatcher future = context.future; SnapshotResult snapResult = new SnapshotResult(snapshot, result.getAnswer()); if (result.isFailed()) { - s_logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); + logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); try { snapshot.processEvent(Snapshot.Event.OperationFailed); snapshot.processEvent(Event.OperationFailed); } catch (Exception e) { - s_logger.debug("Failed to update snapshot state due to " + e.getMessage()); + logger.debug("Failed to update snapshot state due to " + e.getMessage()); } snapResult.setResult(result.getResult()); @@ -214,12 +215,12 @@ protected Void createSnapshotAsyncCallback(AsyncCallbackDispatcher snapshotDataStoreVOs = _snapshotStoreDao.findBySnapshotId(snapshotId); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotDataStoreVOs) { - s_logger.debug("Remove snapshot " + snapshotId + ", status " + snapshotDataStoreVO.getState() + + logger.debug("Remove snapshot " + snapshotId + ", status " + snapshotDataStoreVO.getState() + " on snapshot_store_ref table with id: " + snapshotDataStoreVO.getId()); _snapshotStoreDao.remove(snapshotDataStoreVO.getId()); } - s_logger.debug("Remove snapshot " + snapshotId + " status " + snaphsot.getState() + " from snapshot 
table"); + logger.debug("Remove snapshot " + snapshotId + " status " + snaphsot.getState() + " from snapshot table"); _snapshotDao.remove(snapshotId); } } @@ -631,8 +632,8 @@ private void syncSnapshotToRegionStore(long snapshotId, DataStore store){ throw new CloudRuntimeException("Cannot find an entry in snapshot_store_ref for snapshot " + snapshotId + " on region store: " + store.getName()); } if (snapOnStore.getPath() == null || snapOnStore.getPath().length() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("sync snapshot " + snapshotId + " from cache to object store..."); + if (logger.isDebugEnabled()) { + logger.debug("sync snapshot " + snapshotId + " from cache to object store..."); } // snapshot is not on region store yet, sync to region store SnapshotInfo srcSnapshot = _snapshotFactory.getReadySnapshotOnCache(snapshotId); @@ -688,7 +689,7 @@ protected Void syncSnapshotCallBack(AsyncCallbackDispatcher copySnapshot(SnapshotInfo snapshot, Strin SnapshotObject snapshotForCopy = (SnapshotObject)_snapshotFactory.getSnapshot(snapshot, store); snapshotForCopy.setUrl(copyUrl); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Mark snapshot_store_ref entry as Creating"); + if (logger.isDebugEnabled()) { + logger.debug("Mark snapshot_store_ref entry as Creating"); } AsyncCallFuture future = new AsyncCallFuture(); DataObject snapshotOnStore = store.create(snapshotForCopy); ((SnapshotObject)snapshotOnStore).setUrl(copyUrl); snapshotOnStore.processEvent(Event.CreateOnlyRequested); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Invoke datastore driver createAsync to create snapshot on destination store"); + if (logger.isDebugEnabled()) { + logger.debug("Invoke datastore driver createAsync to create snapshot on destination store"); } try { CopySnapshotContext context = new CopySnapshotContext<>(null, (SnapshotObject)snapshotOnStore, snapshotForCopy, future); @@ -768,7 +769,7 @@ public AsyncCallFuture queryCopySnapshot(SnapshotInfo snapshot) 
AsyncCallFuture future = new AsyncCallFuture<>(); EndPoint ep = epSelector.select(snapshot); if (ep == null) { - s_logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %d with store %d", snapshot.getId(), snapshot.getDataStore().getId())); + logger.error(String.format("Failed to find endpoint for generating copy URL for snapshot %d with store %d", snapshot.getId(), snapshot.getDataStore().getId())); throw new ResourceUnavailableException("No secondary VM in running state in source snapshot zone", DataCenter.class, snapshot.getDataCenterId()); } DataStore store = snapshot.getDataStore(); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java index ba16e75f737a..2bfcbc107f75 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java @@ -21,8 +21,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public abstract class SnapshotStrategyBase implements SnapshotStrategy { + protected Logger logger = LogManager.getLogger(getClass()); @Inject SnapshotService snapshotSvr; diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index dabb8d17702d..9838e41f8f6c 100644 --- 
a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -91,7 +90,6 @@ @Component public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { - private static final Logger s_logger = Logger.getLogger(StorageSystemSnapshotStrategy.class); @Inject private AgentManager agentMgr; @Inject private ClusterDao clusterDao; @@ -132,7 +130,7 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { if (!canStorageSystemCreateVolumeFromSnapshot) { String msg = "Cannot archive snapshot: 'canStorageSystemCreateVolumeFromSnapshot' was false."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -142,7 +140,7 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { if (!computeClusterSupportsResign) { String msg = "Cannot archive snapshot: 'computeClusterSupportsResign' was false."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -185,7 +183,7 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { SnapshotObject snapshotObj = (SnapshotObject)snapshotDataFactory.getSnapshotOnPrimaryStore(snapshotId); if (snapshotObj == null) { - s_logger.debug("Can't find snapshot; deleting it in DB"); + logger.debug("Can't find snapshot; deleting it in DB"); snapshotDao.remove(snapshotId); @@ -205,14 +203,14 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { try { snapshotObj.processEvent(Snapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - 
s_logger.debug("Failed to change snapshot state: " + e1.toString()); + logger.debug("Failed to change snapshot state: " + e1.toString()); } throw new InvalidParameterValueException("Unable to perform delete operation, Snapshot with id: " + snapshotId + " is in use "); } } catch (NoTransitionException e) { - s_logger.debug("Failed to set the state to destroying: ", e); + logger.debug("Failed to set the state to destroying: ", e); return false; } @@ -226,13 +224,13 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { snapshotObj.getName(), null, null, 0L, snapshotObj.getClass().getName(), snapshotObj.getUuid()); } catch (Exception e) { - s_logger.debug("Failed to delete snapshot: ", e); + logger.debug("Failed to delete snapshot: ", e); try { snapshotObj.processEvent(Snapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.debug("Failed to change snapshot state: " + e.toString()); + logger.debug("Failed to change snapshot state: " + e.toString()); } return false; @@ -302,7 +300,7 @@ public boolean revertSnapshot(SnapshotInfo snapshotInfo) { if (!volumeInfo.getPoolId().equals(snapshotStoragePoolId)) { String errMsg = "Storage pool mismatch"; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -313,7 +311,7 @@ public boolean revertSnapshot(SnapshotInfo snapshotInfo) { if (!storageSystemSupportsCapability) { String errMsg = "Storage pool revert capability not supported"; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -335,7 +333,7 @@ protected void executeRevertSnapshot(SnapshotInfo snapshotInfo, VolumeInfo volum if (snapshotVO == null) { String errMsg = "Failed to acquire lock on the following snapshot: " + snapshotInfo.getId(); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -362,7 +360,7 @@ protected void executeRevertSnapshot(SnapshotInfo snapshotInfo, VolumeInfo volum String errMsg = 
String.format("Failed to revert volume [name:%s, format:%s] to snapshot [id:%s] state", volumeInfo.getName(), volumeInfo.getFormat(), snapshotInfo.getSnapshotId()); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -500,7 +498,7 @@ else if (volumeInfo.getFormat() == ImageFormat.OVA || volumeInfo.getFormat() == result = snapshotSvr.takeSnapshot(snapshotInfo); if (result.isFailed()) { - s_logger.debug("Failed to take a snapshot: " + result.getResult()); + logger.debug("Failed to take a snapshot: " + result.getResult()); throw new CloudRuntimeException(result.getResult()); } @@ -539,7 +537,7 @@ public void postSnapshotCreation(SnapshotInfo snapshot) { try { snapshotSvr.deleteSnapshot(snapshot); } catch (Exception e) { - s_logger.warn("Failed to clean up snapshot '" + snapshot.getId() + "' on primary storage: " + e.getMessage()); + logger.warn("Failed to clean up snapshot '" + snapshot.getId() + "' on primary storage: " + e.getMessage()); } } @@ -571,7 +569,7 @@ private VMSnapshot takeHypervisorSnapshot(VolumeInfo volumeInfo) { Thread.sleep(60000); } catch (Exception ex) { - s_logger.warn(ex.getMessage(), ex); + logger.warn(ex.getMessage(), ex); } return vmSnapshot; @@ -686,7 +684,7 @@ private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInf if (hostVO == null) { final String errMsg = "Unable to locate an applicable host"; - s_logger.error("performSnapshotAndCopyOnHostSide: " + errMsg); + logger.error("performSnapshotAndCopyOnHostSide: " + errMsg); throw new CloudRuntimeException(errMsg); } @@ -724,7 +722,7 @@ private void performSnapshotAndCopyOnHostSide(VolumeInfo volumeInfo, SnapshotInf } } catch (Exception ex) { - s_logger.debug(ex.getMessage(), ex); + logger.debug(ex.getMessage(), ex); } } @@ -894,13 +892,13 @@ private void markAsBackedUp(SnapshotObject snapshotObj) { snapshotObj.processEvent(Snapshot.Event.OperationSucceeded); } catch (NoTransitionException ex) { - s_logger.debug("Failed to 
change state: " + ex.toString()); + logger.debug("Failed to change state: " + ex.toString()); try { snapshotObj.processEvent(Snapshot.Event.OperationFailed); } catch (NoTransitionException ex2) { - s_logger.debug("Failed to change state: " + ex2.toString()); + logger.debug("Failed to change state: " + ex2.toString()); } } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java index e2815c005c44..1d3788a03014 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -77,7 +76,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshotStrategy { - private static final Logger s_logger = Logger.getLogger(DefaultVMSnapshotStrategy.class); @Inject VMSnapshotHelper vmSnapshotHelper; @Inject @@ -164,7 +162,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { answer = (CreateVMSnapshotAnswer)agentMgr.send(hostId, ccmd); if (answer != null && answer.getResult()) { processAnswer(vmSnapshotVO, userVm, answer, hostId); - s_logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); result = true; long new_chain_size=0; for (VolumeObjectTO volumeTo : answer.getVolumeTOs()) { @@ -177,21 
+175,21 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { String errMsg = "Creating VM snapshot: " + vmSnapshot.getName() + " failed"; if (answer != null && answer.getDetails() != null) errMsg = errMsg + " due to " + answer.getDetails(); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (OperationTimedoutException e) { - s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); + logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); } catch (AgentUnavailableException e) { - s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e); + logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e); throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -204,7 +202,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - s_logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } @@ -235,7 +233,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { return true; } else { String errMsg = (answer == null) ? 
null : answer.getDetails(); - s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); + logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); processAnswer(vmSnapshotVO, userVm, answer, hostId); throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); } @@ -271,7 +269,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws NoTran }); } catch (Exception e) { String errMsg = "Error while process answer: " + as.getClass() + " due to " + e.getMessage(); - s_logger.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -377,7 +375,7 @@ protected void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm user UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(), vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, VMSnapshot.class.getName(), vmSnapshot.getUuid(), details); } catch (Exception e) { - s_logger.error("Failed to publis usage event " + type, e); + logger.error("Failed to publis usage event " + type, e); } } @@ -420,21 +418,21 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed"; if (answer != null && answer.getDetails() != null) errMsg = errMsg + " due to " + answer.getDetails(); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (OperationTimedoutException e) { - s_logger.debug("Failed to revert vm snapshot", e); + logger.debug("Failed to revert vm snapshot", e); throw new CloudRuntimeException(e.getMessage()); } catch (AgentUnavailableException e) { - s_logger.debug("Failed to revert vm snapshot", e); + logger.debug("Failed to revert 
vm snapshot", e); throw new CloudRuntimeException(e.getMessage()); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -451,7 +449,7 @@ public boolean deleteVMSnapshotFromDB(VMSnapshot vmSnapshot, boolean unmanage) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - s_logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index 50afa647dc37..d27beecfddac 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.VMSnapshotTO; import com.cloud.alert.AlertManager; @@ -70,7 +69,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshotStrategy { - private static final Logger LOGGER = 
Logger.getLogger(ScaleIOVMSnapshotStrategy.class); @Inject VMSnapshotHelper vmSnapshotHelper; @Inject @@ -213,7 +211,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { finalizeCreate(vmSnapshotVO, volumeTOs); result = true; - LOGGER.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); long new_chain_size=0; for (VolumeObjectTO volumeTo : volumeTOs) { @@ -224,7 +222,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { return vmSnapshot; } catch (Exception e) { String errMsg = "Unable to take vm snapshot due to: " + e.getMessage(); - LOGGER.warn(errMsg, e); + logger.warn(errMsg, e); throw new CloudRuntimeException(errMsg); } } finally { @@ -236,7 +234,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { String message = "Snapshot operation failed for VM: " + userVm.getDisplayName() + ", Please check and delete if any stale volumes created with VM snapshot id: " + vmSnapshot.getVmId(); alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT, userVm.getDataCenterId(), userVm.getPodIdToDeployIn(), subject, message); } catch (NoTransitionException e1) { - LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -274,7 +272,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws NoTran }); } catch (Exception e) { String errMsg = "Error while finalize create vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); - LOGGER.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -317,14 +315,14 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { result = true; } catch (Exception e) { String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " 
failed due to " + e.getMessage(); - LOGGER.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -361,7 +359,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws NoTran }); } catch (Exception e) { String errMsg = "Error while finalize revert vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); - LOGGER.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -374,7 +372,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - LOGGER.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } @@ -397,7 +395,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { if (volumesDeleted <= 0) { throw new CloudRuntimeException("Failed to delete VM snapshot: " + vmSnapshot.getName()); } else if (volumesDeleted != volumeTOs.size()) { - LOGGER.warn("Unable to delete all volumes of the VM snapshot: " + vmSnapshot.getName()); + logger.warn("Unable to delete all volumes of the VM snapshot: " + vmSnapshot.getName()); } finalizeDelete(vmSnapshotVO, volumeTOs); @@ -410,7 +408,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { return true; } catch (Exception e) { String errMsg = "Unable to delete vm snapshot: " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " due to " + 
e.getMessage(); - LOGGER.warn(errMsg, e); + logger.warn(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -453,7 +451,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws NoTran }); } catch (Exception e) { String errMsg = "Error while finalize delete vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); - LOGGER.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -463,7 +461,7 @@ public boolean deleteVMSnapshotFromDB(VMSnapshot vmSnapshot, boolean unmanage) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - LOGGER.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); @@ -507,7 +505,7 @@ private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(), vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, VMSnapshot.class.getName(), vmSnapshot.getUuid(), details); } catch (Exception e) { - LOGGER.error("Failed to publish usage event " + type, e); + logger.error("Failed to publish usage event " + type, e); } } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java index f5d70817333c..ec73246851cd 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java +++ 
b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.CreateVMSnapshotAnswer; import com.cloud.agent.api.CreateVMSnapshotCommand; @@ -75,7 +74,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy { - private static final Logger s_logger = Logger.getLogger(StorageVMSnapshotStrategy.class); @Inject VolumeApiService volumeService; @Inject @@ -148,7 +146,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { vmSnapshotVO.setParent(current.getId()); } CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand(userVm.getInstanceName(), userVm.getUuid(), target, volumeTOs, guestOS.getDisplayName()); - s_logger.info("Creating VM snapshot for KVM hypervisor without memory"); + logger.info("Creating VM snapshot for KVM hypervisor without memory"); List vinfos = new ArrayList<>(); for (VolumeObjectTO volumeObjectTO : volumeTOs) { @@ -166,7 +164,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { thawCmd = new FreezeThawVMCommand(userVm.getInstanceName()); thawCmd.setOption(FreezeThawVMCommand.THAW); if (freezeAnswer != null && freezeAnswer.getResult()) { - s_logger.info("The virtual machine is frozen"); + logger.info("The virtual machine is frozen"); for (VolumeInfo vol : vinfos) { long startSnapshtot = System.nanoTime(); SnapshotInfo snapInfo = createDiskSnapshot(vmSnapshot, forRollback, vol); @@ -175,14 +173,14 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd); throw new CloudRuntimeException("Could not take snapshot for volume with id=" + vol.getId()); } - 
s_logger.info(String.format("Snapshot with id=%s, took %s milliseconds", snapInfo.getId(), + logger.info(String.format("Snapshot with id=%s, took %s milliseconds", snapInfo.getId(), TimeUnit.MILLISECONDS.convert(elapsedTime(startSnapshtot), TimeUnit.NANOSECONDS))); } answer = new CreateVMSnapshotAnswer(ccmd, true, ""); answer.setVolumeTOs(volumeTOs); thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd); if (thawAnswer != null && thawAnswer.getResult()) { - s_logger.info(String.format( + logger.info(String.format( "Virtual machne is thawed. The freeze of virtual machine took %s milliseconds.", TimeUnit.MILLISECONDS.convert(elapsedTime(startFreeze), TimeUnit.NANOSECONDS))); } @@ -191,7 +189,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { } if (answer != null && answer.getResult()) { processAnswer(vmSnapshotVO, userVm, answer, null); - s_logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); long new_chain_size = 0; for (VolumeObjectTO volumeTo : answer.getVolumeTOs()) { publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo); @@ -202,27 +200,27 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { return vmSnapshot; } else { String errMsg = "Creating VM snapshot: " + vmSnapshot.getName() + " failed"; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (OperationTimedoutException e) { - s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); + logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); throw new CloudRuntimeException( "Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); } catch (AgentUnavailableException e) { - s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " 
failed", e); + logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e); throw new CloudRuntimeException( "Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); } catch (CloudRuntimeException e) { throw new CloudRuntimeException(e.getMessage()); } finally { if (thawAnswer == null && freezeAnswer != null) { - s_logger.info(String.format("Freeze of virtual machine took %s milliseconds.", TimeUnit.MILLISECONDS + logger.info(String.format("Freeze of virtual machine took %s milliseconds.", TimeUnit.MILLISECONDS .convert(elapsedTime(startFreeze), TimeUnit.NANOSECONDS))); try { thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd); } catch (AgentUnavailableException | OperationTimedoutException e) { - s_logger.debug("Could not unfreeze the VM due to " + e); + logger.debug("Could not unfreeze the VM due to " + e); } } if (!result) { @@ -238,7 +236,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { } vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -251,7 +249,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - s_logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException( "Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } @@ -289,7 +287,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { } } String errMsg = String.format("Delete of VM snapshot [%s] of VM [%s] failed due to [%s]", vmSnapshot.getName(), userVm.getUserId(), err); - s_logger.error(errMsg, 
err); + logger.error(errMsg, err); throw new CloudRuntimeException(errMsg, err); } } @@ -325,14 +323,14 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { processAnswer(vmSnapshotVO, userVm, answer, null); result = true; } catch (CloudRuntimeException e) { - s_logger.error(e); + logger.error(e); throw new CloudRuntimeException(e); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -382,7 +380,7 @@ protected void rollbackDiskSnapshot(SnapshotInfo snapshotInfo) { Long snapshotID = snapshotInfo.getId(); SnapshotVO snapshot = snapshotDao.findById(snapshotID); deleteSnapshotByStrategy(snapshot); - s_logger.debug("Rollback is executed: deleting snapshot with id:" + snapshotID); + logger.debug("Rollback is executed: deleting snapshot with id:" + snapshotID); } protected void deleteSnapshotByStrategy(SnapshotVO snapshot) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java index b438bc121e4c..fdde4ce3e624 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/RemoteHostEndPoint.java @@ -26,7 +26,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -51,7 +52,7 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; public class 
RemoteHostEndPoint implements EndPoint { - private static final Logger s_logger = Logger.getLogger(RemoteHostEndPoint.class); + protected Logger logger = LogManager.getLogger(getClass()); private long hostId; private String hostAddress; @@ -125,10 +126,10 @@ public Answer sendMessage(Command cmd) { return agentMgr.send(newHostId, cmd); } catch (AgentUnavailableException e) { errMsg = e.toString(); - s_logger.debug("Failed to send command, due to Agent:" + getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent:" + getId() + ", " + e.toString()); } catch (OperationTimedoutException e) { errMsg = e.toString(); - s_logger.debug("Failed to send command, due to Agent:" + getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent:" + getId() + ", " + e.toString()); } throw new CloudRuntimeException("Failed to send command, due to Agent:" + getId() + ", " + errMsg); } @@ -216,8 +217,8 @@ public void sendMessageAsync(Command cmd, AsyncCompletionCallback callba // update endpoint with new host if changed setId(newHostId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sending command " + cmd.toString() + " to host: " + newHostId); + if (logger.isDebugEnabled()) { + logger.debug("Sending command " + cmd.toString() + " to host: " + newHostId); } agentMgr.send(newHostId, new Commands(cmd), new CmdRunner(callback)); } catch (AgentUnavailableException e) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 89a7b577ae7f..1a754396199e 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -38,7 +38,6 @@ import org.apache.commons.lang3.StringUtils; import 
org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.utils.Pair; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -66,7 +65,6 @@ import com.cloud.vm.VirtualMachineProfile; public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class); protected BigDecimal storageOverprovisioningFactor = new BigDecimal(1); protected String allocationAlgorithm = "random"; @@ -136,7 +134,7 @@ protected List reorderPoolsByCapacity(DeploymentPlan plan, List poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType); - s_logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity)); + logger.debug(String.format("List of pools in descending order of available capacity [%s].", poolIdsByCapacity)); //now filter the given list of Pools by this ordered list @@ -165,7 +163,7 @@ protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, L Long clusterId = plan.getClusterId(); List poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId()); - s_logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount)); + logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount)); // now filter the given list of Pools by this ordered list Map poolMap = new HashMap<>(); @@ -186,15 +184,15 @@ protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, L @Override public List reorderPools(List pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) { - if (s_logger.isTraceEnabled()) { - 
s_logger.trace("reordering pools"); + if (logger.isTraceEnabled()) { + logger.trace("reordering pools"); } if (pools == null) { - s_logger.trace("There are no pools to reorder; returning null."); + logger.trace("There are no pools to reorder; returning null."); return null; } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("reordering %d pools", pools.size())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("reordering %d pools", pools.size())); } Account account = null; if (vmProfile.getVirtualMachine() != null) { @@ -204,8 +202,8 @@ public List reorderPools(List pools, VirtualMachinePro pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account); if (vmProfile.getVirtualMachine() == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("The VM is null, skipping pools reordering by disk provisioning type."); + if (logger.isTraceEnabled()) { + logger.trace("The VM is null, skipping pools reordering by disk provisioning type."); } return pools; } @@ -222,8 +220,8 @@ List reorderStoragePoolsBasedOnAlgorithm(List pools, D if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) { reorderRandomPools(pools); } else if (StringUtils.equalsAny(allocationAlgorithm, "userdispersing", "firstfitleastconsumed")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Using reordering algorithm [%s]", allocationAlgorithm)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Using reordering algorithm [%s]", allocationAlgorithm)); } if (allocationAlgorithm.equals("userdispersing")) { @@ -236,13 +234,13 @@ List reorderStoragePoolsBasedOnAlgorithm(List pools, D } void reorderRandomPools(List pools) { - StorageUtil.traceLogStoragePools(pools, s_logger, "pools to choose from: "); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Shuffle this so that we don't check the pools in the same order. 
Algorithm == '%s' (or no account?)", allocationAlgorithm)); + StorageUtil.traceLogStoragePools(pools, logger, "pools to choose from: "); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Shuffle this so that we don't check the pools in the same order. Algorithm == '%s' (or no account?)", allocationAlgorithm)); } - StorageUtil.traceLogStoragePools(pools, s_logger, "pools to shuffle: "); + StorageUtil.traceLogStoragePools(pools, logger, "pools to shuffle: "); Collections.shuffle(pools, secureRandom); - StorageUtil.traceLogStoragePools(pools, s_logger, "shuffled list of pools to choose from: "); + StorageUtil.traceLogStoragePools(pools, logger, "shuffled list of pools to choose from: "); } private List reorderPoolsByDiskProvisioningType(List pools, DiskProfile diskProfile) { @@ -267,15 +265,15 @@ private List reorderPoolsByDiskProvisioningType(List p } protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { - s_logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh)); + logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh)); if (avoid.shouldAvoid(pool)) { - s_logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh)); + logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh)); return false; } if (dskCh.requiresEncryption() && !pool.getPoolType().supportsEncryption()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType())); } return false; } @@ -284,20 +282,20 @@ protected boolean 
filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, if (clusterId != null) { ClusterVO cluster = clusterDao.findById(clusterId); if (!(cluster.getHypervisorType() == dskCh.getHypervisorType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool"); + if (logger.isDebugEnabled()) { + logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool"); } return false; } } else if (pool.getHypervisor() != null && !pool.getHypervisor().equals(HypervisorType.Any) && !(pool.getHypervisor() == dskCh.getHypervisorType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool does not have required hypervisorType, skipping this pool"); + if (logger.isDebugEnabled()) { + logger.debug("StoragePool does not have required hypervisorType, skipping this pool"); } return false; } if (!checkDiskProvisioningSupport(dskCh, pool)) { - s_logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh, + logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh, "type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType"))); return false; } @@ -308,12 +306,12 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, Volume volume = volumeDao.findById(dskCh.getVolumeId()); if(!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) { - s_logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume)); + logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume)); return false; } if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) { 
- s_logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume)); + logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume)); return false; } @@ -322,13 +320,13 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh)); if (dskCh.getHypervisorType() == HypervisorType.VMware) { if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) { - s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume)); + logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume)); return false; } if (pool.getParent() != 0L) { StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent()); if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) { - s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up)); + logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up)); return false; } } @@ -336,11 +334,11 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, try { boolean isStoragePoolStoragepolicyComplaince = storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool); if (!isStoragePoolStoragepolicyComplaince) { - s_logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with 
the storage policy required by the volume.", pool, volume)); + logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume)); return false; } } catch (StorageUnavailableException e) { - s_logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", pool.getUuid(), e.getMessage())); + logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", pool.getUuid(), e.getMessage())); return false; } } @@ -368,13 +366,13 @@ private boolean checkHypervisorCompatibility(HypervisorType hyperType, Volume.Ty //LXC ROOT disks supports NFS and local storage pools only if(!(Storage.StoragePoolType.NetworkFilesystem.equals(poolType) || Storage.StoragePoolType.Filesystem.equals(poolType)) ){ - s_logger.debug("StoragePool does not support LXC ROOT disk, skipping this pool"); + logger.debug("StoragePool does not support LXC ROOT disk, skipping this pool"); return false; } } else if (Volume.Type.DATADISK.equals(volType)){ //LXC DATA disks supports RBD storage pool only if(!Storage.StoragePoolType.RBD.equals(poolType)){ - s_logger.debug("StoragePool does not support LXC DATA disk, skipping this pool"); + logger.debug("StoragePool does not support LXC DATA disk, skipping this pool"); return false; } } @@ -385,18 +383,18 @@ private boolean checkHypervisorCompatibility(HypervisorType hyperType, Volume.Ty protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) { List disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope); if (disabledPools != null && !disabledPools.isEmpty()) { - s_logger.trace(String.format("Ignoring pools [%s] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools))); + logger.trace(String.format("Ignoring pools [%s] as they are in 
disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools))); } } protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo, boolean bypassStorageTypeCheck){ - s_logger.trace(String.format("%s is looking for storage pools that match the VM's disk profile [%s], virtual machine profile [%s] and " + logger.trace(String.format("%s is looking for storage pools that match the VM's disk profile [%s], virtual machine profile [%s] and " + "deployment plan [%s]. Returning up to [%d] and bypassStorageTypeCheck [%s].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck)); } protected void logEndOfSearch(List storagePoolList) { - s_logger.debug(String.format("%s is returning [%s] suitable storage pools [%s].", this.getClass().getSimpleName(), storagePoolList.size(), + logger.debug(String.format("%s is returning [%s] suitable storage pools [%s].", this.getClass().getSimpleName(), storagePoolList.size(), Arrays.toString(storagePoolList.toArray()))); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index 9c0f84ab14a3..8856e12b27ba 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -26,7 +26,6 @@ import com.cloud.storage.VolumeApiServiceImpl; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeploymentPlan; @@ -40,7 +39,6 @@ @Component public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = 
Logger.getLogger(ClusterScopeStoragePoolAllocator.class); @Inject DiskOfferingDao _diskOfferingDao; @@ -64,35 +62,35 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr // clusterId == null here because it will break ClusterWide primary // storage volume operation where // only podId is passed into this call. - s_logger.debug("ClusterScopeStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage."); + logger.debug("ClusterScopeStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage."); return null; } if (dskCh.getTags() != null && dskCh.getTags().length != 0) { - s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s], cluster [%s], and having tags [%s]. Disabled pools will be ignored.", dcId, podId, clusterId, + logger.debug(String.format("Looking for pools in dc [%s], pod [%s], cluster [%s], and having tags [%s]. Disabled pools will be ignored.", dcId, podId, clusterId, Arrays.toString(dskCh.getTags()))); } else { - s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s]. Disabled pools will be ignored.", dcId, podId, clusterId)); + logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s]. 
Disabled pools will be ignored.", dcId, podId, clusterId)); } - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { // Log the pools details that are ignored because they are in disabled state logDisabledStoragePools(dcId, podId, clusterId, ScopeType.CLUSTER); } List pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), true, VolumeApiServiceImpl.storageTagRuleExecutionTimeout.value()); pools.addAll(storagePoolJoinDao.findStoragePoolByScopeAndRuleTags(dcId, podId, clusterId, ScopeType.CLUSTER, List.of(dskCh.getTags()))); - s_logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags()))); + logger.debug(String.format("Found pools [%s] that match with tags [%s].", pools, Arrays.toString(dskCh.getTags()))); // add remaining pools in cluster, that did not match tags, to avoid set List allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null, false, 0); allPools.removeAll(pools); for (StoragePoolVO pool : allPools) { - s_logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool)); + logger.trace(String.format("Adding pool [%s] to the 'avoid' set since it did not match any tags.", pool)); avoid.addPool(pool.getId()); } if (pools.size() == 0) { - s_logger.debug(String.format("No storage pools available for [%s] volume allocation.", ServiceOffering.StorageType.shared)); + logger.debug(String.format("No storage pools available for [%s] volume allocation.", ServiceOffering.StorageType.shared)); return suitablePools; } @@ -102,10 +100,10 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr } StoragePool storagePool = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); + logger.debug(String.format("Found suitable local 
storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { - s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); + logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); avoid.addPool(pool.getId()); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java index 3fa69499ff12..c6f18f33f0b2 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -36,7 +35,6 @@ import com.cloud.vm.VirtualMachineProfile; public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class); StoragePoolAllocator _firstFitStoragePoolAllocator; StoragePoolAllocator _localStoragePoolAllocator; @@ -50,7 +48,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl public List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) { logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck); if (!_storagePoolCleanupEnabled) { - s_logger.debug("Storage pool cleanup is not enabled, so 
GarbageCollectingStoragePoolAllocator is being skipped."); + logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); return null; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java index 4ec15b9e43f4..0c845aaa317b 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.capacity.dao.CapacityDao; @@ -44,7 +43,6 @@ @Component public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class); @Inject StoragePoolHostDao _poolHostDao; @@ -64,11 +62,11 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr logStartOfSearch(dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck); if (!bypassStorageTypeCheck && !dskCh.useLocalStorage()) { - s_logger.debug("LocalStoragePoolAllocator is returning null since the disk profile does not use local storage and bypassStorageTypeCheck is false."); + logger.debug("LocalStoragePoolAllocator is returning null since the disk profile does not use local storage and bypassStorageTypeCheck is false."); return null; } - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { // Log the pools details that are ignored because they are in disabled state logDisabledStoragePools(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST); } @@ -82,10 +80,10 @@ protected List 
select(DiskProfile dskCh, VirtualMachineProfile vmPr if (pool != null && pool.isLocal()) { StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); + logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { - s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); + logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); avoid.addPool(pool.getId()); } } @@ -97,7 +95,7 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr } else { if (plan.getPodId() == null) { // zone wide primary storage deployment - s_logger.debug("LocalStoragePoolAllocator is returning null since both the host ID and pod ID are null. That means this should be a zone wide primary storage deployment."); + logger.debug("LocalStoragePoolAllocator is returning null since both the host ID and pod ID are null. 
That means this should be a zone wide primary storage deployment."); return null; } List availablePools = @@ -109,10 +107,10 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr } StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - s_logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); + logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", pool, dskCh)); suitablePools.add(storagePool); } else { - s_logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); + logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", pool, dskCh)); avoid.addPool(pool.getId()); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index ba130b4e2e5f..30c8d07fc225 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -43,7 +42,6 @@ @Component public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger LOGGER = Logger.getLogger(ZoneWideStoragePoolAllocator.class); @Inject private DataStoreManager dataStoreMgr; @Inject @@ -57,7 +55,7 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr return null; } - if (LOGGER.isTraceEnabled()) { + if 
(logger.isTraceEnabled()) { // Log the pools details that are ignored because they are in disabled state logDisabledStoragePools(plan.getDataCenterId(), null, null, ScopeType.ZONE); } @@ -66,7 +64,7 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr List storagePools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags(), true); storagePools.addAll(storagePoolJoinDao.findStoragePoolByScopeAndRuleTags(plan.getDataCenterId(), null, null, ScopeType.ZONE, List.of(dskCh.getTags()))); if (storagePools.isEmpty()) { - LOGGER.debug(String.format("Could not find any zone wide storage pool that matched with any of the following tags [%s].", Arrays.toString(dskCh.getTags()))); + logger.debug(String.format("Could not find any zone wide storage pool that matched with any of the following tags [%s].", Arrays.toString(dskCh.getTags()))); storagePools = new ArrayList<>(); } @@ -94,11 +92,11 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr } StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); if (filter(avoid, storagePool, dskCh, plan)) { - LOGGER.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); + logger.debug(String.format("Found suitable local storage pool [%s] to allocate disk [%s] to it, adding to list.", storagePool, dskCh)); suitablePools.add(storagePool); } else { if (canAddStoragePoolToAvoidSet(storage)) { - LOGGER.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", storagePool, dskCh)); + logger.debug(String.format("Adding storage pool [%s] to avoid set during allocation of disk [%s].", storagePool, dskCh)); avoid.addPool(storagePool.getId()); } } @@ -125,8 +123,8 @@ protected List reorderPoolsByCapacity(DeploymentPlan plan, } List poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(zoneId, null, capacityType); - if 
(LOGGER.isDebugEnabled()) { - LOGGER.debug("List of zone-wide storage pools in descending order of free capacity: "+ poolIdsByCapacity); + if (logger.isDebugEnabled()) { + logger.debug("List of zone-wide storage pools in descending order of free capacity: "+ poolIdsByCapacity); } //now filter the given list of Pools by this ordered list @@ -154,8 +152,8 @@ protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, L long dcId = plan.getDataCenterId(); List poolIdsByVolCount = volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount); + if (logger.isDebugEnabled()) { + logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount); } // now filter the given list of Pools by this ordered list diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java index 460f869bebf3..7c1cbb55b3b1 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java @@ -20,7 +20,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -42,7 +43,7 @@ @Component public class DataObjectManagerImpl implements DataObjectManager { - private static final Logger s_logger = Logger.getLogger(DataObjectManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject ObjectInDataStoreManager 
objectInDataStoreMgr; @Inject @@ -57,13 +58,13 @@ protected DataObject waitingForCreated(DataObject dataObj, DataStore dataStore) try { Thread.sleep(waitingTime); } catch (InterruptedException e) { - s_logger.debug("sleep interrupted", e); + logger.debug("sleep interrupted", e); throw new CloudRuntimeException("sleep interrupted", e); } obj = objectInDataStoreMgr.findObject(dataObj, dataStore); if (obj == null) { - s_logger.debug("can't find object in db, maybe it's cleaned up already, exit waiting"); + logger.debug("can't find object in db, maybe it's cleaned up already, exit waiting"); break; } if (obj.getState() == ObjectInDataStoreStateMachine.State.Ready) { @@ -73,7 +74,7 @@ protected DataObject waitingForCreated(DataObject dataObj, DataStore dataStore) } while (retries > 0); if (obj == null || retries <= 0) { - s_logger.debug("waiting too long for template downloading, marked it as failed"); + logger.debug("waiting too long for template downloading, marked it as failed"); throw new CloudRuntimeException("waiting too long for template downloading, marked it as failed"); } return objectInDataStoreMgr.get(dataObj, dataStore, null); @@ -138,7 +139,7 @@ public void createAsync(DataObject data, DataStore store, AsyncCompletionCallbac try { objectInDataStoreMgr.update(objInStore, ObjectInDataStoreStateMachine.Event.OperationFailed); } catch (Exception e1) { - s_logger.debug("state transaction failed", e1); + logger.debug("state transaction failed", e1); } CreateCmdResult result = new CreateCmdResult(null, null); result.setSuccess(false); @@ -149,7 +150,7 @@ public void createAsync(DataObject data, DataStore store, AsyncCompletionCallbac try { objectInDataStoreMgr.update(objInStore, ObjectInDataStoreStateMachine.Event.OperationFailed); } catch (Exception e1) { - s_logger.debug("state transaction failed", e1); + logger.debug("state transaction failed", e1); } CreateCmdResult result = new CreateCmdResult(null, null); result.setSuccess(false); @@ -182,7 +183,7 @@ 
protected Void createAsynCallback(AsyncCallbackDispatcher try { objectInDataStoreMgr.update(data, Event.DestroyRequested); } catch (NoTransitionException e) { - s_logger.debug("destroy failed", e); + logger.debug("destroy failed", e); CreateCmdResult res = new CreateCmdResult(null, null); callback.complete(res); } catch (ConcurrentOperationException e) { - s_logger.debug("destroy failed", e); + logger.debug("destroy failed", e); CreateCmdResult res = new CreateCmdResult(null, null); callback.complete(res); } @@ -333,18 +334,18 @@ protected Void deleteAsynCallback(AsyncCallbackDispatcher { - private static final Logger s_logger = Logger.getLogger(DataStoreProviderManagerImpl.class); List providers; protected Map providerMap = new ConcurrentHashMap(); @@ -127,18 +125,18 @@ protected boolean registerProvider(DataStoreProvider provider) { String providerName = provider.getName(); if (providerMap.get(providerName) != null) { - s_logger.debug("Did not register data store provider, provider name: " + providerName + " is not unique"); + logger.debug("Did not register data store provider, provider name: " + providerName + " is not unique"); return false; } - s_logger.debug("registering data store provider:" + provider.getName()); + logger.debug("registering data store provider:" + provider.getName()); providerMap.put(providerName, provider); try { boolean registrationResult = provider.configure(copyParams); if (!registrationResult) { providerMap.remove(providerName); - s_logger.debug("Failed to register data store provider: " + providerName); + logger.debug("Failed to register data store provider: " + providerName); return false; } @@ -152,7 +150,7 @@ protected boolean registerProvider(DataStoreProvider provider) { objectStoreProviderMgr.registerDriver(provider.getName(), (ObjectStoreDriver)provider.getDataStoreDriver()); } } catch (Exception e) { - s_logger.debug("configure provider failed", e); + logger.debug("configure provider failed", e); 
providerMap.remove(providerName); return false; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java index a492e76b9913..4b13c100a44d 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java @@ -21,7 +21,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -36,7 +35,6 @@ @Component public class ObjectInDataStoreDaoImpl extends GenericDaoBase implements ObjectInDataStoreDao { - private static final Logger s_logger = Logger.getLogger(ObjectInDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; @Override @@ -69,7 +67,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat builder.set(vo, "updated", new Date()); int rows = update(vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { ObjectInDataStoreVO dbVol = findByIdIncludingRemoved(vo.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); @@ -102,7 +100,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore"); + logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore"); } } return rows > 0; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index bc16bafd7a9f..e6fac9a40639 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -46,7 +46,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.LocalHostEndpoint; import org.apache.cloudstack.storage.RemoteHostEndPoint; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.capacity.CapacityManager; @@ -69,7 +70,7 @@ @Component public class DefaultEndPointSelector implements EndPointSelector { - private static final Logger s_logger = Logger.getLogger(DefaultEndPointSelector.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private HostDao hostDao; @Inject @@ -174,10 +175,10 @@ protected EndPoint findEndPointInScope(Scope scope, String sqlBase, Long poolId, host = hostDao.findById(id); } } catch (SQLException e) { - s_logger.warn("can't find endpoint", e); + logger.warn("can't find endpoint", e); } } catch (SQLException e) { - s_logger.warn("can't find endpoint", e); + logger.warn("can't find endpoint", e); } if (host == null) { return null; @@ -298,7 +299,7 @@ public EndPoint select(DataObject srcData, DataObject destData, StorageAction ac @Override public EndPoint select(DataObject srcData, DataObject destData, StorageAction action, boolean encryptionRequired) { - s_logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary " + srcData.getId() + " dest=" + destData.getId()); + logger.error("IR24 select BACKUPSNAPSHOT from primary to secondary " + srcData.getId() + " dest=" + destData.getId()); if (action == StorageAction.BACKUPSNAPSHOT && srcData.getDataStore().getRole() == 
DataStoreRole.Primary) { SnapshotInfo srcSnapshot = (SnapshotInfo)srcData; VolumeInfo volumeInfo = srcSnapshot.getBaseVolume(); @@ -424,11 +425,11 @@ public EndPoint select(DataStore store, String downloadUrl){ } } catch (URISyntaxException e) { - s_logger.debug("Received URISyntaxException for url" +downloadUrl); + logger.debug("Received URISyntaxException for url" +downloadUrl); } // If ssvm doesn't exist then find any ssvm in the zone. - s_logger.debug("Couldn't find ssvm for url" +downloadUrl); + logger.debug("Couldn't find ssvm for url" +downloadUrl); return findEndpointForImageStorage(store); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java index 9b7007dc4d66..10af5d55d619 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java @@ -23,7 +23,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -56,7 +57,7 @@ import com.cloud.vm.snapshot.VMSnapshot; public class HypervisorHelperImpl implements HypervisorHelper { - private static final Logger s_logger = Logger.getLogger(HypervisorHelperImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject EndPointSelector selector; @Inject @@ -79,7 +80,7 @@ public DataTO introduceObject(DataTO object, Scope scope, Long storeId) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = 
ep.sendMessage(cmd); @@ -99,7 +100,7 @@ public boolean forgetObject(DataTO object, Scope scope, Long storeId) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -107,7 +108,7 @@ public boolean forgetObject(DataTO object, Scope scope, Long storeId) { if (answer == null || !answer.getResult()) { String errMsg = answer == null ? null : answer.getDetails(); if (errMsg != null) { - s_logger.debug("Failed to forget object: " + errMsg); + logger.debug("Failed to forget object: " + errMsg); } return false; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 369630a1a73e..9606da1e3372 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -55,7 +55,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector; import org.apache.cloudstack.storage.image.deployasis.DeployAsIsHelper; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -93,7 +94,7 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { - private static final Logger LOGGER = Logger.getLogger(BaseImageStoreDriverImpl.class); + protected Logger logger = LogManager.getLogger(BaseImageStoreDriverImpl.class); @Inject protected VMTemplateDao _templateDao; @@ -179,20 +180,20 @@ public void createAsync(DataStore 
dataStore, DataObject data, AsyncCompletionCal caller.setContext(context); if (data.getType() == DataObjectType.TEMPLATE) { caller.setCallback(caller.getTarget().createTemplateAsyncCallback(null, null)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Downloading template to data store " + dataStore.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Downloading template to data store " + dataStore.getId()); } _downloadMonitor.downloadTemplateToStorage(data, caller); } else if (data.getType() == DataObjectType.VOLUME) { caller.setCallback(caller.getTarget().createVolumeAsyncCallback(null, null)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Downloading volume to data store " + dataStore.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Downloading volume to data store " + dataStore.getId()); } _downloadMonitor.downloadVolumeToStorage(data, caller); } else if (data.getType() == DataObjectType.SNAPSHOT) { caller.setCallback(caller.getTarget().createSnapshotAsyncCallback(null, null)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Downloading volume to data store " + dataStore.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Downloading volume to data store " + dataStore.getId()); } _downloadMonitor.downloadSnapshotToStorage(data, caller); } @@ -200,8 +201,8 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher callback, CreateContext context) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Performing image store createTemplate async callback"); + if (logger.isDebugEnabled()) { + logger.debug("Performing image store createTemplate async callback"); } DownloadAnswer answer = callback.getResult(); DataObject obj = context.data; @@ -215,16 +216,16 @@ protected Void createTemplateAsyncCallback(AsyncCallbackDispatcher eps = _epSelector.findAllEndpointsForScope(srcdata.getDataStore()); if (eps == null || eps.isEmpty()) { String errMsg = "No 
remote endpoint to send command, check if host or ssvm is down?"; - LOGGER.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { // select endpoint with least number of commands running on them @@ -447,10 +448,10 @@ private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { return answer; } catch (AgentUnavailableException e) { errMsg = e.toString(); - LOGGER.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); } catch (OperationTimedoutException e) { errMsg = e.toString(); - LOGGER.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); } throw new CloudRuntimeException("Failed to send command, due to Agent:" + endPoint.getId() + ", " + errMsg); } @@ -480,8 +481,8 @@ public void deleteEntityExtractUrl(DataStore store, String installPath, String u @Override public List getDataDiskTemplates(DataObject obj, String configurationId) { List dataDiskDetails = new ArrayList(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Get the data disks present in the OVA template"); + if (logger.isDebugEnabled()) { + logger.debug("Get the data disks present in the OVA template"); } DataStore store = obj.getDataStore(); GetDatadisksCommand cmd = new GetDatadisksCommand(obj.getTO(), configurationId); @@ -489,7 +490,7 @@ public List getDataDiskTemplates(DataObject obj, String configuratio Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - LOGGER.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -508,14 +509,14 @@ public List getDataDiskTemplates(DataObject obj, String configuratio public Void createDataDiskTemplateAsync(TemplateInfo 
dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback callback) { Answer answer = null; String errMsg = null; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Create Datadisk template: " + dataDiskTemplate.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Create Datadisk template: " + dataDiskTemplate.getId()); } CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable); EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore()); if (ep == null) { errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - LOGGER.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -534,7 +535,7 @@ private Integer getCopyCmdsCountToSpecificSSVM(Long ssvmId) { } private List ssvmWithLeastMigrateJobs() { - LOGGER.debug("Picking ssvm from the pool with least commands running on it"); + logger.debug("Picking ssvm from the pool with least commands running on it"); String query = "select host_id, count(*) from cmd_exec_log group by host_id order by 2;"; TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -547,7 +548,7 @@ private List ssvmWithLeastMigrateJobs() { result.add((long) rs.getInt(1)); } } catch (SQLException e) { - LOGGER.debug("SQLException caught", e); + logger.debug("SQLException caught", e); } return result; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index cb14506ad171..aceab4506781 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -28,7 +28,6 @@ import javax.naming.ConfigurationException; import 
org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -58,7 +57,6 @@ @Component public class TemplateDataStoreDaoImpl extends GenericDaoBase implements TemplateDataStoreDao { - private static final Logger s_logger = Logger.getLogger(TemplateDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; private SearchBuilder storeSearch; private SearchBuilder cacheSearch; @@ -199,7 +197,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat } int rows = update(dataObj, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { TemplateDataStoreVO dbVol = findByIdIncludingRemoved(dataObj.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(dataObj.toString()); @@ -232,7 +230,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore"); + logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore"); } } return rows > 0; @@ -494,7 +492,7 @@ public void duplicateCacheRecordsOnRegionStore(long storeId) { List tmpls = listBy(sc); // create an entry for each template record, but with empty install path since the content is not yet on region-wide store yet if (tmpls != null) { - s_logger.info("Duplicate " + tmpls.size() + " template cache store records to region store"); + logger.info("Duplicate " + tmpls.size() + " template cache store records to region store"); for (TemplateDataStoreVO tmpl : tmpls) { long templateId = 
tmpl.getTemplateId(); VMTemplateVO template = _tmpltDao.findById(templateId); @@ -502,15 +500,15 @@ public void duplicateCacheRecordsOnRegionStore(long storeId) { throw new CloudRuntimeException("No template is found for template id: " + templateId); } if (template.getTemplateType() == TemplateType.SYSTEM) { - s_logger.info("No need to duplicate system template since it will be automatically downloaded while adding region store"); + logger.info("No need to duplicate system template since it will be automatically downloaded while adding region store"); continue; } TemplateDataStoreVO tmpStore = findByStoreTemplate(storeId, tmpl.getTemplateId()); if (tmpStore != null) { - s_logger.info("There is already entry for template " + tmpl.getTemplateId() + " on region store " + storeId); + logger.info("There is already entry for template " + tmpl.getTemplateId() + " on region store " + storeId); continue; } - s_logger.info("Persisting an entry for template " + tmpl.getTemplateId() + " on region store " + storeId); + logger.info("Persisting an entry for template " + tmpl.getTemplateId() + " on region store " + storeId); TemplateDataStoreVO ts = new TemplateDataStoreVO(); ts.setTemplateId(tmpl.getTemplateId()); ts.setDataStoreId(storeId); @@ -545,7 +543,7 @@ public void updateStoreRoleToCachce(long storeId) { sc.setParameters("destroyed", false); List tmpls = listBy(sc); if (tmpls != null) { - s_logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref"); + logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref"); for (TemplateDataStoreVO tmpl : tmpls) { tmpl.setDataStoreRole(DataStoreRole.ImageCache); update(tmpl.getId(), tmpl); @@ -605,7 +603,7 @@ public void expireDnldUrlsForZone(Long dcId){ txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Failed expiring download urls for dcId: " + dcId, e); + logger.warn("Failed expiring download urls for dcId: " + dcId, e); } } diff --git 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java index dcdc9ea56c24..2c3d5ccfdde8 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java @@ -29,7 +29,6 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -52,7 +51,6 @@ @Component public class VolumeDataStoreDaoImpl extends GenericDaoBase implements VolumeDataStoreDao { - private static final Logger s_logger = Logger.getLogger(VolumeDataStoreDaoImpl.class); private SearchBuilder updateStateSearch; private SearchBuilder volumeSearch; private SearchBuilder storeSearch; @@ -150,7 +148,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat } int rows = update(dataObj, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { + if (rows == 0 && logger.isDebugEnabled()) { VolumeDataStoreVO dbVol = findByIdIncludingRemoved(dataObj.getId()); if (dbVol != null) { StringBuilder str = new StringBuilder("Unable to update ").append(dataObj.toString()); @@ -183,7 +181,7 @@ public boolean updateState(State currentState, Event event, State nextState, Dat .append("; updatedTime=") .append(oldUpdatedTime); } else { - s_logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database anymore"); + logger.debug("Unable to update objectIndatastore: id=" + dataObj.getId() + ", as there is no such object exists in the database 
anymore"); } } return rows > 0; @@ -296,14 +294,14 @@ public void duplicateCacheRecordsOnRegionStore(long storeId) { } // create an entry for each record, but with empty install path since the content is not yet on region-wide store yet if (vols != null) { - s_logger.info("Duplicate " + vols.size() + " volume cache store records to region store"); + logger.info("Duplicate " + vols.size() + " volume cache store records to region store"); for (VolumeDataStoreVO vol : vols) { VolumeDataStoreVO volStore = findByStoreVolume(storeId, vol.getVolumeId()); if (volStore != null) { - s_logger.info("There is already entry for volume " + vol.getVolumeId() + " on region store " + storeId); + logger.info("There is already entry for volume " + vol.getVolumeId() + " on region store " + storeId); continue; } - s_logger.info("Persisting an entry for volume " + vol.getVolumeId() + " on region store " + storeId); + logger.info("Persisting an entry for volume " + vol.getVolumeId() + " on region store " + storeId); VolumeDataStoreVO vs = new VolumeDataStoreVO(); vs.setVolumeId(vol.getVolumeId()); vs.setDataStoreId(storeId); @@ -380,7 +378,7 @@ public void expireDnldUrlsForZone(Long dcId){ txn.commit(); } catch (Exception e) { txn.rollback(); - s_logger.warn("Failed expiring download urls for dcId: " + dcId, e); + logger.warn("Failed expiring download urls for dcId: " + dcId, e); } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/deployasis/DeployAsIsHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/deployasis/DeployAsIsHelperImpl.java index 2d0a0f2b32a5..b39ef1dd1163 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/deployasis/DeployAsIsHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/deployasis/DeployAsIsHelperImpl.java @@ -56,7 +56,8 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.ArrayUtils; import 
org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import javax.inject.Inject; @@ -71,7 +72,7 @@ @Component public class DeployAsIsHelperImpl implements DeployAsIsHelper { - private static final Logger LOGGER = Logger.getLogger(DeployAsIsHelperImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private static Gson gson; @Inject @@ -128,7 +129,7 @@ private void persistTemplateOVFInformation(long templateId, OVFInformationTO ovf if (guestOsInfo != null) { String osType = guestOsInfo.first(); String osDescription = guestOsInfo.second(); - LOGGER.info("Guest OS information retrieved from the template: " + osType + " - " + osDescription); + logger.info("Guest OS information retrieved from the template: " + osType + " - " + osDescription); handleGuestOsFromOVFDescriptor(templateId, osType, osDescription, minimumHardwareVersion); } } @@ -139,14 +140,14 @@ public boolean persistTemplateOVFInformationAndUpdateGuestOS(long templateId, OV persistTemplateOVFInformation(templateId, ovfInformationTO); } } catch (Exception e) { - LOGGER.error("Error persisting deploy-as-is details for template " + templateId, e); + logger.error("Error persisting deploy-as-is details for template " + templateId, e); tmpltStoreVO.setErrorString(e.getMessage()); tmpltStoreVO.setState(Failed); tmpltStoreVO.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR); templateDataStoreDao.update(tmpltStoreVO.getId(), tmpltStoreVO); return false; } - LOGGER.info("Successfully persisted deploy-as-is details for template " + templateId); + logger.info("Successfully persisted deploy-as-is details for template " + templateId); return true; } @@ -162,16 +163,16 @@ private Long retrieveTemplateGuestOsIdFromGuestOsInfo(long templateId, String gu } String minimumHypervisorVersion = 
getMinimumSupportedHypervisorVersionForHardwareVersion(minimumHardwareVersion); - LOGGER.info("Minimum hardware version " + minimumHardwareVersion + " matched to hypervisor version " + minimumHypervisorVersion + ". " + + logger.info("Minimum hardware version " + minimumHardwareVersion + " matched to hypervisor version " + minimumHypervisorVersion + ". " + "Checking guest OS supporting this version"); List guestOsMappings = guestOSHypervisorDao.listByOsNameAndHypervisorMinimumVersion(guestOsType, hypervisor.toString(), minimumHypervisorVersion); if (CollectionUtils.isNotEmpty(guestOsMappings)) { - if (LOGGER.isDebugEnabled()) { + if (logger.isDebugEnabled()) { String msg = String.format("number of hypervisor mappings for guest os \"%s\" is: %d", guestOsType, guestOsMappings.size()); - LOGGER.debug(msg); + logger.debug(msg); } Long guestOsId = null; if (guestOsMappings.size() == 1) { @@ -207,7 +208,7 @@ private void handleGuestOsFromOVFDescriptor(long templateId, String guestOsType, String minimumHardwareVersion) { Long guestOsId = retrieveTemplateGuestOsIdFromGuestOsInfo(templateId, guestOsType, guestOsDescription, minimumHardwareVersion); if (guestOsId != null) { - LOGGER.info("Updating deploy-as-is template guest OS to " + guestOsType); + logger.info("Updating deploy-as-is template guest OS to " + guestOsType); VMTemplateVO template = templateDao.findById(templateId); updateTemplateGuestOsId(template, guestOsId); } @@ -223,7 +224,7 @@ private void updateDeployAsIsTemplateToNewGuestOs(VMTemplateVO template, String Hypervisor.HypervisorType hypervisor, Collection hypervisorVersions) { GuestOSVO newGuestOs = createGuestOsEntry(guestOsDescription); for (String hypervisorVersion : hypervisorVersions) { - LOGGER.info(String.format("Adding a new guest OS mapping for hypervisor: %s version: %s and " + + logger.info(String.format("Adding a new guest OS mapping for hypervisor: %s version: %s and " + "guest OS: %s", hypervisor.toString(), hypervisorVersion, guestOsType)); 
createGuestOsHypervisorMapping(newGuestOs.getId(), guestOsType, hypervisor.toString(), hypervisorVersion); } @@ -278,7 +279,7 @@ protected String mapHardwareVersionToHypervisorVersion(String hardwareVersion) { hypervisorVersion = "6.7"; } } catch (NumberFormatException e) { - LOGGER.error("Cannot parse hardware version " + hwVersion + " to integer. Using default hypervisor version", e); + logger.error("Cannot parse hardware version " + hwVersion + " to integer. Using default hypervisor version", e); } return hypervisorVersion; } @@ -332,7 +333,7 @@ public Map getAllocatedVirtualMachineNicsAdapterMapping(Virtual if (ArrayUtils.isNotEmpty(nics)) { if (nics.length != networks.size()) { String msg = "Different number of networks provided vs networks defined in deploy-as-is template"; - LOGGER.error(msg); + logger.error(msg); return map; } for (int i = 0; i < nics.length; i++) { @@ -347,16 +348,16 @@ public void persistTemplateDeployAsIsInformationTOList(long templateId, List informationTOList) { for (TemplateDeployAsIsInformationTO informationTO : informationTOList) { String propKey = getKeyFromInformationTO(informationTO); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Saving property %s for template %d as detail", propKey, templateId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Saving property %s for template %d as detail", propKey, templateId)); } String propValue = null; try { propValue = getValueFromInformationTO(informationTO); } catch (RuntimeException re) { - LOGGER.error("gson marshalling of property object fails: " + propKey,re); + logger.error("gson marshalling of property object fails: " + propKey,re); } catch (IOException e) { - LOGGER.error("Could not decompress the license for template " + templateId, e); + logger.error("Could not decompress the license for template " + templateId, e); } saveTemplateDeployAsIsPropertyAttribute(templateId, propKey, propValue); } @@ -391,18 +392,18 @@ private String 
getKeyFromInformationTO(TemplateDeployAsIsInformationTO informati } private void saveTemplateDeployAsIsPropertyAttribute(long templateId, String key, String value) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Saving property %s for template %d as detail", key, templateId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Saving property %s for template %d as detail", key, templateId)); } if (templateDeployAsIsDetailsDao.findDetail(templateId,key) != null) { - LOGGER.debug(String.format("Detail '%s' existed for template %d, deleting.", key, templateId)); + logger.debug(String.format("Detail '%s' existed for template %d, deleting.", key, templateId)); templateDeployAsIsDetailsDao.removeDetail(templateId,key); } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Template detail for template %d to save is '%s': '%s'", templateId, key, value)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Template detail for template %d to save is '%s': '%s'", templateId, key, value)); } TemplateDeployAsIsDetailVO detailVO = new TemplateDeployAsIsDetailVO(templateId, key, value); - LOGGER.debug("Persisting template details " + detailVO.getName() + " from OVF properties for template " + templateId); + logger.debug("Persisting template details " + detailVO.getName() + " from OVF properties for template " + templateId); templateDeployAsIsDetailsDao.persist(detailVO); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/object/BaseObjectStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/object/BaseObjectStoreDriverImpl.java index e6027a1959f8..8d45c959b59b 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/object/BaseObjectStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/object/BaseObjectStoreDriverImpl.java @@ -27,12 +27,13 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import 
org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.util.Map; public abstract class BaseObjectStoreDriverImpl implements ObjectStoreDriver { - private static final Logger LOGGER = Logger.getLogger(BaseObjectStoreDriverImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Override public Map getCapabilities() { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index fbb4a6e16188..8044a2dfa5e2 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -28,7 +28,8 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -57,7 +58,7 @@ @Component public class PrimaryDataStoreHelper { - private static final Logger s_logger = Logger.getLogger(PrimaryDataStoreHelper.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private PrimaryDataStoreDao dataStoreDao; @Inject @@ -108,7 +109,7 @@ public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) { if (user == null || password == null) { String errMsg = "Missing cifs user and password details. 
Add them as details parameter."; - s_logger.warn(errMsg); + logger.warn(errMsg); throw new InvalidParameterValueException(errMsg); } else { try { @@ -261,7 +262,7 @@ public boolean deletePrimaryDataStore(DataStore store) { this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, poolVO.getId()); txn.commit(); - s_logger.debug("Storage pool id=" + poolVO.getId() + " is removed successfully"); + logger.debug("Storage pool id=" + poolVO.getId() + " is removed successfully"); return true; } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index e392c2615f17..7f373fa9988c 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -47,7 +47,8 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; @@ -69,7 +70,7 @@ @SuppressWarnings("serial") public class PrimaryDataStoreImpl implements PrimaryDataStore { - private static final Logger s_logger = Logger.getLogger(PrimaryDataStoreImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); protected PrimaryDataStoreDriver driver; protected StoragePoolVO pdsv; @@ -186,7 +187,7 @@ public Scope getScope() { if (poolHosts.size() > 0) { return new HostScope(poolHosts.get(0).getHostId(), vo.getClusterId(), vo.getDataCenterId()); } - s_logger.debug("can't find a local storage in pool host 
table: " + vo.getId()); + logger.debug("can't find a local storage in pool host table: " + vo.getId()); } return null; } @@ -296,29 +297,29 @@ public DataObject create(DataObject obj, boolean createEntryInTempSpoolRef, Stri VMTemplateStoragePoolVO templateStoragePoolRef; GlobalLock lock = GlobalLock.getInternLock(templateIdPoolIdString); if (!lock.lock(5)) { - s_logger.debug("Couldn't lock the db on the string " + templateIdPoolIdString); + logger.debug("Couldn't lock the db on the string " + templateIdPoolIdString); return null; } try { templateStoragePoolRef = templatePoolDao.findByPoolTemplate(getId(), obj.getId(), configuration); if (templateStoragePoolRef == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not found (" + templateIdPoolIdString + ") in template_spool_ref, persisting it"); + if (logger.isDebugEnabled()) { + logger.debug("Not found (" + templateIdPoolIdString + ") in template_spool_ref, persisting it"); } templateStoragePoolRef = new VMTemplateStoragePoolVO(getId(), obj.getId(), configuration); templateStoragePoolRef = templatePoolDao.persist(templateStoragePoolRef); } } catch (Throwable t) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to insert (" + templateIdPoolIdString + ") to template_spool_ref", t); + if (logger.isDebugEnabled()) { + logger.debug("Failed to insert (" + templateIdPoolIdString + ") to template_spool_ref", t); } templateStoragePoolRef = templatePoolDao.findByPoolTemplate(getId(), obj.getId(), configuration); if (templateStoragePoolRef == null) { throw new CloudRuntimeException("Failed to create template storage pool entry"); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Another thread already inserts " + templateStoragePoolRef.getId() + " to template_spool_ref", t); + if (logger.isDebugEnabled()) { + logger.debug("Another thread already inserts " + templateStoragePoolRef.getId() + " to template_spool_ref", t); } } } finally { @@ -326,7 +327,7 @@ public DataObject create(DataObject 
obj, boolean createEntryInTempSpoolRef, Stri lock.releaseRef(); } } catch (Exception e) { - s_logger.debug("Caught exception ", e); + logger.debug("Caught exception ", e); } } else if (obj.getType() == DataObjectType.SNAPSHOT) { return objectInStoreMgr.create(obj, this); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java index a453f2d48c21..b8f90e465380 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -50,13 +50,14 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.inject.Inject; import java.util.List; public class DefaultHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); /** * Wait time for modify storage pool command to complete. We should wait for 5 minutes for the command to complete. 
@@ -98,7 +99,7 @@ public boolean hostAdded(long hostId) { private boolean createPersistentNetworkResourcesOnHost(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - s_logger.warn(String.format("Host with id %ld can't be found", hostId)); + logger.warn(String.format("Host with id %ld can't be found", hostId)); return false; } setupPersistentNetwork(host); @@ -127,7 +128,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); cmd.setWait(modifyStoragePoolCommandWait); - s_logger.debug(String.format("Sending modify storage pool command to agent: %d for storage pool: %d with timeout %d seconds", + logger.debug(String.format("Sending modify storage pool command to agent: %d for storage pool: %d with timeout %d seconds", hostId, poolId, cmd.getWait())); final Answer answer = agentMgr.easySend(hostId, cmd); @@ -150,7 +151,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep List localStoragePools = this.primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - s_logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); + logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" + localStoragePool.getName()); } @@ -166,7 +167,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep storageService.updateStorageCapabilities(poolId, false); - s_logger.info("Connection established between 
storage pool " + pool + " and host " + hostId); + logger.info("Connection established between storage pool " + pool + " and host " + hostId); return createPersistentNetworkResourcesOnHost(hostId); } @@ -204,7 +205,7 @@ public boolean hostAboutToBeRemoved(long hostId) { // send host the cleanup persistent network resources HostVO host = hostDao.findById(hostId); if (host == null) { - s_logger.warn("Host with id " + hostId + " can't be found"); + logger.warn("Host with id " + hostId + " can't be found"); return false; } @@ -215,12 +216,12 @@ public boolean hostAboutToBeRemoved(long hostId) { new CleanupPersistentNetworkResourceCommand(createNicTOFromNetworkAndOffering(persistentNetworkVO, networkOfferingVO, host)); Answer answer = agentMgr.easySend(hostId, cleanupCmd); if (answer == null) { - s_logger.error("Unable to get answer to the cleanup persistent network command " + persistentNetworkVO.getId()); + logger.error("Unable to get answer to the cleanup persistent network command " + persistentNetworkVO.getId()); continue; } if (!answer.getResult()) { String msg = String.format("Unable to cleanup persistent network resources from network %d on the host %d", persistentNetworkVO.getId(), hostId); - s_logger.error(msg); + logger.error(msg); } } return true; @@ -235,7 +236,7 @@ public boolean hostRemoved(long hostId, long clusterId) { public boolean hostEnabled(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - s_logger.warn(String.format("Host with id %d can't be found", hostId)); + logger.warn(String.format("Host with id %d can't be found", hostId)); return false; } setupPersistentNetwork(host); @@ -255,7 +256,7 @@ private void setupPersistentNetwork(HostVO host) { } if (!answer.getResult()) { String msg = String.format("Unable to create persistent network resources for network %d on the host %d in zone %d", networkVO.getId(), host.getId(), networkVO.getDataCenterId()); - s_logger.error(msg); + logger.error(msg); } } } diff --git 
a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 9e41e0d4d0ec..8bb9d40fcfe8 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -50,7 +50,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.DownloadAnswer; @@ -84,7 +85,7 @@ import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; public class VolumeObject implements VolumeInfo { - private static final Logger s_logger = Logger.getLogger(VolumeObject.class); + protected Logger logger = LogManager.getLogger(getClass()); protected VolumeVO volumeVO; private StateMachine2 _volStateMachine; protected DataStore dataStore; @@ -231,7 +232,7 @@ public boolean stateTransit(Volume.Event event) { } } catch (NoTransitionException e) { String errorMessage = String.format("Failed to transit volume %s to [%s] due to [%s].", volumeVO.getVolumeDescription(), event, e.getMessage()); - s_logger.warn(errorMessage, e); + logger.warn(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } return result; @@ -442,7 +443,7 @@ protected void updateObjectInDataStoreManager(ObjectInDataStoreStateMachine.Even } catch (ConcurrentOperationException | NoTransitionException e) { String message = String.format("Failed to update %sto state [%s] due to [%s].", volumeVO == null ? 
"" : String.format("volume %s ", volumeVO.getVolumeDescription()), getMapOfEvents().get(event), e.getMessage()); - s_logger.warn(message, e); + logger.warn(message, e); throw new CloudRuntimeException(message, e); } finally { expungeEntryOnOperationFailed(event, callExpungeEntry); @@ -688,7 +689,7 @@ protected void updateVolumeInfo(VolumeObjectTO newVolume, VolumeVO volumeVo, boo volumeDao.update(volumeVo.getId(), volumeVo); String newValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volumeVo, "path", "size", "format", "encryptFormat", "poolId"); - s_logger.debug(String.format("Updated %s from %s to %s ", volumeVo.getVolumeDescription(), previousValues, newValues)); + logger.debug(String.format("Updated %s from %s to %s ", volumeVo.getVolumeDescription(), previousValues, newValues)); } protected void updateResourceCount(VolumeObjectTO newVolume, VolumeVO oldVolume) { @@ -722,7 +723,7 @@ protected void handleProcessEventCopyCmdAnswerNotPrimaryStore(VolumeObjectTO new volumeStoreDao.update(volStore.getId(), volStore); String newValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volStore, "installPath", "size"); - s_logger.debug(String.format("Updated volume_store_ref %s from %s to %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volStore, "id", "volumeId"), + logger.debug(String.format("Updated volume_store_ref %s from %s to %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volStore, "id", "volumeId"), previousValues, newValues)); } @@ -754,7 +755,7 @@ protected void handleProcessEventAnswer(DownloadAnswer downloadAnswer) { volumeStoreDao.update(volumeDataStoreVo.getId(), volumeDataStoreVo); String newValues = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volumeDataStoreVo, "installPath", "checksum"); - s_logger.debug(String.format("Updated volume_store_ref %s from %s to %s.", ReflectionToStringBuilderUtils. 
+ logger.debug(String.format("Updated volume_store_ref %s from %s to %s.", ReflectionToStringBuilderUtils. reflectOnlySelectedFields(volumeDataStoreVo, "id", "volumeId"), previousValues, newValues)); } @Override @@ -896,15 +897,15 @@ public void doInTransactionWithoutResult(TransactionStatus status) { volumeVO.setPassphraseId(null); volumeDao.persist(volumeVO); - s_logger.debug(String.format("Checking to see if we can delete passphrase id %s", passphraseId)); + logger.debug(String.format("Checking to see if we can delete passphrase id %s", passphraseId)); List volumes = volumeDao.listVolumesByPassphraseId(passphraseId); if (volumes != null && !volumes.isEmpty()) { - s_logger.debug("Other volumes use this passphrase, skipping deletion"); + logger.debug("Other volumes use this passphrase, skipping deletion"); return; } - s_logger.debug(String.format("Deleting passphrase %s", passphraseId)); + logger.debug(String.format("Deleting passphrase %s", passphraseId)); passphraseDao.remove(passphraseId); } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 8a3cd39ecbf6..2685f6d3e0c1 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -81,7 +81,8 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -146,7 +147,7 @@ @Component public class VolumeServiceImpl implements VolumeService { - private static final Logger s_logger = 
Logger.getLogger(VolumeServiceImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected AgentManager agentMgr; @Inject @@ -350,9 +351,9 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult result = new VolumeApiResult(volume); if (volume.getDataStore() == null) { - s_logger.info("Expunge volume with no data store specified"); + logger.info("Expunge volume with no data store specified"); if (canVolumeBeRemoved(volume.getId())) { - s_logger.info("Volume " + volume.getId() + " is not referred anywhere, remove it from volumes table"); + logger.info("Volume " + volume.getId() + " is not referred anywhere, remove it from volumes table"); volDao.remove(volume.getId()); } future.complete(result); @@ -364,7 +365,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { if (volumeStore != null) { if (volumeStore.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { String msg = "Volume: " + volume.getName() + " is currently being uploaded; can't delete it."; - s_logger.debug(msg); + logger.debug(msg); result.setSuccess(false); result.setResult(msg); future.complete(result); @@ -374,7 +375,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { VolumeVO vol = volDao.findById(volume.getId()); if (vol == null) { - s_logger.debug("Volume " + volume.getId() + " is not found"); + logger.debug("Volume " + volume.getId() + " is not found"); future.complete(result); return future; } @@ -383,8 +384,8 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { // not created on primary store if (volumeStore == null) { // also not created on secondary store - if (s_logger.isDebugEnabled()) { - s_logger.debug("Marking volume that was never created as destroyed: " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Marking volume that was never created as destroyed: " + vol); } VMTemplateVO template = 
templateDao.findById(vol.getTemplateId()); if (template != null && !template.isDeployAsIs()) { @@ -458,7 +459,7 @@ public Void deleteVolumeCallback(AsyncCallbackDispatcher expungeVolumeFuture = expungeVolumeAsync(volumeInfo); VolumeApiResult expungeVolumeResult = expungeVolumeFuture.get(); if (expungeVolumeResult.isFailed()) { - s_logger.warn("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); + logger.warn("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); throw new CloudRuntimeException("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); } } catch (Exception ex) { if (canVolumeBeRemoved(volumeInfo.getId())) { volDao.remove(volumeInfo.getId()); } - s_logger.warn("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); + logger.warn("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); throw new CloudRuntimeException("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); } } @@ -1340,7 +1341,7 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); if (!lock.lock(storagePoolMaxWaitSeconds)) { - s_logger.debug("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString); + logger.debug("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString); throw new CloudRuntimeException("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString); } @@ -1388,7 +1389,7 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa //Download and copy template to the managed volume TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId); if 
(templateOnPrimaryNow == null) { - s_logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); throw new CloudRuntimeException("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); } templateOnPrimary.processEvent(Event.OperationSuccessed); @@ -1401,7 +1402,7 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa } catch (StorageAccessException e) { throw e; } catch (Throwable e) { - s_logger.debug("Failed to create template on managed primary storage", e); + logger.debug("Failed to create template on managed primary storage", e); if (templateOnPrimary != null) { templateOnPrimary.processEvent(Event.OperationFailed); } @@ -1431,7 +1432,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs AsyncCallFuture future = new AsyncCallFuture<>(); if (storageCanCloneVolume && computeSupportsVolumeClone) { - s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); + logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); GlobalLock lock = null; TemplateInfo templateOnPrimary = null; @@ -1445,7 +1446,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); if (!lock.lock(storagePoolMaxWaitSeconds)) { - s_logger.debug("Unable to create managed storage template/volume, couldn't lock on " + tmplIdManagedPoolIdLockString); + logger.debug("Unable to create managed storage template/volume, couldn't lock on " + tmplIdManagedPoolIdLockString); throw new CloudRuntimeException("Unable to 
create managed storage template/volume, couldn't lock on " + tmplIdManagedPoolIdLockString); } @@ -1477,7 +1478,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs result.setResult(e.getLocalizedMessage()); result.setSuccess(false); future.complete(result); - s_logger.warn("Failed to create template on primary storage", e); + logger.warn("Failed to create template on primary storage", e); return future; } finally { if (lock != null) { @@ -1488,7 +1489,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs if (destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { // We have a template on primary storage. Clone it to new volume. - s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + logger.debug("Creating a clone from template on primary storage " + destDataStoreId); createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); } else { @@ -1497,7 +1498,7 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs destHost, future, destDataStoreId, srcTemplateInfo.getId()); } } else { - s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); + logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); createManagedVolumeCopyTemplateAsync(volumeInfo, destPrimaryDataStore, srcTemplateInfo, destHost, future); } @@ -1517,11 +1518,11 @@ private void createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volu int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); if (!lock.lock(storagePoolMaxWaitSeconds)) { - s_logger.debug("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString); + logger.debug("Unable to create volume from template, couldn't lock on " + 
tmplIdManagedPoolIdDestinationHostLockString); throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString); } - s_logger.debug("Copying the template to the volume on primary storage"); + logger.debug("Copying the template to the volume on primary storage"); createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); } finally { if (lock != null) { @@ -1628,7 +1629,7 @@ public AsyncCallFuture createVolumeFromSnapshot(VolumeInfo volu caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)).setContext(context); motionSrv.copyAsync(snapshot, volumeOnStore, caller); } catch (Exception e) { - s_logger.debug("create volume from snapshot failed", e); + logger.debug("create volume from snapshot failed", e); VolumeApiResult result = new VolumeApiResult(volume); result.setResult(e.toString()); future.complete(result); @@ -1659,7 +1660,7 @@ protected Void createVolumeFromSnapshotCallback(AsyncCallbackDispatcher copyVolumeFromImageToPrimary(VolumeIn motionSrv.copyAsync(srcVolume, destVolume, caller); return future; } catch (Exception e) { - s_logger.error("failed to copy volume from image store", e); + logger.error("failed to copy volume from image store", e); if (destVolume != null) { destVolume.processEvent(Event.OperationFailed); } @@ -1779,7 +1780,7 @@ protected AsyncCallFuture copyVolumeFromPrimaryToImage(VolumeIn motionSrv.copyAsync(srcVolume, destVolume, caller); return future; } catch (Exception e) { - s_logger.error("failed to copy volume to image store", e); + logger.error("failed to copy volume to image store", e); if (destVolume != null) { destVolume.getDataStore().delete(destVolume); } @@ -1817,7 +1818,7 @@ protected Void copyVolumeFromPrimaryToImageCallback(AsyncCallbackDispatcher copyVolume(VolumeInfo srcVolume, DataStore destStore) { DataStore srcStore = srcVolume.getDataStore(); - if 
(s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : ""); String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)" @@ -1827,7 +1828,7 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto , destStore.getName() , destStore.getId() , destStore.getRole()); - s_logger.debug(msg); + logger.debug(msg); } if (srcVolume.getState() == Volume.State.Uploaded) { @@ -1849,7 +1850,7 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { - s_logger.debug("There are snapshots creating on this volume, can not move this volume"); + logger.debug("There are snapshots creating on this volume, can not move this volume"); res.setResult("There are snapshots creating on this volume, can not move this volume"); future.complete(res); @@ -1866,9 +1867,9 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); } catch (Exception e) { - s_logger.error("Failed to copy volume:" + e); - if(s_logger.isDebugEnabled()) { - s_logger.debug("Failed to copy volume.", e); + logger.error("Failed to copy volume:" + e); + if(logger.isDebugEnabled()) { + logger.debug("Failed to copy volume.", e); } res.setResult(e.toString()); future.complete(res); @@ -1889,7 +1890,7 @@ protected Void copyVolumeCallBack(AsyncCallbackDispatcher copyManagedVolume(VolumeInfo srcVolume, VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { - s_logger.debug("There are snapshots creating for this volume, can not move this volume"); + logger.debug("There are snapshots creating for this volume, can not move this volume"); res.setResult("There are 
snapshots creating for this volume, can not move this volume"); future.complete(res); return future; } if (snapshotMgr.backedUpSnapshotsExistsForVolume(srcVolume)) { - s_logger.debug("There are backed up snapshots for this volume, can not move."); + logger.debug("There are backed up snapshots for this volume, can not move."); res.setResult("[UNSUPPORTED] There are backed up snapshots for this volume, can not move. Please try again after removing them."); future.complete(res); return future; @@ -2018,7 +2019,7 @@ private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, Host hostWithPoolsAccess = _storageMgr.findUpAndEnabledHostWithAccessToStoragePools(poolIds); if (hostWithPoolsAccess == null) { - s_logger.debug("No host(s) available with pool access, can not move this volume"); + logger.debug("No host(s) available with pool access, can not move this volume"); res.setResult("No host(s) available with pool access, can not move this volume"); future.complete(res); return future; @@ -2031,7 +2032,7 @@ private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, AsyncCallFuture createVolumeFuture = createVolumeAsync(destVolume, destStore); VolumeApiResult createVolumeResult = createVolumeFuture.get(); if (createVolumeResult.isFailed()) { - s_logger.debug("Failed to create dest volume " + destVolume.getId() + ", volume can be removed"); + logger.debug("Failed to create dest volume " + destVolume.getId() + ", volume can be removed"); destroyVolume(destVolume.getId()); destVolume.processEvent(Event.ExpungeRequested); destVolume.processEvent(Event.OperationSuccessed); @@ -2078,9 +2079,9 @@ private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, motionSrv.copyAsync(srcVolume, destVolume, hostWithPoolsAccess, caller); } catch (Exception e) { - s_logger.error("Copy to managed volume failed due to: " + e); - if(s_logger.isDebugEnabled()) { - s_logger.debug("Copy to managed volume failed.", e); + logger.error("Copy to managed volume failed due to: " + e); + 
if(logger.isDebugEnabled()) { + logger.debug("Copy to managed volume failed.", e); } res.setResult(e.toString()); future.complete(res); @@ -2120,7 +2121,7 @@ protected Void copyManagedVolumeCallBack(AsyncCallbackDispatcher migrateVolume(VolumeInfo srcVolume, Data VolumeApiResult res = new VolumeApiResult(srcVolume); try { if (!snapshotMgr.canOperateOnVolume(srcVolume)) { - s_logger.debug("Snapshots are being created on this volume. This volume cannot be migrated now."); + logger.debug("Snapshots are being created on this volume. This volume cannot be migrated now."); res.setResult("Snapshots are being created on this volume. This volume cannot be migrated now."); future.complete(res); return future; @@ -2227,7 +2228,7 @@ public AsyncCallFuture migrateVolume(VolumeInfo srcVolume, Data caller.setCallback(caller.getTarget().migrateVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); } catch (Exception e) { - s_logger.debug("Failed to migrate volume", e); + logger.debug("Failed to migrate volume", e); res.setResult(e.toString()); future.complete(res); } @@ -2254,7 +2255,7 @@ protected Void migrateVolumeCallBack(AsyncCallbackDispatcher migrateVolumes(Map for (Map.Entry entry : volumeMap.entrySet()) { VolumeInfo volume = entry.getKey(); if (!snapshotMgr.canOperateOnVolume(volume)) { - s_logger.debug("Snapshots are being created on a volume. Volumes cannot be migrated now."); + logger.debug("Snapshots are being created on a volume. Volumes cannot be migrated now."); res.setResult("Snapshots are being created on a volume. 
Volumes cannot be migrated now."); future.complete(res); @@ -2307,7 +2308,7 @@ public AsyncCallFuture migrateVolumes(Map motionSrv.copyAsync(volumeMap, vmTo, srcHost, destHost, caller); } catch (Exception e) { - s_logger.debug("Failed to copy volume", e); + logger.debug("Failed to copy volume", e); res.setResult(e.toString()); future.complete(res); } @@ -2337,7 +2338,7 @@ protected Void migrateVmWithVolumesCallBack(AsyncCallbackDispatcher registerVolumeForPostUpload(VolumeInfo volume, EndPoint ep = _epSelector.select(store); if (ep == null) { String errorMessage = "There is no secondary storage VM for image store " + store.getName(); - s_logger.warn(errorMessage); + logger.warn(errorMessage); throw new CloudRuntimeException(errorMessage); } DataObject volumeOnStore = store.create(volume); @@ -2411,7 +2412,7 @@ protected Void registerVolumeCallback(AsyncCallbackDispatcher resize(VolumeInfo volume) { try { volume.processEvent(Event.ResizeRequested); } catch (Exception e) { - s_logger.debug("Failed to change state to resize", e); + logger.debug("Failed to change state to resize", e); result.setResult(e.toString()); future.complete(result); return future; @@ -2466,7 +2467,7 @@ public AsyncCallFuture resize(VolumeInfo volume) { try { volume.getDataStore().getDriver().resize(volume, caller); } catch (Exception e) { - s_logger.debug("Failed to change state to resize", e); + logger.debug("Failed to change state to resize", e); result.setResult(e.toString()); @@ -2513,7 +2514,7 @@ protected Void resizeVolumeCallback(AsyncCallbackDispatcher 0) { for (VolumeDataStoreVO volumeHost : toBeDownloaded) { if (volumeHost.getDownloadUrl() == null) { // If url is null, skip downloading - s_logger.info("Skip downloading volume " + volumeHost.getVolumeId() + " since no download url is specified."); + logger.info("Skip downloading volume " + volumeHost.getVolumeId() + " since no download url is specified."); continue; } @@ -2661,12 +2662,12 @@ public void handleVolumeSync(DataStore 
store) { // means that this is a duplicate entry from migration of previous NFS to staging. if (store.getScope().getScopeType() == ScopeType.REGION) { if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED && volumeHost.getInstallPath() == null) { - s_logger.info("Skip sync volume for migration of previous NFS to object store"); + logger.info("Skip sync volume for migration of previous NFS to object store"); continue; } } - s_logger.debug("Volume " + volumeHost.getVolumeId() + " needs to be downloaded to " + store.getName()); + logger.debug("Volume " + volumeHost.getVolumeId() + " needs to be downloaded to " + store.getName()); // reset volume status back to Allocated VolumeObject vol = (VolumeObject)volFactory.getVolume(volumeHost.getVolumeId()); vol.processEvent(Event.OperationFailed); // reset back volume status @@ -2695,24 +2696,24 @@ public void handleVolumeSync(DataStore store) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(dtCommand, false, errMsg); } else { answer = ep.sendMessage(dtCommand); } if (answer == null || !answer.getResult()) { - s_logger.info("Failed to deleted volume at store: " + store.getName()); + logger.info("Failed to deleted volume at store: " + store.getName()); } else { String description = "Deleted volume " + tInfo.getTemplateName() + " on secondary storage " + storeId; - s_logger.info(description); + logger.info(description); } } } finally { syncLock.unlock(); } } else { - s_logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing volume sync on data store " + storeId + " now."); + logger.info("Couldn't get global lock on " + lockString + ", another thread may be doing volume sync on data store " + storeId + " now."); } } finally { syncLock.releaseRef(); @@ -2725,7 +2726,7 @@ private Map listVolume(DataStore store) { Answer 
answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -2734,8 +2735,8 @@ private Map listVolume(DataStore store) { ListVolumeAnswer tanswer = (ListVolumeAnswer)answer; return tanswer.getTemplateInfo(); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Can not list volumes for image store " + store.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Can not list volumes for image store " + store.getId()); } } @@ -2748,11 +2749,11 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) { try { snapshot = snapshotMgr.takeSnapshot(volume); } catch (CloudRuntimeException cre) { - s_logger.error("Take snapshot: " + volume.getId() + " failed", cre); + logger.error("Take snapshot: " + volume.getId() + " failed", cre); throw cre; } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("unknown exception while taking snapshot for volume " + volume.getId() + " was caught", e); + if (logger.isDebugEnabled()) { + logger.debug("unknown exception while taking snapshot for volume " + volume.getId() + " was caught", e); } throw new CloudRuntimeException("Failed to take snapshot", e); } @@ -2807,10 +2808,10 @@ public void moveVolumeOnSecondaryStorageToAnotherAccount(Volume volume, Account VolumeDataStoreVO volumeStore = _volumeStoreDao.findByVolume(volume.getId()); if (volumeStore == null) { - s_logger.debug(String.format("Volume [%s] is not present in the secondary storage. Therefore we do not need to move it in the secondary storage.", volume)); + logger.debug(String.format("Volume [%s] is not present in the secondary storage. Therefore we do not need to move it in the secondary storage.", volume)); return; } - s_logger.debug(String.format("Volume [%s] is present in secondary storage. 
It will be necessary to move it from the source account's [%s] folder to the destination " + logger.debug(String.format("Volume [%s] is present in secondary storage. It will be necessary to move it from the source account's [%s] folder to the destination " + "account's [%s] folder.", volume.getUuid(), sourceAccount, destAccount)); @@ -2829,17 +2830,17 @@ public void moveVolumeOnSecondaryStorageToAnotherAccount(Volume volume, Account String msg = String.format("Unable to move volume [%s] from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage, due " + "to [%s].", volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount, answer.getDetails()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } - s_logger.debug(String.format("Volume [%s] was moved from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage.", + logger.debug(String.format("Volume [%s] was moved from [%s] (source account's [%s] folder) to [%s] (destination account's [%s] folder) in the secondary storage.", volume.getUuid(), srcPath.getParent(), sourceAccount, destPath, destAccount)); volumeStore.setInstallPath(String.format("%s/%s", destPath, srcPath.getFileName().toString())); if (!_volumeStoreDao.update(volumeStore.getId(), volumeStore)) { String msg = String.format("Unable to update volume [%s] install path in the DB.", volumeStore.getVolumeId()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } diff --git a/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java b/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java index 65996f181a9c..6e1086c631ec 100644 --- a/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java +++ 
b/engine/userdata/cloud-init/src/main/java/org/apache/cloudstack/userdata/CloudInitUserDataProvider.java @@ -38,7 +38,6 @@ import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.exception.CloudRuntimeException; @@ -62,8 +61,6 @@ protected enum FormatType { Map.entry(FormatType.INCLUDE_FILE, INCLUDE_FILE_CONTENT_TYPE) ); - private static final Logger LOGGER = Logger.getLogger(CloudInitUserDataProvider.class); - private static final Session session = Session.getDefaultInstance(new Properties()); @Override @@ -108,7 +105,7 @@ protected FormatType mapUserDataHeaderToFormatType(String header) { } else { String msg = String.format("Cannot recognise the user data format type from the header line: %s." + "Supported types are: cloud-config, bash script, cloud-boothook, include file or MIME", header); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -120,7 +117,7 @@ protected FormatType mapUserDataHeaderToFormatType(String header) { protected FormatType getUserDataFormatType(String userdata) { if (StringUtils.isBlank(userdata)) { String msg = "User data expected but provided empty user data"; - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -263,7 +260,7 @@ public String appendUserData(String encodedUserData1, String encodedUserData2) { } catch (MessagingException | IOException | CloudRuntimeException e) { String msg = String.format("Error attempting to merge user data as a multipart user data. 
" + "Reason: %s", e.getMessage()); - LOGGER.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(msg, e); } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterFenceManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterFenceManagerImpl.java index 4f5e0344e73b..203ebe6e3d45 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterFenceManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterFenceManagerImpl.java @@ -23,14 +23,12 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.management.ManagementServerHost; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.component.ManagerBase; @Component public class ClusterFenceManagerImpl extends ManagerBase implements ClusterFenceManager, ClusterManagerListener { - private static final Logger s_logger = Logger.getLogger(ClusterFenceManagerImpl.class); @Inject ClusterManager _clusterMgr; @@ -51,7 +49,7 @@ public void onManagementNodeLeft(List nodeList, @Override public void onManagementNodeIsolated() { - s_logger.error("Received node isolation notification, will perform self-fencing and shut myself down"); + logger.error("Received node isolation notification, will perform self-fencing and shut myself down"); System.exit(SELF_FENCING_EXIT_CODE); } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java index 289638fe22d8..e4e55eb9348a 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterManagerImpl.java @@ -47,7 +47,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import 
com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.cluster.dao.ManagementServerHostPeerDao; @@ -70,7 +69,6 @@ import com.cloud.utils.net.NetUtils; public class ClusterManagerImpl extends ManagerBase implements ClusterManager, Configurable { - private static final Logger s_logger = Logger.getLogger(ClusterManagerImpl.class); private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1000; // 1 second private static final int DEFAULT_OUTGOING_WORKERS = 5; @@ -176,7 +174,7 @@ private void cancelClusterRequestToPeer(final String strPeer) { } for (final ClusterServiceRequestPdu pdu : candidates) { - s_logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + pdu.getJsonPackage()); + logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + pdu.getJsonPackage()); synchronized (pdu) { pdu.notifyAll(); } @@ -260,13 +258,13 @@ private void onSendingClusterPdu() { try { peerService = getPeerService(pdu.getDestPeer()); } catch (final RemoteException e) { - s_logger.error("Unable to get cluster service on peer : " + pdu.getDestPeer()); + logger.error("Unable to get cluster service on peer : " + pdu.getDestPeer()); } if (peerService != null) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId() + ", pdu seq: " + + if (logger.isDebugEnabled()) { + logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId() + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage()); } @@ -276,8 +274,8 @@ private void onSendingClusterPdu() { final String strResult = peerService.execute(pdu); profiler.stop(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. 
time: " + + if (logger.isDebugEnabled()) { + logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " + profiler.getDurationInMillis() + "ms. agent: " + pdu.getAgentId() + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage()); } @@ -288,15 +286,15 @@ private void onSendingClusterPdu() { } catch (final RemoteException e) { invalidatePeerService(pdu.getDestPeer()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Exception on remote execution, peer: " + pdu.getDestPeer() + ", iteration: " + i + ", exception message :" + + if (logger.isInfoEnabled()) { + logger.info("Exception on remote execution, peer: " + pdu.getDestPeer() + ", iteration: " + i + ", exception message :" + e.getMessage()); } } } } } catch (final Throwable e) { - s_logger.error("Unexcpeted exception: ", e); + logger.error("Unexcpeted exception: ", e); } } } @@ -320,11 +318,11 @@ protected void runInContext() { requestPdu.notifyAll(); } } else { - s_logger.warn("Original request has already been cancelled. pdu: " + pdu.getJsonPackage()); + logger.warn("Original request has already been cancelled. pdu: " + pdu.getJsonPackage()); } } else if (pdu.getPduType() == ClusterServicePdu.PDU_TYPE_STATUS_UPDATE) { if (statusAdministrator == null) { - s_logger.warn("No status administration to report a status update too."); + logger.warn("No status administration to report a status update too."); } else { statusAdministrator.newStatus(pdu); } @@ -348,7 +346,7 @@ protected void runInContext() { } }); } catch (final Throwable e) { - s_logger.error("Unexcpeted exception: ", e); + logger.error("Unexcpeted exception: ", e); } } } @@ -381,12 +379,12 @@ public void broadcast(final long agentId, final String cmds) { continue; // Skip myself. 
} try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); + if (logger.isDebugEnabled()) { + logger.debug("Forwarding " + cmds + " to " + peer.getMsid()); } executeAsync(peerName, agentId, cmds, true); } catch (final Exception e) { - s_logger.warn("Caught exception while talkign to " + peer.getMsid()); + logger.warn("Caught exception while talkign to " + peer.getMsid()); } } } @@ -409,14 +407,14 @@ public void publishStatus(final String status) { for (final ManagementServerHostVO peer : peers) { final String peerName = Long.toString(peer.getMsid()); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Forwarding " + status + " to " + peer.getMsid()); + if (logger.isDebugEnabled()) { + logger.debug("Forwarding " + status + " to " + peer.getMsid()); } sendStatus(peerName, status); } catch (final Exception e) { String msg = String.format("Caught exception while talking to %d", peer.getMsid()); - s_logger.warn(msg); - s_logger.debug(msg, e); + logger.warn(msg); + logger.debug(msg, e); } } } @@ -434,8 +432,8 @@ public void sendStatus(final String strPeer, final String status) { @Override public String execute(final String strPeer, final long agentId, final String cmds, final boolean stopOnError) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " + cmds); + if (logger.isDebugEnabled()) { + logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " + cmds); } final ClusterServiceRequestPdu pdu = new ClusterServiceRequestPdu(); @@ -454,8 +452,8 @@ public String execute(final String strPeer, final long agentId, final String cmd } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. result: " + pdu.getResponseResult()); + if (logger.isDebugEnabled()) { + logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. 
result: " + pdu.getResponseResult()); } if (pdu.getResponseResult() != null && pdu.getResponseResult().length() > 0) { @@ -484,7 +482,7 @@ public void registerListener(final ClusterManagerListener listener) { // Note : we don't check duplicates synchronized (_listeners) { - s_logger.info("register cluster listener " + listener.getClass()); + logger.info("register cluster listener " + listener.getClass()); _listeners.add(listener); } @@ -493,18 +491,18 @@ public void registerListener(final ClusterManagerListener listener) { @Override public void unregisterListener(final ClusterManagerListener listener) { synchronized (_listeners) { - s_logger.info("unregister cluster listener " + listener.getClass()); + logger.info("unregister cluster listener " + listener.getClass()); _listeners.remove(listener); } } public void notifyNodeJoined(final List nodeList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notify management server node join to listeners."); + if (logger.isDebugEnabled()) { + logger.debug("Notify management server node join to listeners."); for (final ManagementServerHostVO mshost : nodeList) { - s_logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Joining node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); } } @@ -518,13 +516,13 @@ public void notifyNodeJoined(final List nodeList) { } public void notifyNodeLeft(final List nodeList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notify management server node left to listeners."); + if (logger.isDebugEnabled()) { + logger.debug("Notify management server node left to listeners."); } for (final ManagementServerHostVO mshost : nodeList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + if (logger.isDebugEnabled()) { + logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); } 
cancelClusterRequestToPeer(String.valueOf(mshost.getMsid())); } @@ -539,8 +537,8 @@ public void notifyNodeLeft(final List nodeList) { } public void notifyNodeIsolated() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notify management server node isolation to listeners"); + if (logger.isDebugEnabled()) { + logger.debug("Notify management server node isolation to listeners"); } synchronized (_listeners) { @@ -595,16 +593,16 @@ protected void runInContext() { profilerHeartbeatUpdate.start(); txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId); + if (logger.isTraceEnabled()) { + logger.trace("Cluster manager heartbeat update, id:" + _mshostId); } _mshostDao.update(_mshostId, _runId, DateUtil.currentGMTTime()); profilerHeartbeatUpdate.stop(); profilerPeerScan.start(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Cluster manager peer-scan, id:" + _mshostId); + if (logger.isTraceEnabled()) { + logger.trace("Cluster manager peer-scan, id:" + _mshostId); } if (!_peerScanInited) { @@ -619,18 +617,18 @@ protected void runInContext() { profiler.stop(); if (profiler.getDurationInMillis() >= HeartbeatInterval.value()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() + ", profilerHeartbeatUpdate: " + + if (logger.isDebugEnabled()) { + logger.debug("Management server heartbeat takes too long to finish. 
profiler: " + profiler.toString() + ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() + ", profilerPeerScan: " + profilerPeerScan.toString()); } } } } catch (final CloudRuntimeException e) { - s_logger.error("Runtime DB exception ", e.getCause()); + logger.error("Runtime DB exception ", e.getCause()); if (e.getCause() instanceof ClusterInvalidSessionException) { - s_logger.error("Invalid cluster session found, fence it"); + logger.error("Invalid cluster session found, fence it"); queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated)); } @@ -640,7 +638,7 @@ protected void runInContext() { } catch (final ActiveFencingException e) { queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated)); } catch (final Throwable e) { - s_logger.error("Unexpected exception in cluster heartbeat", e); + logger.error("Unexpected exception in cluster heartbeat", e); if (isRootCauseConnectionRelated(e.getCause())) { invalidHeartbeatConnection(); } @@ -669,7 +667,7 @@ private void invalidHeartbeatConnection() { if (conn != null) { _heartbeatConnection.reset(conn); } else { - s_logger.error("DB communication problem detected, fence it"); + logger.error("DB communication problem detected, fence it"); queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeIsolated)); } // The stand-alone connection does not have to be closed here because there will be another reference to it. 
@@ -702,11 +700,11 @@ protected void runInContext() { profiler.stop(); if (profiler.getDurationInMillis() > 1000) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); } } else { - s_logger.warn("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); + logger.warn("Notifying management server join event took " + profiler.getDurationInMillis() + " ms"); } } break; @@ -720,11 +718,11 @@ protected void runInContext() { profiler.stop(); if (profiler.getDurationInMillis() > 1000) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); + if (logger.isDebugEnabled()) { + logger.debug("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); } } else { - s_logger.warn("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); + logger.warn("Notifying management server leave event took " + profiler.getDurationInMillis() + " ms"); } } break; @@ -739,7 +737,7 @@ protected void runInContext() { } } catch (final Throwable e) { - s_logger.warn("Unexpected exception during cluster notification. ", e); + logger.warn("Unexpected exception during cluster notification. 
", e); } } @@ -806,18 +804,18 @@ private void initPeerScan() { if (orphanList.size() > 0) { for (final Long orphanMsid : orphanList) { // construct fake ManagementServerHostVO based on orphan MSID - s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid); + logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid); inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date())); } } else { - s_logger.info("We are good, no orphan management server msid in host table is found"); + logger.info("We are good, no orphan management server msid in host table is found"); } if (inactiveList.size() > 0) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); + if (logger.isInfoEnabled()) { + logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp"); for (final ManagementServerHostVO host : inactiveList) { - s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + + logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + ", version: " + host.getVersion()); } } @@ -825,7 +823,7 @@ private void initPeerScan() { final List downHostList = new ArrayList(); for (final ManagementServerHostVO host : inactiveList) { if (!pingManagementNode(host)) { - s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable"); + logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable"); downHostList.add(host); } } @@ -834,7 +832,7 @@ private void initPeerScan() { queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, 
downHostList)); } } else { - s_logger.info("No inactive management server node found"); + logger.info("No inactive management server node found"); } } @@ -859,7 +857,7 @@ private void peerScan() throws ActiveFencingException { if (_mshostPeerDao.countStateSeenInPeers(_mshostId, _runId, ManagementServerHost.State.Down) > 0) { final String msg = "We have detected that at least one management server peer reports that this management server is down, perform active fencing to avoid split-brain situation"; - s_logger.error(msg); + logger.error(msg); throw new ActiveFencingException(msg); } @@ -869,24 +867,24 @@ private void peerScan() throws ActiveFencingException { final ManagementServerHostVO current = getInListById(entry.getKey(), currentList); if (current == null) { if (entry.getKey().longValue() != _mshostId.longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + if (logger.isDebugEnabled()) { + logger.debug("Detected management node left, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); } removedNodeList.add(entry.getValue()); } } else { if (current.getRunid() == 0) { if (entry.getKey().longValue() != _mshostId.longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + + if (logger.isDebugEnabled()) { + logger.debug("Detected management node left because of invalidated session, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); } invalidatedNodeList.add(entry.getValue()); } } else { if (entry.getValue().getRunid() != current.getRunid()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node left and rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); + if (logger.isDebugEnabled()) { + logger.debug("Detected management node left and 
rejoined quickly, id:" + entry.getKey() + ", nodeIP:" + entry.getValue().getServiceIP()); } entry.getValue().setRunid(current.getRunid()); @@ -906,7 +904,7 @@ private void peerScan() throws ActiveFencingException { try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); } catch (final Exception e) { - s_logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); + logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } @@ -921,15 +919,15 @@ private void peerScan() throws ActiveFencingException { while (it.hasNext()) { final ManagementServerHostVO mshost = it.next(); if (!pingManagementNode(mshost)) { - s_logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable"); + logger.warn("Management node " + mshost.getId() + " is detected inactive by timestamp and also not pingable"); _activePeers.remove(mshost.getId()); try { JmxUtil.unregisterMBean("ClusterManager", "Node " + mshost.getId()); } catch (final Exception e) { - s_logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); + logger.warn("Unable to deregiester cluster node from JMX monitoring due to exception " + e.toString()); } } else { - s_logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but is pingable"); + logger.info("Management node " + mshost.getId() + " is detected inactive by timestamp but is pingable"); it.remove(); } } @@ -944,15 +942,15 @@ private void peerScan() throws ActiveFencingException { if (!_activePeers.containsKey(mshost.getId())) { _activePeers.put(mshost.getId(), mshost); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + mshost.getServiceIP()); + if (logger.isDebugEnabled()) { + logger.debug("Detected management node joined, id:" + mshost.getId() + ", nodeIP:" + 
mshost.getServiceIP()); } newNodeList.add(mshost); try { JmxUtil.registerMBean("ClusterManager", "Node " + mshost.getId(), new ClusterManagerMBeanImpl(this, mshost)); } catch (final Exception e) { - s_logger.warn("Unable to register cluster node into JMX monitoring due to exception " + ExceptionUtil.toString(e)); + logger.warn("Unable to register cluster node into JMX monitoring due to exception " + ExceptionUtil.toString(e)); } } } @@ -964,8 +962,8 @@ private void peerScan() throws ActiveFencingException { profiler.stop(); if (profiler.getDurationInMillis() >= HeartbeatInterval.value()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString() + ", profilerQueryActiveList: " + + if (logger.isDebugEnabled()) { + logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString() + ", profilerQueryActiveList: " + profilerQueryActiveList.toString() + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString() + ", profilerInvalidatedNodeList: " + profilerInvalidatedNodeList.toString() + ", profilerRemovedList: " + profilerRemovedList.toString()); } @@ -984,8 +982,8 @@ private static ManagementServerHostVO getInListById(final Long id, final List() { @@ -1010,14 +1008,14 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { mshost.setState(ManagementServerHost.State.Up); mshost.setUuid(UUID.randomUUID().toString()); _mshostDao.persist(mshost); - if (s_logger.isInfoEnabled()) { - s_logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); + if (logger.isInfoEnabled()) { + logger.info("New instance of management server msid " + _msId + ", runId " + _runId + " is being started"); } } else { _mshostDao.update(mshost.getId(), _runId, NetUtils.getCanonicalHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); - if (s_logger.isInfoEnabled()) { - 
s_logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); + if (logger.isInfoEnabled()) { + logger.info("Management server " + _msId + ", runId " + _runId + " is being started"); } } @@ -1026,8 +1024,8 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { }); _mshostId = mshost.getId(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); + if (logger.isInfoEnabled()) { + logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); } _mshostPeerDao.clearPeerInfo(_mshostId); @@ -1036,8 +1034,8 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { _heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HeartbeatInterval.value(), HeartbeatInterval.value(), TimeUnit.MILLISECONDS); _notificationExecutor.submit(getNotificationTask()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster manager was started successfully"); + if (logger.isInfoEnabled()) { + logger.info("Cluster manager was started successfully"); } return true; @@ -1046,8 +1044,8 @@ public ManagementServerHostVO doInTransaction(final TransactionStatus status) { @Override @DB public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stopping Cluster manager, msid : " + _msId); + if (logger.isInfoEnabled()) { + logger.info("Stopping Cluster manager, msid : " + _msId); } if (_mshostId != null) { @@ -1068,8 +1066,8 @@ public boolean stop() { } catch (final InterruptedException e) { } - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster manager is stopped"); + if (logger.isInfoEnabled()) { + logger.info("Cluster manager is stopped"); } return true; @@ -1077,8 +1075,8 @@ public boolean stop() { @Override public boolean configure(final String name, final Map params) throws 
ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring cluster manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring cluster manager : " + name); } final Properties dbProps = DbProperties.getDbProperties(); @@ -1088,8 +1086,8 @@ public boolean configure(final String name, final Map params) th } _clusterNodeIP = _clusterNodeIP.trim(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster node IP : " + _clusterNodeIP); + if (logger.isInfoEnabled()) { + logger.info("Cluster node IP : " + _clusterNodeIP); } if (!NetUtils.isLocalAddress(_clusterNodeIP)) { @@ -1114,8 +1112,8 @@ public boolean configure(final String name, final Map params) th checkConflicts(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster manager is configured."); + if (logger.isInfoEnabled()) { + logger.info("Cluster manager is configured."); } return true; } @@ -1173,7 +1171,7 @@ private boolean pingManagementNode(final ManagementServerHostVO mshost) { final String targetIp = mshost.getServiceIP(); if ("127.0.0.1".equals(targetIp) || "0.0.0.0".equals(targetIp)) { - s_logger.info("ping management node cluster service can not be performed on self"); + logger.info("ping management node cluster service can not be performed on self"); return false; } @@ -1181,7 +1179,7 @@ private boolean pingManagementNode(final ManagementServerHostVO mshost) { while (--retry > 0) { SocketChannel sch = null; try { - s_logger.info("Trying to connect to " + targetIp); + logger.info("Trying to connect to " + targetIp); sch = SocketChannel.open(); sch.configureBlocking(true); sch.socket().setSoTimeout(5000); @@ -1191,9 +1189,9 @@ private boolean pingManagementNode(final ManagementServerHostVO mshost) { return true; } catch (final IOException e) { if (e instanceof ConnectException) { - s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException"); - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e); + logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e); } return false; } @@ -1212,7 +1210,7 @@ private boolean pingManagementNode(final ManagementServerHostVO mshost) { } } - s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries"); + logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries"); return false; } @@ -1229,25 +1227,25 @@ private void checkConflicts() throws ConfigurationException { if ("127.0.0.1".equals(_clusterNodeIP)) { if (pingManagementNode(peer.getMsid())) { final String msg = "Detected another management node with localhost IP is already running, please check your cluster configuration"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } else { final String msg = "Detected another management node with localhost IP is considered as running in DB, however it is not pingable, we will continue cluster initialization with this management server node"; - s_logger.info(msg); + logger.info(msg); } } else { if (pingManagementNode(peer.getMsid())) { final String msg = "Detected that another management node with the same IP " + peer.getServiceIP() + " is already running, please check your cluster configuration"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } else { final String msg = "Detected that another management node with the same IP " + peer.getServiceIP() + " is considered as running in DB, however it is not pingable, we will continue cluster initialization with this 
management server node"; - s_logger.info(msg); + logger.info(msg); } } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java index 7451b5f42269..937ef4a62491 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletAdapter.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.ConfigDepot; import com.cloud.cluster.dao.ManagementServerHostDao; @@ -34,7 +33,6 @@ public class ClusterServiceServletAdapter extends AdapterBase implements ClusterServiceAdapter { - private static final Logger s_logger = Logger.getLogger(ClusterServiceServletAdapter.class); private static final int DEFAULT_SERVICE_PORT = 9090; private static final int DEFAULT_REQUEST_TIMEOUT = 300; // 300 seconds @@ -59,7 +57,7 @@ public ClusterService getPeerService(String strPeer) throws RemoteException { try { init(); } catch (ConfigurationException e) { - s_logger.error("Unable to init ClusterServiceServletAdapter"); + logger.error("Unable to init ClusterServiceServletAdapter"); throw new RemoteException("Unable to init ClusterServiceServletAdapter"); } @@ -75,7 +73,7 @@ public String getServiceEndpointName(String strPeer) { try { init(); } catch (ConfigurationException e) { - s_logger.error("Unable to init ClusterServiceServletAdapter"); + logger.error("Unable to init ClusterServiceServletAdapter"); return null; } @@ -126,7 +124,7 @@ private void init() throws ConfigurationException { Properties dbProps = DbProperties.getDbProperties(); _clusterServicePort = NumbersUtil.parseInt(dbProps.getProperty("cluster.servlet.port"), DEFAULT_SERVICE_PORT); - if (s_logger.isInfoEnabled()) - s_logger.info("Cluster servlet port : " + _clusterServicePort); 
+ if (logger.isInfoEnabled()) + logger.info("Cluster servlet port : " + _clusterServicePort); } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java index 69cc871dc642..ac468089f473 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletContainer.java @@ -41,14 +41,14 @@ import org.apache.http.protocol.ResponseContent; import org.apache.http.protocol.ResponseDate; import org.apache.http.protocol.ResponseServer; -import org.apache.log4j.Logger; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.utils.concurrency.NamedThreadFactory; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class ClusterServiceServletContainer { - private static final Logger s_logger = Logger.getLogger(ClusterServiceServletContainer.class); private ListenerThread listenerThread; @@ -70,6 +70,8 @@ public void stop() { } static class ListenerThread extends Thread { + + private static Logger LOGGER = LogManager.getLogger(ListenerThread.class); private HttpService _httpService = null; private volatile ServerSocket _serverSocket = null; private HttpParams _params = null; @@ -81,7 +83,7 @@ public ListenerThread(HttpRequestHandler requestHandler, int port) { try { _serverSocket = new ServerSocket(port); } catch (IOException ioex) { - s_logger.error("error initializing cluster service servlet container", ioex); + LOGGER.error("error initializing cluster service servlet container", ioex); return; } @@ -114,7 +116,7 @@ public void stopRunning() { try { _serverSocket.close(); } catch (IOException e) { - s_logger.info("[ignored] error on closing server socket", e); + LOGGER.info("[ignored] error on closing server socket", e); } _serverSocket = null; } @@ -122,8 +124,8 @@ 
public void stopRunning() { @Override public void run() { - if (s_logger.isInfoEnabled()) - s_logger.info("Cluster service servlet container listening on port " + _serverSocket.getLocalPort()); + if (LOGGER.isInfoEnabled()) + LOGGER.info("Cluster service servlet container listening on port " + _serverSocket.getLocalPort()); while (_serverSocket != null) { try { @@ -138,47 +140,47 @@ protected void runInContext() { HttpContext context = new BasicHttpContext(null); try { while (!Thread.interrupted() && conn.isOpen()) { - if (s_logger.isTraceEnabled()) - s_logger.trace("dispatching cluster request from " + conn.getRemoteAddress().toString()); + if (LOGGER.isTraceEnabled()) + LOGGER.trace("dispatching cluster request from " + conn.getRemoteAddress().toString()); _httpService.handleRequest(conn, context); - if (s_logger.isTraceEnabled()) - s_logger.trace("Cluster request from " + conn.getRemoteAddress().toString() + " is processed"); + if (LOGGER.isTraceEnabled()) + LOGGER.trace("Cluster request from " + conn.getRemoteAddress().toString() + " is processed"); } } catch (ConnectionClosedException ex) { // client close and read time out exceptions are expected // when KEEP-AVLIE is enabled - s_logger.trace("Client closed connection", ex); + LOGGER.trace("Client closed connection", ex); } catch (IOException ex) { - s_logger.trace("I/O error", ex); + LOGGER.trace("I/O error", ex); } catch (HttpException ex) { - s_logger.error("Unrecoverable HTTP protocol violation", ex); + LOGGER.error("Unrecoverable HTTP protocol violation", ex); } finally { try { conn.shutdown(); } catch (IOException ignore) { - s_logger.error("unexpected exception", ignore); + LOGGER.error("unexpected exception", ignore); } } } }); } catch (Throwable e) { - s_logger.error("Unexpected exception ", e); + LOGGER.error("Unexpected exception ", e); // back off to avoid spinning if the exception condition keeps coming back try { Thread.sleep(1000); } catch (InterruptedException e1) { - s_logger.debug("[ignored] 
interrupted while waiting to retry running the servlet container."); + LOGGER.debug("[ignored] interrupted while waiting to retry running the servlet container."); } } } _executor.shutdown(); - if (s_logger.isInfoEnabled()) - s_logger.info("Cluster service servlet container shutdown"); + if (LOGGER.isInfoEnabled()) + LOGGER.info("Cluster service servlet container shutdown"); } } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletHttpHandler.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletHttpHandler.java index f697ade8a39b..4e94f434f8f8 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletHttpHandler.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletHttpHandler.java @@ -29,10 +29,11 @@ import org.apache.http.protocol.HttpContext; import org.apache.http.protocol.HttpRequestHandler; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class ClusterServiceServletHttpHandler implements HttpRequestHandler { - private static final Logger s_logger = Logger.getLogger(ClusterServiceServletHttpHandler.class); + protected Logger logger = LogManager.getLogger(getClass()); private final ClusterManager manager; @@ -44,27 +45,27 @@ public ClusterServiceServletHttpHandler(ClusterManager manager) { public void handle(HttpRequest request, HttpResponse response, HttpContext context) throws HttpException, IOException { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Start Handling cluster HTTP request"); + if (logger.isTraceEnabled()) { + logger.trace("Start Handling cluster HTTP request"); } parseRequest(request); handleRequest(request, response); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Handle cluster HTTP request done"); + if (logger.isTraceEnabled()) { + logger.trace("Handle cluster HTTP request done"); } } catch (final 
Throwable e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Exception " + e.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Exception " + e.toString()); } try { writeResponse(response, HttpStatus.SC_INTERNAL_SERVER_ERROR, null); } catch (final Throwable e2) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Exception " + e2.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Exception " + e2.toString()); } } } @@ -88,8 +89,8 @@ private void parseRequest(HttpRequest request) throws IOException { final String name = URLDecoder.decode(paramValue[0]); final String value = URLDecoder.decode(paramValue[1]); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Parsed request parameter " + name + "=" + value); + if (logger.isTraceEnabled()) { + logger.trace("Parsed request parameter " + name + "=" + value); } request.getParams().setParameter(name, value); } @@ -134,22 +135,22 @@ protected void handleRequest(HttpRequest req, HttpResponse response) { case RemoteMethodConstants.METHOD_UNKNOWN: default: assert false; - s_logger.error("unrecognized method " + nMethod); + logger.error("unrecognized method " + nMethod); break; } } catch (final Throwable e) { - s_logger.error("Unexpected exception when processing cluster service request : ", e); + logger.error("Unexpected exception when processing cluster service request : ", e); } if (responseContent != null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Write response with HTTP OK " + responseContent); + if (logger.isTraceEnabled()) { + logger.trace("Write response with HTTP OK " + responseContent); } writeResponse(response, HttpStatus.SC_OK, responseContent); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Write response with HTTP Bad request"); + if (logger.isTraceEnabled()) { + logger.trace("Write response with HTTP Bad request"); } writeResponse(response, HttpStatus.SC_BAD_REQUEST, null); @@ -184,8 +185,8 @@ private String handleDeliverPduMethodCall(HttpRequest req) 
{ private String handlePingMethodCall(HttpRequest req) { final String callingPeer = (String)req.getParams().getParameter("callingPeer"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Handle ping request from " + callingPeer); + if (logger.isDebugEnabled()) { + logger.debug("Handle ping request from " + callingPeer); } return "true"; diff --git a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java index ec8b90866d0e..b60012dbeef1 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/ClusterServiceServletImpl.java @@ -25,13 +25,14 @@ import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager; import org.apache.commons.httpclient.methods.PostMethod; import org.apache.commons.httpclient.params.HttpClientParams; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.Profiler; public class ClusterServiceServletImpl implements ClusterService { private static final long serialVersionUID = 4574025200012566153L; - private static final Logger s_logger = Logger.getLogger(ClusterServiceServletImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private String _serviceUrl; @@ -41,7 +42,7 @@ public ClusterServiceServletImpl() { } public ClusterServiceServletImpl(final String serviceUrl) { - s_logger.info("Setup cluster service servlet. service url: " + serviceUrl + ", request timeout: " + ClusterServiceAdapter.ClusterMessageTimeOut.value() + + logger.info("Setup cluster service servlet. 
service url: " + serviceUrl + ", request timeout: " + ClusterServiceAdapter.ClusterMessageTimeOut.value() + " seconds"); _serviceUrl = serviceUrl; @@ -68,8 +69,8 @@ public String execute(final ClusterServicePdu pdu) throws RemoteException { @Override public boolean ping(final String callingPeer) throws RemoteException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping at " + _serviceUrl); + if (logger.isDebugEnabled()) { + logger.debug("Ping at " + _serviceUrl); } final HttpClient client = getHttpClient(); @@ -95,20 +96,20 @@ private String executePostMethod(final HttpClient client, final PostMethod metho if (response == HttpStatus.SC_OK) { result = method.getResponseBodyAsString(); profiler.stop(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: " + profiler.getDurationInMillis() + " ms"); + if (logger.isDebugEnabled()) { + logger.debug("POST " + _serviceUrl + " response :" + result + ", responding time: " + profiler.getDurationInMillis() + " ms"); } } else { profiler.stop(); - s_logger.error("Invalid response code : " + response + ", from : " + _serviceUrl + ", method : " + method.getParameter("method") + " responding time: " + + logger.error("Invalid response code : " + response + ", from : " + _serviceUrl + ", method : " + method.getParameter("method") + " responding time: " + profiler.getDurationInMillis()); } } catch (final HttpException e) { - s_logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method")); + logger.error("HttpException from : " + _serviceUrl + ", method : " + method.getParameter("method")); } catch (final IOException e) { - s_logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method")); + logger.error("IOException from : " + _serviceUrl + ", method : " + method.getParameter("method")); } catch (final Throwable e) { - s_logger.error("Exception from : " + _serviceUrl + ", method : " + 
method.getParameter("method") + ", exception :", e); + logger.error("Exception from : " + _serviceUrl + ", method : " + method.getParameter("method") + ", exception :", e); } finally { method.releaseConnection(); } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java index 715dfe26bae6..7b69889c853e 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java @@ -26,7 +26,6 @@ import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.cluster.ClusterInvalidSessionException; import org.apache.cloudstack.management.ManagementServerHost; @@ -42,7 +41,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ManagementServerHostDaoImpl extends GenericDaoBase implements ManagementServerHostDao { - private static final Logger s_logger = Logger.getLogger(ManagementServerHostDaoImpl.class); private final SearchBuilder MsIdSearch; private final SearchBuilder ActiveSearch; @@ -99,7 +97,7 @@ public void update(long id, long runid, String name, String version, String serv pstmt.executeUpdate(); txn.commit(); } catch (Exception e) { - s_logger.warn("Unexpected exception, ", e); + logger.warn("Unexpected exception, ", e); throw new RuntimeException(e.getMessage(), e); } } @@ -119,7 +117,7 @@ public boolean remove(Long id) { txn.commit(); return true; } catch (Exception e) { - s_logger.warn("Unexpected exception, ", e); + logger.warn("Unexpected exception, ", e); throw new RuntimeException(e.getMessage(), e); } } @@ -141,11 +139,11 @@ public void update(long id, long runid, Date lastUpdate) { txn.commit(); if (count < 1) { - s_logger.info("Invalid cluster session detected, runId " + runid + " is no longer valid"); + logger.info("Invalid cluster session 
detected, runId " + runid + " is no longer valid"); throw new CloudRuntimeException("Invalid cluster session detected, runId " + runid + " is no longer valid", new ClusterInvalidSessionException("runId " + runid + " is no longer valid")); } } catch (Exception e) { - s_logger.warn("Unexpected exception, ", e); + logger.warn("Unexpected exception, ", e); throw new RuntimeException(e.getMessage(), e); } } @@ -181,7 +179,7 @@ public int increaseAlertCount(long id) { changedRows = pstmt.executeUpdate(); txn.commit(); } catch (Exception e) { - s_logger.warn("Unexpected exception, ", e); + logger.warn("Unexpected exception, ", e); throw new RuntimeException(e.getMessage(), e); } @@ -223,7 +221,7 @@ public void update(long id, long runId, State state, Date lastUpdate) { int count = pstmt.executeUpdate(); if (count < 1) { - s_logger.info("Invalid cluster session detected, runId " + runId + " is no longer valid"); + logger.info("Invalid cluster session detected, runId " + runId + " is no longer valid"); throw new CloudRuntimeException("Invalid cluster session detected, runId " + runId + " is no longer valid", new ClusterInvalidSessionException("runId " + runId + " is no longer valid")); } } catch (SQLException e) { diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java index a7a56c738c16..827be4fe2998 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.management.ManagementServerHost; import com.cloud.cluster.ManagementServerHostPeerVO; @@ -30,7 +29,6 @@ import com.cloud.utils.db.TransactionLegacy; public class ManagementServerHostPeerDaoImpl extends GenericDaoBase implements 
ManagementServerHostPeerDao { - private static final Logger s_logger = Logger.getLogger(ManagementServerHostPeerDaoImpl.class); private final SearchBuilder ClearPeerSearch; private final SearchBuilder FindForUpdateSearch; @@ -85,7 +83,7 @@ public void updatePeerInfo(long ownerMshost, long peerMshost, long peerRunid, Ma } txn.commit(); } catch (Exception e) { - s_logger.warn("Unexpected exception, ", e); + logger.warn("Unexpected exception, ", e); txn.rollback(); } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java index 4e7b127a4178..7c4a6f9a609e 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.impl.ConfigurationVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentLifecycle; @@ -39,7 +38,6 @@ @Component public class ConfigurationDaoImpl extends GenericDaoBase implements ConfigurationDao { - private static final Logger s_logger = Logger.getLogger(ConfigurationDaoImpl.class); private Map _configs = null; private boolean _premium; @@ -145,7 +143,7 @@ public boolean update(String name, String value) { stmt.executeUpdate(); return true; } catch (Exception e) { - s_logger.warn("Unable to update Configuration Value", e); + logger.warn("Unable to update Configuration Value", e); } return false; } @@ -162,7 +160,7 @@ public boolean update(String name, String category, String value) { return true; } } catch (Exception e) { - s_logger.warn("Unable to update Configuration Value", e); + logger.warn("Unable to update Configuration Value", e); } return false; } @@ -196,7 
+194,7 @@ public String getValueAndInitIfNotExist(String name, String category, String ini } return returnValue; } catch (Exception e) { - s_logger.warn("Unable to update Configuration Value", e); + logger.warn("Unable to update Configuration Value", e); throw new CloudRuntimeException("Unable to initialize configuration variable: " + name); } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java index 75a3ea4d947b..fa48670ce0cb 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java @@ -37,7 +37,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; @@ -71,7 +72,7 @@ * validation class to validate the value the admin input for the key. 
*/ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { - private final static Logger s_logger = Logger.getLogger(ConfigDepotImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject ConfigurationDao _configDao; @Inject @@ -125,7 +126,7 @@ protected void populateConfiguration(Date date, Configurable configurable) { if (_configured.contains(configurable)) return; - s_logger.debug("Retrieving keys from " + configurable.getClass().getSimpleName()); + logger.debug("Retrieving keys from " + configurable.getClass().getSimpleName()); for (ConfigKey key : configurable.getConfigKeys()) { Pair> previous = _allKeys.get(key.key()); diff --git a/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java b/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java index 2ae0de90f268..7cf34e6955c3 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java +++ b/framework/db/src/main/java/com/cloud/utils/db/ConnectionConcierge.java @@ -30,7 +30,8 @@ import javax.management.StandardMBean; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.managed.context.ManagedContextRunnable; @@ -45,9 +46,9 @@ */ public class ConnectionConcierge { - static final Logger s_logger = Logger.getLogger(ConnectionConcierge.class); + protected Logger logger = LogManager.getLogger(getClass()); - static final ConnectionConciergeManager s_mgr = new ConnectionConciergeManager(); + private final ConnectionConciergeManager sMgr = new ConnectionConciergeManager(); Connection _conn; String _name; @@ -57,7 +58,7 @@ public class ConnectionConcierge { int _holdability; public ConnectionConcierge(String name, Connection conn, boolean keepAlive) { - _name = name + s_mgr.getNextId(); + _name = name + sMgr.getNextId(); _keepAlive = keepAlive; try { _autoCommit = conn.getAutoCommit(); @@ -73,7 +74,7 @@ public void reset(Connection 
conn) { try { release(); } catch (Throwable th) { - s_logger.error("Unable to release a connection", th); + logger.error("Unable to release a connection", th); } _conn = conn; try { @@ -81,10 +82,10 @@ public void reset(Connection conn) { _conn.setHoldability(_holdability); _conn.setTransactionIsolation(_isolationLevel); } catch (SQLException e) { - s_logger.error("Unable to release a connection", e); + logger.error("Unable to release a connection", e); } - s_mgr.register(_name, this); - s_logger.debug("Registering a database connection for " + _name); + sMgr.register(_name, this); + logger.debug("Registering a database connection for " + _name); } public final Connection conn() { @@ -92,7 +93,7 @@ public final Connection conn() { } public void release() { - s_mgr.unregister(_name); + sMgr.unregister(_name); try { if (_conn != null) { _conn.close(); @@ -114,7 +115,7 @@ public boolean keepAlive() { return _keepAlive; } - protected static class ConnectionConciergeManager extends StandardMBean implements ConnectionConciergeMBean { + protected class ConnectionConciergeManager extends StandardMBean implements ConnectionConciergeMBean { ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("ConnectionKeeper")); final ConcurrentHashMap _conns = new ConcurrentHashMap(); final AtomicInteger _idGenerator = new AtomicInteger(); @@ -125,7 +126,7 @@ protected static class ConnectionConciergeManager extends StandardMBean implemen try { JmxUtil.registerMBean("DB Connections", "DB Connections", this); } catch (Exception e) { - s_logger.error("Unable to register mbean", e); + logger.error("Unable to register mbean", e); } } @@ -147,7 +148,7 @@ protected String testValidity(String name, Connection conn) { try (PreparedStatement pstmt = conn.prepareStatement("SELECT 1");) { pstmt.executeQuery(); } catch (Throwable th) { - s_logger.error("Unable to keep the db connection for " + name, th); + logger.error("Unable to keep the db connection for " 
+ name, th); return th.toString(); } } @@ -187,7 +188,7 @@ public String resetKeepAliveTask(int seconds) { try { _executor.shutdown(); } catch (Exception e) { - s_logger.error("Unable to shutdown executor", e); + logger.error("Unable to shutdown executor", e); } } @@ -196,13 +197,13 @@ public String resetKeepAliveTask(int seconds) { _executor.scheduleAtFixedRate(new ManagedContextRunnable() { @Override protected void runInContext() { - s_logger.trace("connection concierge keep alive task"); + logger.trace("connection concierge keep alive task"); for (Map.Entry entry : _conns.entrySet()) { String name = entry.getKey(); ConnectionConcierge concierge = entry.getValue(); if (concierge.keepAlive()) { if (testValidity(name, concierge.conn()) != null) { - s_logger.info("Resetting DB connection " + name); + logger.info("Resetting DB connection " + name); resetConnection(name); } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/DbUtil.java b/framework/db/src/main/java/com/cloud/utils/db/DbUtil.java index 68424bc5dd9e..88397f54d4f4 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/DbUtil.java +++ b/framework/db/src/main/java/com/cloud/utils/db/DbUtil.java @@ -41,12 +41,13 @@ import javax.persistence.Table; import javax.persistence.Transient; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable; public class DbUtil { - protected final static Logger LOGGER = Logger.getLogger(DbUtil.class); + protected static Logger LOGGER = LogManager.getLogger(DbUtil.class); private static Map s_connectionForGlobalLocks = new HashMap(); diff --git a/framework/db/src/main/java/com/cloud/utils/db/DriverLoader.java b/framework/db/src/main/java/com/cloud/utils/db/DriverLoader.java index 55fc1dbb6ed7..ac783fa046ff 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/DriverLoader.java +++ 
b/framework/db/src/main/java/com/cloud/utils/db/DriverLoader.java @@ -23,13 +23,14 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.exception.CloudRuntimeException; public class DriverLoader { - private static final Logger LOGGER = Logger.getLogger(DriverLoader.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(DriverLoader.class); private static final List LOADED_DRIVERS; private static final Map DRIVERS; diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index b46b82f50b06..fdb48e31f554 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -59,7 +59,6 @@ import javax.persistence.Table; import javax.persistence.TableGenerator; -import org.apache.log4j.Logger; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; @@ -121,7 +120,6 @@ **/ @DB public abstract class GenericDaoBase extends ComponentLifecycleBase implements GenericDao, ComponentMethodInterceptable { - private final static Logger s_logger = Logger.getLogger(GenericDaoBase.class); protected final static TimeZone s_gmtTimeZone = TimeZone.getTimeZone("GMT"); @@ -268,26 +266,26 @@ protected GenericDaoBase() { _searchEnhancer.setSuperclass(_entityBeanType); _searchEnhancer.setCallback(new UpdateBuilder(this)); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Select SQL: " + _partialSelectSql.first().toString()); - s_logger.trace("Remove SQL: " + (_removeSql != null ? 
_removeSql.first() : "No remove sql")); - s_logger.trace("Select by Id SQL: " + _selectByIdSql); - s_logger.trace("Table References: " + _tables); - s_logger.trace("Insert SQLs:"); + if (logger.isTraceEnabled()) { + logger.trace("Select SQL: " + _partialSelectSql.first().toString()); + logger.trace("Remove SQL: " + (_removeSql != null ? _removeSql.first() : "No remove sql")); + logger.trace("Select by Id SQL: " + _selectByIdSql); + logger.trace("Table References: " + _tables); + logger.trace("Insert SQLs:"); for (final Pair insertSql : _insertSqls) { - s_logger.trace(insertSql.first()); + logger.trace(insertSql.first()); } - s_logger.trace("Delete SQLs"); + logger.trace("Delete SQLs"); for (final Pair deletSql : _deleteSqls) { - s_logger.trace(deletSql.first()); + logger.trace(deletSql.first()); } - s_logger.trace("Collection SQLs"); + logger.trace("Collection SQLs"); for (Attribute attr : _ecAttributes) { EcInfo info = (EcInfo)attr.attache; - s_logger.trace(info.insertSql); - s_logger.trace(info.selectSql); + logger.trace(info.insertSql); + logger.trace(info.selectSql); } } @@ -417,7 +415,7 @@ public List searchIncludingRemoved(SearchCriteria sc, final Filter filter, } } - if (s_logger.isDebugEnabled() && lock != null) { + if (logger.isDebugEnabled() && lock != null) { txn.registerLock(pstmt.toString()); } final ResultSet rs = pstmt.executeQuery(); @@ -784,8 +782,8 @@ protected int addJoinAttributes(int count, PreparedStatement pstmt, Collection clazz = _idField.getType(); final AttributeOverride[] overrides = DbUtil.getAttributeOverrides(_idField); for (final Field field : clazz.getDeclaredFields()) { @@ -1396,7 +1394,7 @@ protected int checkCountOfRecordsAgainstTheResultSetSize(int count, int resultSe String stackTrace = ExceptionUtils.getStackTrace(new CloudRuntimeException(String.format("The query to count all the records of [%s] resulted in a value smaller than" + " the result set's size [count of records: %s, result set's size: %s]. 
Using the result set's size instead.", _entityBeanType, count, resultSetSize))); - s_logger.warn(stackTrace); + logger.warn(stackTrace); return resultSetSize; } @@ -1730,7 +1728,7 @@ protected T toEntityBean(final ResultSet result, final boolean cache) throws SQL try { _cache.put(new Element(_idField.get(entity), entity)); } catch (final Exception e) { - s_logger.debug("Can't put it in the cache", e); + logger.debug("Can't put it in the cache", e); } } @@ -1752,7 +1750,7 @@ protected T toVO(ResultSet result, boolean cache) throws SQLException { try { _cache.put(new Element(_idField.get(entity), entity)); } catch (final Exception e) { - s_logger.debug("Can't put it in the cache", e); + logger.debug("Can't put it in the cache", e); } } @@ -1959,7 +1957,7 @@ protected void createCache(final Map params) { final int idle = NumbersUtil.parseInt((String)params.get("cache.time.to.idle"), 300); _cache = new Cache(getName(), maxElements, false, live == -1, live == -1 ? Integer.MAX_VALUE : live, idle); cm.addCache(_cache); - s_logger.info("Cache created: " + _cache.toString()); + logger.info("Cache created: " + _cache.toString()); } else { _cache = null; } diff --git a/framework/db/src/main/java/com/cloud/utils/db/GlobalLock.java b/framework/db/src/main/java/com/cloud/utils/db/GlobalLock.java index 662ba921ce90..523f90b28669 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GlobalLock.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GlobalLock.java @@ -22,7 +22,8 @@ import java.util.Map; import java.util.concurrent.Callable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.Profiler; @@ -43,7 +44,7 @@ // lock.releaseRef(); // public class GlobalLock { - protected final static Logger s_logger = Logger.getLogger(GlobalLock.class); + protected Logger logger = LogManager.getLogger(getClass()); private String name; private int lockCount = 0; @@ -74,7 +75,7 @@ public int 
releaseRef() { refCount = referenceCount; if (referenceCount < 0) - s_logger.warn("Unmatched Global lock " + name + " reference usage detected, check your code!"); + logger.warn("Unmatched Global lock " + name + " reference usage detected, check your code!"); if (referenceCount == 0) needToRemove = true; @@ -101,14 +102,14 @@ public static GlobalLock getInternLock(String name) { } } - private static void releaseInternLock(String name) { + private void releaseInternLock(String name) { synchronized (s_lockMap) { GlobalLock lock = s_lockMap.get(name); if (lock != null) { if (lock.referenceCount == 0) s_lockMap.remove(name); } else { - s_logger.warn("Releasing " + name + ", but it is already released."); + logger.warn("Releasing " + name + ", but it is already released."); } } } @@ -121,12 +122,12 @@ public boolean lock(int timeoutSeconds) { while (true) { synchronized (this) { if (ownerThread != null && ownerThread == Thread.currentThread()) { - s_logger.warn("Global lock re-entrance detected"); + logger.warn("Global lock re-entrance detected"); lockCount++; - if (s_logger.isTraceEnabled()) - s_logger.trace("lock " + name + " is acquired, lock count :" + lockCount); + if (logger.isTraceEnabled()) + logger.trace("lock " + name + " is acquired, lock count :" + lockCount); return true; } @@ -156,8 +157,8 @@ public boolean lock(int timeoutSeconds) { lockCount++; holdingStartTick = System.currentTimeMillis(); - if (s_logger.isTraceEnabled()) - s_logger.trace("lock " + name + " is acquired, lock count :" + lockCount); + if (logger.isTraceEnabled()) + logger.trace("lock " + name + " is acquired, lock count :" + lockCount); return true; } } else { @@ -183,8 +184,8 @@ public boolean unlock() { ownerThread = null; DbUtil.releaseGlobalLock(name); - if (s_logger.isTraceEnabled()) - s_logger.trace("lock " + name + " is returned to free state, total holding time :" + (System.currentTimeMillis() - holdingStartTick)); + if (logger.isTraceEnabled()) + logger.trace("lock " + name + " 
is returned to free state, total holding time :" + (System.currentTimeMillis() - holdingStartTick)); holdingStartTick = 0; // release holding position in intern map when we released the DB connection @@ -192,8 +193,8 @@ public boolean unlock() { notifyAll(); } - if (s_logger.isTraceEnabled()) - s_logger.trace("lock " + name + " is released, lock count :" + lockCount); + if (logger.isTraceEnabled()) + logger.trace("lock " + name + " is released, lock count :" + lockCount); return true; } return false; @@ -204,15 +205,15 @@ public String getName() { return name; } - public static T executeWithLock(final String operationId, final int lockAcquisitionTimeout, final Callable operation) throws Exception { + public T executeWithLock(final String operationId, final int lockAcquisitionTimeout, final Callable operation) throws Exception { final GlobalLock lock = GlobalLock.getInternLock(operationId); try { if (!lock.lock(lockAcquisitionTimeout)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(format("Failed to acquire lock for operation id %1$s", operationId)); + if (logger.isDebugEnabled()) { + logger.debug(format("Failed to acquire lock for operation id %1$s", operationId)); } return null; } @@ -229,7 +230,7 @@ public static T executeWithLock(final String operationId, final int lockAcqu } - public static T executeWithNoWaitLock(final String operationId, final Callable operation) throws Exception { + public T executeWithNoWaitLock(final String operationId, final Callable operation) throws Exception { return executeWithLock(operationId, 0, operation); diff --git a/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java b/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java index 485a68ab3134..a9a1ce4272eb 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java +++ b/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java @@ -29,7 +29,8 @@ import javax.management.StandardMBean; -import org.apache.log4j.Logger; +import 
org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.DateUtil; import com.cloud.utils.exception.CloudRuntimeException; @@ -37,7 +38,7 @@ import com.cloud.utils.time.InaccurateClock; public class Merovingian2 extends StandardMBean implements MerovingianMBean { - private static final Logger s_logger = Logger.getLogger(Merovingian2.class); + protected static Logger LOGGER = LogManager.getLogger(Merovingian2.class); private static final String ACQUIRE_SQL = "INSERT INTO op_lock (op_lock.key, op_lock.mac, op_lock.ip, op_lock.thread, op_lock.acquired_on, waiters) VALUES (?, ?, ?, ?, ?, 1)"; @@ -70,14 +71,14 @@ private Merovingian2(long msId) { conn.setAutoCommit(true); _concierge = new ConnectionConcierge("LockController", conn, true); } catch (SQLException e) { - s_logger.error("Unable to get a new db connection", e); + LOGGER.error("Unable to get a new db connection", e); throw new CloudRuntimeException("Unable to initialize a connection to the database for locking purposes", e); } finally { if (_concierge == null && conn != null) { try { conn.close(); } catch (SQLException e) { - s_logger.debug("closing connection failed after everything else.", e); + LOGGER.debug("closing connection failed after everything else.", e); } } } @@ -90,7 +91,7 @@ public static synchronized Merovingian2 createLockController(long msId) { try { JmxUtil.registerMBean("Locks", "Locks", s_instance); } catch (Exception e) { - s_logger.error("Unable to register for JMX", e); + LOGGER.error("Unable to register for JMX", e); } return s_instance; } @@ -123,8 +124,8 @@ public boolean acquire(String key, int timeInSeconds) { String threadName = th.getName(); int threadId = System.identityHashCode(th); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Acquiring lck-" + key + " with wait time of " + timeInSeconds); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Acquiring lck-" + key + " with wait time of " + timeInSeconds); } long startTime = 
InaccurateClock.getTime(); @@ -139,17 +140,17 @@ public boolean acquire(String key, int timeInSeconds) { } } try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Sleeping more time while waiting for lck-" + key); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Sleeping more time while waiting for lck-" + key); } Thread.sleep(5000); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while aquiring " + key); + LOGGER.debug("[ignored] interrupted while aquiring " + key); } } String msg = "Timed out on acquiring lock " + key + " . Waited for " + ((InaccurateClock.getTime() - startTime)/1000) + "seconds"; Exception e = new CloudRuntimeException(msg); - s_logger.warn(msg, e); + LOGGER.warn(msg, e); return false; } @@ -161,8 +162,8 @@ protected boolean increment(String key, String threadName, int threadId) { pstmt.setInt(4, threadId); int rows = pstmt.executeUpdate(); assert (rows <= 1) : "hmm...non unique key? " + pstmt; - if (s_logger.isTraceEnabled()) { - s_logger.trace("lck-" + key + (rows == 1 ? " acquired again" : " failed to acquire again")); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("lck-" + key + (rows == 1 ? 
" acquired again" : " failed to acquire again")); } if (rows == 1) { incrCount(); @@ -170,7 +171,7 @@ protected boolean increment(String key, String threadName, int threadId) { } return false; } catch (Exception e) { - s_logger.error("increment:Exception:"+e.getMessage()); + LOGGER.error("increment:Exception:"+e.getMessage()); throw new CloudRuntimeException("increment:Exception:"+e.getMessage(), e); } } @@ -186,8 +187,8 @@ protected boolean doAcquire(String key, String threadName, int threadId) { try { int rows = pstmt.executeUpdate(); if (rows == 1) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Acquired for lck-" + key); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Acquired for lck-" + key); } incrCount(); return true; @@ -198,11 +199,11 @@ protected boolean doAcquire(String key, String threadName, int threadId) { } } } catch (SQLException e) { - s_logger.error("doAcquire:Exception:"+e.getMessage()); + LOGGER.error("doAcquire:Exception:"+e.getMessage()); throw new CloudRuntimeException("Unable to lock " + key + ". 
Waited " + (InaccurateClock.getTime() - startTime), e); } - s_logger.trace("Unable to acquire lck-" + key); + LOGGER.trace("Unable to acquire lck-" + key); return false; } @@ -216,11 +217,11 @@ protected Map isLocked(String key) { } return toLock(rs); }catch (SQLException e) { - s_logger.error("isLocked:Exception:"+e.getMessage()); + LOGGER.error("isLocked:Exception:"+e.getMessage()); throw new CloudRuntimeException("isLocked:Exception:"+e.getMessage(), e); } } catch (SQLException e) { - s_logger.error("isLocked:Exception:"+e.getMessage()); + LOGGER.error("isLocked:Exception:"+e.getMessage()); throw new CloudRuntimeException("isLocked:Exception:"+e.getMessage(), e); } } @@ -231,20 +232,20 @@ public void cleanupThisServer() { @Override public void cleanupForServer(long msId) { - s_logger.info("Cleaning up locks for " + msId); + LOGGER.info("Cleaning up locks for " + msId); try { synchronized (_concierge.conn()) { try(PreparedStatement pstmt = _concierge.conn().prepareStatement(CLEANUP_MGMT_LOCKS_SQL);) { pstmt.setLong(1, msId); int rows = pstmt.executeUpdate(); - s_logger.info("Released " + rows + " locks for " + msId); + LOGGER.info("Released " + rows + " locks for " + msId); }catch (Exception e) { - s_logger.error("cleanupForServer:Exception:"+e.getMessage()); + LOGGER.error("cleanupForServer:Exception:"+e.getMessage()); throw new CloudRuntimeException("cleanupForServer:Exception:"+e.getMessage(), e); } } } catch (Exception e) { - s_logger.error("cleanupForServer:Exception:"+e.getMessage()); + LOGGER.error("cleanupForServer:Exception:"+e.getMessage()); throw new CloudRuntimeException("cleanupForServer:Exception:"+e.getMessage(), e); } } @@ -262,30 +263,30 @@ public boolean release(String key) { int rows = pstmt.executeUpdate(); assert (rows <= 1) : "hmmm....keys not unique? 
" + pstmt; - if (s_logger.isTraceEnabled()) { - s_logger.trace("lck-" + key + " released"); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("lck-" + key + " released"); } if (rows == 1) { try (PreparedStatement rel_sql_pstmt = _concierge.conn().prepareStatement(RELEASE_SQL);) { rel_sql_pstmt.setString(1, key); rel_sql_pstmt.setLong(2, _msId); int result = rel_sql_pstmt.executeUpdate(); - if (result == 1 && s_logger.isTraceEnabled()) { - s_logger.trace("lck-" + key + " removed"); + if (result == 1 && LOGGER.isTraceEnabled()) { + LOGGER.trace("lck-" + key + " removed"); } decrCount(); }catch (Exception e) { - s_logger.error("release:Exception:"+ e.getMessage()); + LOGGER.error("release:Exception:"+ e.getMessage()); throw new CloudRuntimeException("release:Exception:"+ e.getMessage(), e); } } else if (rows < 1) { String msg = ("Was unable to find lock for the key " + key + " and thread id " + threadId); Exception e = new CloudRuntimeException(msg); - s_logger.warn(msg, e); + LOGGER.warn(msg, e); } return rows == 1; } catch (Exception e) { - s_logger.error("release:Exception:"+ e.getMessage()); + LOGGER.error("release:Exception:"+ e.getMessage()); throw new CloudRuntimeException("release:Exception:"+ e.getMessage(), e); } } @@ -320,11 +321,11 @@ protected List> getLocks(String sql, Long msId) { { return toLocks(rs); }catch (Exception e) { - s_logger.error("getLocks:Exception:"+e.getMessage()); + LOGGER.error("getLocks:Exception:"+e.getMessage()); throw new CloudRuntimeException("getLocks:Exception:"+e.getMessage(), e); } } catch (Exception e) { - s_logger.error("getLocks:Exception:"+e.getMessage()); + LOGGER.error("getLocks:Exception:"+e.getMessage()); throw new CloudRuntimeException("getLocks:Exception:"+e.getMessage(), e); } } @@ -360,11 +361,11 @@ public List> getLocksAcquiredBy(long msId, String threadName return toLocks(rs); } catch (Exception e) { - s_logger.error("getLocksAcquiredBy:Exception:"+e.getMessage()); + 
LOGGER.error("getLocksAcquiredBy:Exception:"+e.getMessage()); throw new CloudRuntimeException("Can't get locks " + pstmt, e); } } catch (Exception e) { - s_logger.error("getLocksAcquiredBy:Exception:"+e.getMessage()); + LOGGER.error("getLocksAcquiredBy:Exception:"+e.getMessage()); throw new CloudRuntimeException("getLocksAcquiredBy:Exception:"+e.getMessage(), e); } } @@ -390,21 +391,21 @@ public void cleanupThread() { assert (false) : "Abandon hope, all ye who enter here....There were still " + rows + ":" + c + " locks not released when the transaction ended, check for lock not released or @DB is not added to the code that using the locks!"; } catch (Exception e) { - s_logger.error("cleanupThread:Exception:" + e.getMessage()); + LOGGER.error("cleanupThread:Exception:" + e.getMessage()); throw new CloudRuntimeException("cleanupThread:Exception:" + e.getMessage(), e); } } @Override public boolean releaseLockAsLastResortAndIReallyKnowWhatIAmDoing(String key) { - s_logger.info("Releasing a lock from JMX lck-" + key); + LOGGER.info("Releasing a lock from JMX lck-" + key); try (PreparedStatement pstmt = _concierge.conn().prepareStatement(RELEASE_LOCK_SQL);) { pstmt.setString(1, key); int rows = pstmt.executeUpdate(); return rows > 0; } catch (Exception e) { - s_logger.error("releaseLockAsLastResortAndIReallyKnowWhatIAmDoing : Exception: " + e.getMessage()); + LOGGER.error("releaseLockAsLastResortAndIReallyKnowWhatIAmDoing : Exception: " + e.getMessage()); return false; } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/ScriptRunner.java b/framework/db/src/main/java/com/cloud/utils/db/ScriptRunner.java index 51124f693c5b..56fa8593161b 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/ScriptRunner.java +++ b/framework/db/src/main/java/com/cloud/utils/db/ScriptRunner.java @@ -29,13 +29,14 @@ import java.sql.SQLException; import java.sql.Statement; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; /** * Tool to run database scripts */ public class ScriptRunner { - private static Logger s_logger = Logger.getLogger(ScriptRunner.class); + private static Logger LOGGER = LogManager.getLogger(ScriptRunner.class); private static final String DEFAULT_DELIMITER = ";"; @@ -208,17 +209,17 @@ private void print(Object o) { private void println(Object o) { _logBuffer.append(o); if (verbosity) - s_logger.debug(_logBuffer.toString()); + LOGGER.debug(_logBuffer.toString()); _logBuffer = new StringBuffer(); } private void printlnError(Object o) { - s_logger.error("" + o); + LOGGER.error("" + o); } private void flush() { if (_logBuffer.length() > 0) { - s_logger.debug(_logBuffer.toString()); + LOGGER.debug(_logBuffer.toString()); _logBuffer = new StringBuffer(); } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/SequenceFetcher.java b/framework/db/src/main/java/com/cloud/utils/db/SequenceFetcher.java index 0ea8401a03cc..f902fda3bf14 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/SequenceFetcher.java +++ b/framework/db/src/main/java/com/cloud/utils/db/SequenceFetcher.java @@ -29,7 +29,8 @@ import javax.persistence.TableGenerator; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -42,7 +43,7 @@ * */ public class SequenceFetcher { - private final static Logger s_logger = Logger.getLogger(SequenceFetcher.class); + protected Logger logger = LogManager.getLogger(getClass()); ExecutorService _executors; private final static Random random = new Random(); @@ -63,7 +64,7 @@ public T getNextSequence(Class clazz, TableGenerator tg, Object key, bool try { return future.get(); } catch (Exception e) { - s_logger.warn("Unable to get sequeunce for " + tg.table() + ":" + tg.pkColumnValue(), e); + logger.warn("Unable to get sequeunce for " + tg.table() + ":" + tg.pkColumnValue(), e); return null; } } @@ 
-138,11 +139,11 @@ public T call() throws Exception { } } } catch (SQLException e) { - s_logger.warn("Caught this exception when running: " + (selectStmt != null ? selectStmt.toString() : ""), e); + logger.warn("Caught this exception when running: " + (selectStmt != null ? selectStmt.toString() : ""), e); } if (obj == null) { - s_logger.warn("Unable to get a sequence: " + updateStmt.toString()); + logger.warn("Unable to get a sequence: " + updateStmt.toString()); return null; } @@ -153,7 +154,7 @@ public T call() throws Exception { txn.commit(); return (T)obj; } catch (SQLException e) { - s_logger.warn("Caught this exception when running: " + (updateStmt != null ? updateStmt.toString() : ""), e); + logger.warn("Caught this exception when running: " + (updateStmt != null ? updateStmt.toString() : ""), e); } } } diff --git a/framework/db/src/main/java/com/cloud/utils/db/Transaction.java b/framework/db/src/main/java/com/cloud/utils/db/Transaction.java index c6a491a216d0..24cd76e2e100 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/Transaction.java +++ b/framework/db/src/main/java/com/cloud/utils/db/Transaction.java @@ -18,14 +18,12 @@ import java.util.concurrent.atomic.AtomicLong; -import org.apache.log4j.Logger; public class Transaction { private final static AtomicLong counter = new AtomicLong(0); private final static TransactionStatus STATUS = new TransactionStatus() { }; - private static final Logger s_logger = Logger.getLogger(Transaction.class); @SuppressWarnings("deprecation") public static T execute(TransactionCallbackWithException callback) throws E { diff --git a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java index df0df60f5194..be3ec1f5a676 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java +++ b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java @@ -42,7 +42,8 @@ import org.apache.commons.pool2.ObjectPool; 
import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.commons.pool2.impl.GenericObjectPoolConfig; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.Pair; import com.cloud.utils.PropertiesUtil; @@ -64,10 +65,10 @@ * it is stored with TLS and is one per thread. Use appropriately. */ public class TransactionLegacy implements Closeable { - private static final Logger s_logger = Logger.getLogger(Transaction.class.getName() + "." + "Transaction"); - private static final Logger s_stmtLogger = Logger.getLogger(Transaction.class.getName() + "." + "Statement"); - private static final Logger s_lockLogger = Logger.getLogger(Transaction.class.getName() + "." + "Lock"); - private static final Logger s_connLogger = Logger.getLogger(Transaction.class.getName() + "." + "Connection"); + protected static Logger LOGGER = LogManager.getLogger(Transaction.class.getName() + "." + "Transaction"); + protected Logger stmtLogger = LogManager.getLogger(Transaction.class.getName() + "." + "Statement"); + protected Logger lockLogger = LogManager.getLogger(Transaction.class.getName() + "." + "Lock"); + protected static Logger CONN_LOGGER = LogManager.getLogger(Transaction.class.getName() + "." 
+ "Connection"); private static final ThreadLocal tls = new ThreadLocal(); private static final String START_TXN = "start_txn"; @@ -89,7 +90,7 @@ public class TransactionLegacy implements Closeable { try { JmxUtil.registerMBean("Transaction", "Transaction", s_mbean); } catch (Exception e) { - s_logger.error("Unable to register mbean for transaction", e); + LOGGER.error("Unable to register mbean for transaction", e); } } @@ -152,8 +153,8 @@ public static TransactionLegacy open(final String name) { public static TransactionLegacy open(final String name, final short databaseId, final boolean forceDbChange) { TransactionLegacy txn = tls.get(); if (txn == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Creating the transaction: " + name); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Creating the transaction: " + name); } txn = new TransactionLegacy(name, false, databaseId); tls.set(txn); @@ -198,7 +199,7 @@ protected StackElement peekInStack(Object obj) { } public void registerLock(String sql) { - if (_txn && s_lockLogger.isDebugEnabled()) { + if (_txn && lockLogger.isDebugEnabled()) { Pair time = new Pair(sql, System.currentTimeMillis()); _lockTimes.add(time); } @@ -210,8 +211,8 @@ public boolean dbTxnStarted() { public static Connection getStandaloneConnectionWithException() throws SQLException { Connection conn = s_ds.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection: dbconn" + System.identityHashCode(conn)); + if (CONN_LOGGER.isTraceEnabled()) { + CONN_LOGGER.trace("Retrieving a standalone connection: dbconn" + System.identityHashCode(conn)); } return conn; } @@ -220,7 +221,7 @@ public static Connection getStandaloneConnection() { try { return getStandaloneConnectionWithException(); } catch (SQLException e) { - s_logger.error("Unexpected exception: ", e); + LOGGER.error("Unexpected exception: ", e); return null; } } @@ -228,12 +229,12 @@ public static Connection getStandaloneConnection() 
{ public static Connection getStandaloneUsageConnection() { try { Connection conn = s_usageDS.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn)); + if (CONN_LOGGER.isTraceEnabled()) { + CONN_LOGGER.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn)); } return conn; } catch (SQLException e) { - s_logger.warn("Unexpected exception: ", e); + LOGGER.warn("Unexpected exception: ", e); return null; } } @@ -241,12 +242,12 @@ public static Connection getStandaloneUsageConnection() { public static Connection getStandaloneSimulatorConnection() { try { Connection conn = s_simulatorDS.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection for simulator: dbconn" + System.identityHashCode(conn)); + if (CONN_LOGGER.isTraceEnabled()) { + CONN_LOGGER.trace("Retrieving a standalone connection for simulator: dbconn" + System.identityHashCode(conn)); } return conn; } catch (SQLException e) { - s_logger.warn("Unexpected exception: ", e); + LOGGER.warn("Unexpected exception: ", e); return null; } } @@ -301,12 +302,12 @@ protected static boolean checkAnnotation(int stack, TransactionLegacy txn) { } // relax stack structure for several places that @DB required injection is not in place - s_logger.warn("Non-standard stack context that Transaction context is manaully placed into the calling chain. Stack chain: " + sb); + LOGGER.warn("Non-standard stack context that Transaction context is manaully placed into the calling chain. 
Stack chain: " + sb); return true; } protected static String buildName() { - if (s_logger.isDebugEnabled()) { + if (LOGGER.isDebugEnabled()) { final StackTraceElement[] stacks = Thread.currentThread().getStackTrace(); final StringBuilder str = new StringBuilder(); int i = 3, j = 3; @@ -398,14 +399,14 @@ public boolean release(final String name) { */ @Deprecated public void start() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("txn: start requested by: " + buildName()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("txn: start requested by: " + buildName()); } _stack.push(new StackElement(START_TXN, null)); if (_txn) { - s_logger.trace("txn: has already been started."); + LOGGER.trace("txn: has already been started."); return; } @@ -414,10 +415,10 @@ public void start() { _txnTime = System.currentTimeMillis(); if (_conn != null) { try { - s_logger.trace("txn: set auto commit to false"); + LOGGER.trace("txn: set auto commit to false"); _conn.setAutoCommit(false); } catch (final SQLException e) { - s_logger.warn("Unable to set auto commit: ", e); + LOGGER.warn("Unable to set auto commit: ", e); throw new CloudRuntimeException("Unable to set auto commit: ", e); } } @@ -426,8 +427,8 @@ public void start() { protected void closePreviousStatement() { if (_stmt != null) { try { - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Closing: " + _stmt.toString()); + if (stmtLogger.isTraceEnabled()) { + stmtLogger.trace("Closing: " + _stmt.toString()); } try { ResultSet rs = _stmt.getResultSet(); @@ -435,11 +436,11 @@ protected void closePreviousStatement() { rs.close(); } } catch (SQLException e) { - s_stmtLogger.trace("Unable to close resultset"); + stmtLogger.trace("Unable to close resultset"); } _stmt.close(); } catch (final SQLException e) { - s_stmtLogger.trace("Unable to close statement: " + _stmt.toString()); + stmtLogger.trace("Unable to close statement: " + _stmt.toString()); } finally { _stmt = null; } @@ -466,8 +467,8 @@ public PreparedStatement 
prepareAutoCloseStatement(final String sql) throws SQLE public PreparedStatement prepareStatement(final String sql) throws SQLException { final Connection conn = getConnection(); final PreparedStatement pstmt = conn.prepareStatement(sql); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); + if (stmtLogger.isTraceEnabled()) { + stmtLogger.trace("Preparing: " + sql); } return pstmt; } @@ -486,8 +487,8 @@ public PreparedStatement prepareStatement(final String sql) throws SQLException public PreparedStatement prepareAutoCloseStatement(final String sql, final int autoGeneratedKeys) throws SQLException { final Connection conn = getConnection(); final PreparedStatement pstmt = conn.prepareStatement(sql, autoGeneratedKeys); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); + if (stmtLogger.isTraceEnabled()) { + stmtLogger.trace("Preparing: " + sql); } closePreviousStatement(); _stmt = pstmt; @@ -508,8 +509,8 @@ public PreparedStatement prepareAutoCloseStatement(final String sql, final int a public PreparedStatement prepareAutoCloseStatement(final String sql, final String[] columnNames) throws SQLException { final Connection conn = getConnection(); final PreparedStatement pstmt = conn.prepareStatement(sql, columnNames); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); + if (stmtLogger.isTraceEnabled()) { + stmtLogger.trace("Preparing: " + sql); } closePreviousStatement(); _stmt = pstmt; @@ -529,8 +530,8 @@ public PreparedStatement prepareAutoCloseStatement(final String sql, final Strin public PreparedStatement prepareAutoCloseStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { final Connection conn = getConnection(); final PreparedStatement pstmt = conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); + 
if (stmtLogger.isTraceEnabled()) { + stmtLogger.trace("Preparing: " + sql); } closePreviousStatement(); _stmt = pstmt; @@ -555,7 +556,7 @@ public Connection getConnection() throws SQLException { if (s_ds != null) { _conn = s_ds.getConnection(); } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); + LOGGER.warn("A static-initialized variable becomes null, process is dying?"); throw new CloudRuntimeException("Database is not initialized, process is dying?"); } break; @@ -563,7 +564,7 @@ public Connection getConnection() throws SQLException { if (s_usageDS != null) { _conn = s_usageDS.getConnection(); } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); + LOGGER.warn("A static-initialized variable becomes null, process is dying?"); throw new CloudRuntimeException("Database is not initialized, process is dying?"); } break; @@ -571,7 +572,7 @@ public Connection getConnection() throws SQLException { if (s_simulatorDS != null) { _conn = s_simulatorDS.getConnection(); } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); + LOGGER.warn("A static-initialized variable becomes null, process is dying?"); throw new CloudRuntimeException("Database is not initialized, process is dying?"); } break; @@ -587,12 +588,12 @@ public Connection getConnection() throws SQLException { // see http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html // _stack.push(new StackElement(CREATE_CONN, null)); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Creating a DB connection with " + (_txn ? " txn: " : " no txn: ") + " for " + _dbId + ": dbconn" + System.identityHashCode(_conn) + + if (CONN_LOGGER.isTraceEnabled()) { + CONN_LOGGER.trace("Creating a DB connection with " + (_txn ? " txn: " : " no txn: ") + " for " + _dbId + ": dbconn" + System.identityHashCode(_conn) + ". 
Stack: " + buildName()); } } else { - s_logger.trace("conn: Using existing DB connection"); + LOGGER.trace("conn: Using existing DB connection"); } return _conn; @@ -602,8 +603,8 @@ protected boolean takeOver(final String name, final boolean create) { if (_stack.size() != 0) { if (!create) { // If it is not a create transaction, then let's just use the current one. - if (s_logger.isTraceEnabled()) { - s_logger.trace("Using current transaction: " + toString()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Using current transaction: " + toString()); } mark(name); return false; @@ -613,19 +614,19 @@ protected boolean takeOver(final String name, final boolean create) { if (se.type == CREATE_TXN) { // This create is called inside of another create. Which is ok? // We will let that create be responsible for cleaning up. - if (s_logger.isTraceEnabled()) { - s_logger.trace("Create using current transaction: " + toString()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Create using current transaction: " + toString()); } mark(name); return false; } - s_logger.warn("Encountered a transaction that has leaked. Cleaning up. " + toString()); + LOGGER.warn("Encountered a transaction that has leaked. Cleaning up. " + toString()); cleanup(); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Took over the transaction: " + name); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Took over the transaction: " + name); } _stack.push(new StackElement(create ? CREATE_TXN : CURRENT_TXN, name)); _name = name; @@ -656,7 +657,7 @@ public void close() { removeUpTo(CURRENT_TXN, null); if (_stack.size() == 0) { - s_logger.trace("Transaction is done"); + LOGGER.trace("Transaction is done"); cleanup(); } } @@ -670,8 +671,8 @@ public void close() { */ public boolean close(final String name) { if (_name == null) { // Already cleaned up. - if (s_logger.isTraceEnabled()) { - s_logger.trace("Already cleaned up." 
+ buildName()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Already cleaned up." + buildName()); } return true; } @@ -681,13 +682,13 @@ public boolean close(final String name) { return false; } - if (s_logger.isDebugEnabled() && _stack.size() > 2) { - s_logger.debug("Transaction is not closed properly: " + toString() + ". Called by " + buildName()); + if (LOGGER.isDebugEnabled() && _stack.size() > 2) { + LOGGER.debug("Transaction is not closed properly: " + toString() + ". Called by " + buildName()); } cleanup(); - s_logger.trace("All done"); + LOGGER.trace("All done"); return true; } @@ -696,9 +697,9 @@ protected boolean hasTxnInStack() { } protected void clearLockTimes() { - if (s_lockLogger.isDebugEnabled()) { + if (lockLogger.isDebugEnabled()) { for (Pair time : _lockTimes) { - s_lockLogger.trace("SQL " + time.first() + " took " + (System.currentTimeMillis() - time.second())); + lockLogger.trace("SQL " + time.first() + " took " + (System.currentTimeMillis() - time.second())); } _lockTimes.clear(); } @@ -706,7 +707,7 @@ protected void clearLockTimes() { public boolean commit() { if (!_txn) { - s_logger.warn("txn: Commit called when it is not a transaction: " + buildName()); + LOGGER.warn("txn: Commit called when it is not a transaction: " + buildName()); return false; } @@ -720,8 +721,8 @@ public boolean commit() { } if (hasTxnInStack()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("txn: Not committing because transaction started elsewhere: " + buildName() + " / " + toString()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("txn: Not committing because transaction started elsewhere: " + buildName() + " / " + toString()); } return false; } @@ -730,7 +731,7 @@ public boolean commit() { try { if (_conn != null) { _conn.commit(); - s_logger.trace("txn: DB Changes committed. Time = " + (System.currentTimeMillis() - _txnTime)); + LOGGER.trace("txn: DB Changes committed. 
Time = " + (System.currentTimeMillis() - _txnTime)); clearLockTimes(); closeConnection(); } @@ -749,22 +750,22 @@ protected void closeConnection() { } if (_txn) { - s_connLogger.trace("txn: Not closing DB connection because we're still in a transaction."); + CONN_LOGGER.trace("txn: Not closing DB connection because we're still in a transaction."); return; } try { // we should only close db connection when it is not user managed if (_dbId != CONNECTED_DB) { - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn)); + if (CONN_LOGGER.isTraceEnabled()) { + CONN_LOGGER.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn)); } _conn.close(); _conn = null; s_mbean.removeTransaction(this); } } catch (final SQLException e) { - s_logger.warn("Unable to close connection", e); + LOGGER.warn("Unable to close connection", e); } } @@ -782,8 +783,8 @@ protected void removeUpTo(String type, Object ref) { } if (item.type == CURRENT_TXN) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Releasing the current txn: " + (item.ref != null ? item.ref : "")); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Releasing the current txn: " + (item.ref != null ? 
item.ref : "")); } } else if (item.type == CREATE_CONN) { closeConnection(); @@ -795,13 +796,13 @@ protected void removeUpTo(String type, Object ref) { _conn.rollback((Savepoint)ref); rollback = false; } catch (final SQLException e) { - s_logger.warn("Unable to rollback Txn.", e); + LOGGER.warn("Unable to rollback Txn.", e); } } } else if (item.type == STATEMENT) { try { - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Closing: " + ref.toString()); + if (stmtLogger.isTraceEnabled()) { + stmtLogger.trace("Closing: " + ref.toString()); } Statement stmt = (Statement)ref; try { @@ -810,21 +811,21 @@ protected void removeUpTo(String type, Object ref) { rs.close(); } } catch (SQLException e) { - s_stmtLogger.trace("Unable to close resultset"); + stmtLogger.trace("Unable to close resultset"); } stmt.close(); } catch (final SQLException e) { - s_stmtLogger.trace("Unable to close statement: " + item); + stmtLogger.trace("Unable to close statement: " + item); } } else if (item.type == ATTACHMENT) { TransactionAttachment att = (TransactionAttachment)item.ref; - if (s_logger.isTraceEnabled()) { - s_logger.trace("Cleaning up " + att.getName()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Cleaning up " + att.getName()); } att.cleanup(); } } catch (Exception e) { - s_logger.error("Unable to clean up " + item, e); + LOGGER.error("Unable to clean up " + item, e); } } @@ -836,8 +837,8 @@ protected void removeUpTo(String type, Object ref) { protected void rollbackTransaction() { closePreviousStatement(); if (!_txn) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Rollback called for " + _name + " when there's no transaction: " + buildName()); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Rollback called for " + _name + " when there's no transaction: " + buildName()); } return; } @@ -845,15 +846,15 @@ protected void rollbackTransaction() { _txn = false; try { if (_conn != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rolling back the 
transaction: Time = " + (System.currentTimeMillis() - _txnTime) + " Name = " + _name + "; called by " + buildName()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Rolling back the transaction: Time = " + (System.currentTimeMillis() - _txnTime) + " Name = " + _name + "; called by " + buildName()); } _conn.rollback(); } clearLockTimes(); closeConnection(); } catch (final SQLException e) { - s_logger.warn("Unable to rollback", e); + LOGGER.warn("Unable to rollback", e); } } @@ -863,7 +864,7 @@ protected void rollbackSavepoint(Savepoint sp) { _conn.rollback(sp); } } catch (SQLException e) { - s_logger.warn("Unable to rollback to savepoint " + sp); + LOGGER.warn("Unable to rollback to savepoint " + sp); } if (!hasTxnInStack()) { @@ -979,7 +980,7 @@ private TransactionLegacy() { protected void finalize() throws Throwable { if (!(_conn == null && (_stack == null || _stack.size() == 0))) { assert (false) : "Oh Alex oh alex...something is wrong with how we're doing this"; - s_logger.error("Something went wrong that a transaction is orphaned before db connection is closed"); + LOGGER.error("Something went wrong that a transaction is orphaned before db connection is closed"); cleanup(); } } @@ -1025,7 +1026,7 @@ public static void initDataSource(Properties dbProps) { return; s_dbHAEnabled = Boolean.valueOf(dbProps.getProperty("db.ha.enabled")); - s_logger.info("Is Data Base High Availiability enabled? Ans : " + s_dbHAEnabled); + LOGGER.info("Is Data Base High Availiability enabled? Ans : " + s_dbHAEnabled); String loadBalanceStrategy = dbProps.getProperty("db.ha.loadBalanceStrategy"); // FIXME: If params are missing...default them???? 
final int cloudMaxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive")); @@ -1048,7 +1049,7 @@ public static void initDataSource(Properties dbProps) { } else if (cloudIsolationLevel.equalsIgnoreCase("readuncommitted")) { isolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED; } else { - s_logger.warn("Unknown isolation level " + cloudIsolationLevel + ". Using read uncommitted"); + LOGGER.warn("Unknown isolation level " + cloudIsolationLevel + ". Using read uncommitted"); } final boolean cloudTestOnBorrow = Boolean.parseBoolean(dbProps.getProperty("db.cloud.testOnBorrow")); @@ -1111,7 +1112,7 @@ public static void initDataSource(Properties dbProps) { simulatorConnectionUri = simulatorDriver + "://" + simulatorHost + ":" + simulatorPort + "/" + simulatorDbName + "?autoReconnect=" + simulatorAutoReconnect; } else { - s_logger.warn("db.simulator.uri was set, ignoring the following properties on db.properties: [db.simulator.driver, db.simulator.host, db.simulator.port, " + LOGGER.warn("db.simulator.uri was set, ignoring the following properties on db.properties: [db.simulator.driver, db.simulator.host, db.simulator.port, " + "db.simulator.name, db.simulator.autoReconnect]."); String[] splitUri = simulatorUri.split(":"); simulatorDriver = String.format("%s:%s", splitUri[0], splitUri[1]); @@ -1123,13 +1124,13 @@ public static void initDataSource(Properties dbProps) { s_simulatorDS = createDataSource(simulatorConnectionUri, simulatorUsername, simulatorPassword, simulatorMaxActive, simulatorMaxIdle, simulatorMaxWait, null, null, null, null, cloudValidationQuery, isolationLevel); } catch (Exception e) { - s_logger.debug("Simulator DB properties are not available. Not initializing simulator DS"); + LOGGER.debug("Simulator DB properties are not available. 
Not initializing simulator DS"); } } catch (final Exception e) { s_ds = getDefaultDataSource("cloud"); s_usageDS = getDefaultDataSource("cloud_usage"); s_simulatorDS = getDefaultDataSource("cloud_simulator"); - s_logger.warn( + LOGGER.warn( "Unable to load db configuration, using defaults with 5 connections. Falling back on assumed datasource on localhost:3306 using username:password=cloud:cloud. Please check your configuration", e); } @@ -1144,7 +1145,7 @@ protected static Pair getConnectionUriAndDriver(Properties dbPro driver = dbProps.getProperty(String.format("db.%s.driver", schema)); connectionUri = getPropertiesAndBuildConnectionUri(dbProps, loadBalanceStrategy, driver, useSSL, schema); } else { - s_logger.warn(String.format("db.%s.uri was set, ignoring the following properties for schema %s of db.properties: [host, port, name, driver, autoReconnect, url.params," + LOGGER.warn(String.format("db.%s.uri was set, ignoring the following properties for schema %s of db.properties: [host, port, name, driver, autoReconnect, url.params," + " replicas, ha.loadBalanceStrategy, ha.enable, failOverReadOnly, reconnectAtTxEnd, autoReconnectForPools, secondsBeforeRetrySource, queriesBeforeRetrySource, " + "initialTimeout].", schema, schema)); @@ -1153,7 +1154,7 @@ protected static Pair getConnectionUriAndDriver(Properties dbPro connectionUri = propertyUri; } - s_logger.info(String.format("Using the following URI to connect to %s database [%s].", schema, connectionUri)); + LOGGER.info(String.format("Using the following URI to connect to %s database [%s].", schema, connectionUri)); return new Pair<>(connectionUri, driver); } @@ -1169,7 +1170,7 @@ protected static String getPropertiesAndBuildConnectionUri(Properties dbProps, S if (s_dbHAEnabled) { dbHaParams = getDBHAParams(schema, dbProps); replicas = dbProps.getProperty(String.format("db.%s.replicas", schema)); - s_logger.info(String.format("The replicas configured for %s data base are %s.", schema, replicas)); + 
LOGGER.info(String.format("The replicas configured for %s data base are %s.", schema, replicas)); } return buildConnectionUri(loadBalanceStrategy, driver, useSSL, host, replicas, port, dbName, autoReconnect, urlParams, dbHaParams); diff --git a/framework/db/src/test/java/com/cloud/utils/db/DbAnnotatedBase.java b/framework/db/src/test/java/com/cloud/utils/db/DbAnnotatedBase.java index 146f79adf0b0..8a010203b510 100644 --- a/framework/db/src/test/java/com/cloud/utils/db/DbAnnotatedBase.java +++ b/framework/db/src/test/java/com/cloud/utils/db/DbAnnotatedBase.java @@ -21,13 +21,14 @@ import junit.framework.Assert; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; @Component @DB public class DbAnnotatedBase { - private static final Logger s_logger = Logger.getLogger(DbAnnotatedBase.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject DummyComponent _dummy; @@ -38,7 +39,7 @@ public void initTest() { } public void MethodWithClassDbAnnotated() { - s_logger.info("called"); + logger.info("called"); _dummy.sayHello(); } } diff --git a/framework/db/src/test/java/com/cloud/utils/db/ElementCollectionTest.java b/framework/db/src/test/java/com/cloud/utils/db/ElementCollectionTest.java index 44f183dc8cdd..aed076a6d6b8 100644 --- a/framework/db/src/test/java/com/cloud/utils/db/ElementCollectionTest.java +++ b/framework/db/src/test/java/com/cloud/utils/db/ElementCollectionTest.java @@ -29,10 +29,8 @@ import junit.framework.TestCase; -import org.apache.log4j.Logger; public class ElementCollectionTest extends TestCase { - static final Logger s_logger = Logger.getLogger(ElementCollectionTest.class); ArrayList ar = null; List lst = null; Collection coll = null; diff --git a/framework/db/src/test/java/com/cloud/utils/db/GlobalLockTest.java b/framework/db/src/test/java/com/cloud/utils/db/GlobalLockTest.java index 19927edb81c5..afd756eb2700 100644 
--- a/framework/db/src/test/java/com/cloud/utils/db/GlobalLockTest.java +++ b/framework/db/src/test/java/com/cloud/utils/db/GlobalLockTest.java @@ -16,7 +16,8 @@ // under the License. package com.cloud.utils.db; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.test.context.ContextConfiguration; @@ -27,10 +28,10 @@ @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = "classpath:/testContext.xml") public class GlobalLockTest { - public static final Logger s_logger = Logger.getLogger(GlobalLockTest.class); + protected Logger logger = LogManager.getLogger(getClass()); private final static GlobalLock WorkLock = GlobalLock.getInternLock("SecurityGroupWork"); - public static class Worker implements Runnable { + public class Worker implements Runnable { int id = 0; int timeoutSeconds = 10; int jobDuration = 2; @@ -54,7 +55,7 @@ public void run() { Thread.sleep(jobDuration * 1000); } } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while testing global lock."); + logger.debug("[ignored] interrupted while testing global lock."); } finally { if (locked) { boolean unlocked = WorkLock.unlock(); diff --git a/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java b/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java index eb8b96dc7472..37c0ba776d0d 100644 --- a/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java +++ b/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java @@ -19,13 +19,14 @@ import junit.framework.Assert; import junit.framework.TestCase; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Before; import org.junit.Test; public class Merovingian2Test extends TestCase { - static final Logger s_logger = 
Logger.getLogger(Merovingian2Test.class); + protected Logger logger = LogManager.getLogger(Merovingian2Test.class); Merovingian2 _lockController = Merovingian2.createLockController(1234); @Override @@ -43,15 +44,15 @@ protected void tearDown() throws Exception { @Test public void testLockAndRelease() { - s_logger.info("Testing first acquire"); + logger.info("Testing first acquire"); boolean result = _lockController.acquire("first" + 1234, 5); Assert.assertTrue(result); - s_logger.info("Testing acquire of different lock"); + logger.info("Testing acquire of different lock"); result = _lockController.acquire("second" + 1234, 5); Assert.assertTrue(result); - s_logger.info("Testing reacquire of the same lock"); + logger.info("Testing reacquire of the same lock"); result = _lockController.acquire("first" + 1234, 5); Assert.assertTrue(result); @@ -61,14 +62,14 @@ public void testLockAndRelease() { count = _lockController.owns("second" + 1234); Assert.assertEquals(count, 1); - s_logger.info("Testing release of the first lock"); + logger.info("Testing release of the first lock"); result = _lockController.release("first" + 1234); Assert.assertTrue(result); count = _lockController.owns("first" + 1234); Assert.assertEquals(count, 1); - s_logger.info("Testing release of the second lock"); + logger.info("Testing release of the second lock"); result = _lockController.release("second" + 1234); Assert.assertTrue(result); diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/async/AsyncCallbackDispatcher.java b/framework/ipc/src/main/java/org/apache/cloudstack/framework/async/AsyncCallbackDispatcher.java index b9aa12bc5b00..2eafe21e5936 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/async/AsyncCallbackDispatcher.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/async/AsyncCallbackDispatcher.java @@ -29,11 +29,12 @@ import net.sf.cglib.proxy.MethodInterceptor; import net.sf.cglib.proxy.MethodProxy; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; @SuppressWarnings("rawtypes") public class AsyncCallbackDispatcher implements AsyncCompletionCallback { - private static final Logger s_logger = Logger.getLogger(AsyncCallbackDispatcher.class); + protected Logger logger = LogManager.getLogger(getClass()); private Method _callbackMethod; private final T _targetObject; @@ -100,7 +101,7 @@ public Object intercept(Object arg0, Method arg1, Object[] arg2, MethodProxy arg }); return t; } catch (Throwable e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); } return null; diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/client/ClientTransportProvider.java b/framework/ipc/src/main/java/org/apache/cloudstack/framework/client/ClientTransportProvider.java index ae28f900e544..b2fbd60351a1 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/client/ClientTransportProvider.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/client/ClientTransportProvider.java @@ -23,7 +23,8 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.framework.serializer.MessageSerializer; import org.apache.cloudstack.framework.transport.TransportEndpoint; @@ -34,7 +35,7 @@ import com.cloud.utils.concurrency.NamedThreadFactory; public class ClientTransportProvider implements TransportProvider { - final static Logger s_logger = Logger.getLogger(ClientTransportProvider.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final int DEFAULT_WORKER_POOL_SIZE = 5; private final Map _endpointSites = new HashMap(); @@ -72,7 +73,7 @@ protected void runInContext() { try { _connection.connect(_serverAddress, _serverPort); } catch (Throwable e) { 
- s_logger.info("[ignored]" + logger.info("[ignored]" + "error during ipc client initialization: " + e.getLocalizedMessage()); } } diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageBusBase.java b/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageBusBase.java index c0713567cd6e..742fd90c33e0 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageBusBase.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageBusBase.java @@ -26,7 +26,8 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.framework.serializer.MessageSerializer; @@ -41,7 +42,7 @@ public class MessageBusBase implements MessageBus { private final SubscriptionNode _subscriberRoot; private MessageSerializer _messageSerializer; - private static final Logger s_logger = Logger.getLogger(MessageBusBase.class); + protected Logger logger = LogManager.getLogger(getClass()); public MessageBusBase() { _gate = new Gate(); @@ -65,8 +66,8 @@ public void subscribe(String subject, MessageSubscriber subscriber) { assert (subject != null); assert (subscriber != null); if (_gate.enter()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Enter gate in message bus subscribe"); + if (logger.isTraceEnabled()) { + logger.trace("Enter gate in message bus subscribe"); } try { SubscriptionNode current = locate(subject, null, true); @@ -85,8 +86,8 @@ public void subscribe(String subject, MessageSubscriber subscriber) { @Override public void unsubscribe(String subject, MessageSubscriber subscriber) { if (_gate.enter()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Enter gate in message bus unsubscribe"); + if (logger.isTraceEnabled()) { + logger.trace("Enter gate in message bus unsubscribe"); } try { if (subject != null) { @@ -109,8 +110,8 @@ 
public void unsubscribe(String subject, MessageSubscriber subscriber) { @Override public void clearAll() { if (_gate.enter()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Enter gate in message bus clearAll"); + if (logger.isTraceEnabled()) { + logger.trace("Enter gate in message bus clearAll"); } try { _subscriberRoot.clearAll(); @@ -128,8 +129,8 @@ public void clearAll() { @Override public void prune() { if (_gate.enter()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Enter gate in message bus prune"); + if (logger.isTraceEnabled()) { + logger.trace("Enter gate in message bus prune"); } try { doPrune(); @@ -164,11 +165,11 @@ public void publish(String senderAddress, String subject, PublishScope scope, Ob // publish cannot be in DB transaction, which may hold DB lock too long, and we are guarding this here if (!noDbTxn()){ String errMsg = "NO EVENT PUBLISH CAN BE WRAPPED WITHIN DB TRANSACTION!"; - s_logger.error(errMsg, new CloudRuntimeException(errMsg)); + logger.error(errMsg, new CloudRuntimeException(errMsg)); } if (_gate.enter(true)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Enter gate in message bus publish"); + if (logger.isTraceEnabled()) { + logger.trace("Enter gate in message bus publish"); } try { List chainFromTop = new ArrayList(); @@ -326,7 +327,7 @@ public boolean enter(boolean wait) { try { wait(); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while guarding re-entrance on message bus."); + logger.debug("[ignored] interrupted while guarding re-entrance on message bus."); } } else { break; @@ -346,8 +347,8 @@ public void leave() { onGateOpen(); } finally { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Open gate of message bus"); + if (logger.isTraceEnabled()) { + logger.trace("Open gate of message bus"); } _reentranceCount--; assert (_reentranceCount == 0); diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDetector.java 
b/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDetector.java index 4bcf9b1ebf42..6e8919ddc066 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDetector.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDetector.java @@ -18,10 +18,11 @@ */ package org.apache.cloudstack.framework.messagebus; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class MessageDetector implements MessageSubscriber { - private static final Logger s_logger = Logger.getLogger(MessageDetector.class); + protected Logger logger = LogManager.getLogger(getClass()); private MessageBus _messageBus; private String[] _subjects; @@ -33,7 +34,7 @@ public MessageDetector() { public void waitAny(long timeoutInMilliseconds) { if (timeoutInMilliseconds < 100) { - s_logger.warn("waitAny is passed with a too short time-out interval. " + timeoutInMilliseconds + "ms"); + logger.warn("waitAny is passed with a too short time-out interval. 
" + timeoutInMilliseconds + "ms"); timeoutInMilliseconds = 100; } @@ -41,7 +42,7 @@ public void waitAny(long timeoutInMilliseconds) { try { wait(timeoutInMilliseconds); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while waiting on any message."); + logger.debug("[ignored] interrupted while waiting on any message."); } } } diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDispatcher.java b/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDispatcher.java index e93bbc2ad4ab..5584aa11ef9c 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDispatcher.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/messagebus/MessageDispatcher.java @@ -25,10 +25,11 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class MessageDispatcher implements MessageSubscriber { - private static final Logger s_logger = Logger.getLogger(MessageDispatcher.class); + protected Logger logger = LogManager.getLogger(getClass()); private static Map, List> s_handlerCache = new HashMap, List>(); @@ -63,7 +64,7 @@ public static void removeDispatcher(Object targetObject) { } } - public static boolean dispatch(Object target, String subject, String senderAddress, Object args) { + public boolean dispatch(Object target, String subject, String senderAddress, Object args) { assert (subject != null); assert (target != null); @@ -74,20 +75,20 @@ public static boolean dispatch(Object target, String subject, String senderAddre try { handler.invoke(target, subject, senderAddress, args); } catch (IllegalArgumentException e) { - s_logger.error("Unexpected exception when calling " + target.getClass().getName() + "." + handler.getName(), e); + logger.error("Unexpected exception when calling " + target.getClass().getName() + "." 
+ handler.getName(), e); throw new RuntimeException("IllegalArgumentException when invoking event handler for subject: " + subject); } catch (IllegalAccessException e) { - s_logger.error("Unexpected exception when calling " + target.getClass().getName() + "." + handler.getName(), e); + logger.error("Unexpected exception when calling " + target.getClass().getName() + "." + handler.getName(), e); throw new RuntimeException("IllegalAccessException when invoking event handler for subject: " + subject); } catch (InvocationTargetException e) { - s_logger.error("Unexpected exception when calling " + target.getClass().getName() + "." + handler.getName(), e); + logger.error("Unexpected exception when calling " + target.getClass().getName() + "." + handler.getName(), e); throw new RuntimeException("InvocationTargetException when invoking event handler for subject: " + subject); } return true; } - public static Method resolveHandler(Class handlerClz, String subject) { + public Method resolveHandler(Class handlerClz, String subject) { synchronized (s_handlerCache) { List handlerList = s_handlerCache.get(handlerClz); if (handlerList != null) { @@ -100,7 +101,7 @@ public static Method resolveHandler(Class handlerClz, String subject) { } } } else { - s_logger.error("Handler class " + handlerClz.getName() + " is not registered"); + logger.error("Handler class " + handlerClz.getName() + " is not registered"); } } @@ -112,8 +113,8 @@ private static boolean match(String expression, String param) { } private void buildHandlerMethodCache(Class handlerClz) { - if (s_logger.isInfoEnabled()) - s_logger.info("Build message handler cache for " + handlerClz.getName()); + if (logger.isInfoEnabled()) + logger.info("Build message handler cache for " + handlerClz.getName()); synchronized (s_handlerCache) { List handlerList = s_handlerCache.get(handlerClz); @@ -130,20 +131,20 @@ private void buildHandlerMethodCache(Class handlerClz) { method.setAccessible(true); handlerList.add(method); - if 
(s_logger.isInfoEnabled()) - s_logger.info("Add message handler " + handlerClz.getName() + "." + method.getName() + " to cache"); + if (logger.isInfoEnabled()) + logger.info("Add message handler " + handlerClz.getName() + "." + method.getName() + " to cache"); } } clz = clz.getSuperclass(); } } else { - if (s_logger.isInfoEnabled()) - s_logger.info("Message handler for class " + handlerClz.getName() + " is already in cache"); + if (logger.isInfoEnabled()) + logger.info("Message handler for class " + handlerClz.getName() + " is already in cache"); } } - if (s_logger.isInfoEnabled()) - s_logger.info("Done building message handler cache for " + handlerClz.getName()); + if (logger.isInfoEnabled()) + logger.info("Done building message handler cache for " + handlerClz.getName()); } } diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/serializer/OnwireClassRegistry.java b/framework/ipc/src/main/java/org/apache/cloudstack/framework/serializer/OnwireClassRegistry.java index 24ccfe42afc5..3cc643956e93 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/serializer/OnwireClassRegistry.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/serializer/OnwireClassRegistry.java @@ -32,7 +32,8 @@ import java.util.jar.JarEntry; import java.util.jar.JarInputStream; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; // @@ -40,7 +41,7 @@ // Credit: http://internna.blogspot.com/2007/11/java-5-retrieving-all-classes-from.html // public class OnwireClassRegistry { - private static final Logger s_logger = Logger.getLogger(OnwireClassRegistry.class); + protected Logger logger = LogManager.getLogger(getClass()); private List packages = new ArrayList(); private final Map> registry = new HashMap>(); @@ -89,7 +90,7 @@ public Class getOnwireClass(String onwireName) { return registry.get(onwireName); } - static Set> getClasses(String packageName) { + private Set> 
getClasses(String packageName) { ClassLoader loader = Thread.currentThread().getContextClassLoader(); return getClasses(loader, packageName); } @@ -98,7 +99,7 @@ static Set> getClasses(String packageName) { // Following helper methods can be put in a separated helper class, // will do that later // - static Set> getClasses(ClassLoader loader, String packageName) { + private Set> getClasses(ClassLoader loader, String packageName) { Set> classes = new HashSet>(); String path = packageName.replace('.', '/'); try { @@ -123,14 +124,14 @@ static Set> getClasses(ClassLoader loader, String packageName) { } } } catch (IOException e) { - s_logger.debug("Encountered IOException", e); + logger.debug("Encountered IOException", e); } catch (ClassNotFoundException e) { - s_logger.info("[ignored] class not found", e); + logger.info("[ignored] class not found", e); } return classes; } - static Set> getFromDirectory(File directory, String packageName) throws ClassNotFoundException { + private Set> getFromDirectory(File directory, String packageName) throws ClassNotFoundException { Set> classes = new HashSet>(); if (directory.exists()) { for (String file : directory.list()) { @@ -140,9 +141,9 @@ static Set> getFromDirectory(File directory, String packageName) throws Class clazz = Class.forName(name); classes.add(clazz); } catch (ClassNotFoundException e) { - s_logger.info("[ignored] class not found in directory " + directory, e); + logger.info("[ignored] class not found in directory " + directory, e); } catch (Exception e) { - s_logger.debug("Encountered unexpect exception! ", e); + logger.debug("Encountered unexpect exception! 
", e); } } else { File f = new File(directory.getPath() + "/" + file); @@ -155,7 +156,7 @@ static Set> getFromDirectory(File directory, String packageName) throws return classes; } - static Set> getFromJARFile(String jar, String packageName) throws IOException, ClassNotFoundException { + private Set> getFromJARFile(String jar, String packageName) throws IOException, ClassNotFoundException { Set> classes = new HashSet>(); try (JarInputStream jarFile = new JarInputStream(new FileInputStream(jar));) { JarEntry jarEntry; @@ -170,7 +171,7 @@ static Set> getFromJARFile(String jar, String packageName) throws IOExc Class clz = Class.forName(className.replace('/', '.')); classes.add(clz); } catch (ClassNotFoundException | NoClassDefFoundError e) { - s_logger.warn("Unable to load class from jar file", e); + logger.warn("Unable to load class from jar file", e); } } } diff --git a/framework/ipc/src/main/java/org/apache/cloudstack/framework/server/ServerTransportProvider.java b/framework/ipc/src/main/java/org/apache/cloudstack/framework/server/ServerTransportProvider.java index 06215a78b70d..2302fe4e39bc 100644 --- a/framework/ipc/src/main/java/org/apache/cloudstack/framework/server/ServerTransportProvider.java +++ b/framework/ipc/src/main/java/org/apache/cloudstack/framework/server/ServerTransportProvider.java @@ -27,7 +27,8 @@ import org.apache.cloudstack.framework.transport.TransportPdu; import org.apache.cloudstack.framework.transport.TransportProvider; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.bouncycastle.jce.provider.BouncyCastleProvider; import java.security.SecureRandom; @@ -38,7 +39,7 @@ import java.util.concurrent.Executors; public class ServerTransportProvider implements TransportProvider { - private static final Logger s_logger = Logger.getLogger(ServerTransportProvider.class); + protected Logger logger = 
LogManager.getLogger(getClass()); public static final int DEFAULT_WORKER_POOL_SIZE = 5; @@ -150,7 +151,7 @@ protected void runInContext() { site.processOutput(); site.ackOutputProcessSignal(); } catch (Throwable e) { - s_logger.error("Unhandled exception", e); + logger.error("Unhandled exception", e); } } }); diff --git a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServer.java b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServer.java index af5862c48e6a..bb59d1e23981 100644 --- a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServer.java +++ b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServer.java @@ -18,17 +18,18 @@ */ package org.apache.cloudstack.framework.sampleserver; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class SampleManagementServer { - private static final Logger s_logger = Logger.getLogger(SampleManagementServer.class); + protected Logger logger = LogManager.getLogger(getClass()); public void mainLoop() { while (true) { try { Thread.sleep(1000); } catch (InterruptedException e) { - s_logger.debug("[ignored] ."); + logger.debug("[ignored] ."); } } } diff --git a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServerApp.java b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServerApp.java index 47eb9d94b944..340e2068b520 100644 --- a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServerApp.java +++ b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagementServerApp.java @@ -22,7 +22,7 @@ import java.net.URISyntaxException; import java.net.URL; -import org.apache.log4j.xml.DOMConfigurator; +import 
org.apache.logging.log4j.core.config.Configurator; import org.springframework.context.ApplicationContext; import org.springframework.context.support.ClassPathXmlApplicationContext; @@ -37,7 +37,7 @@ private static void setupLog4j() { File file = new File(configUrl.toURI()); System.out.println("Log4j configuration from : " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + Configurator.initialize(null, file.getAbsolutePath()); } catch (URISyntaxException e) { System.out.println("Unable to convert log4j configuration Url to URI"); } diff --git a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent.java b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent.java index e3abcca9d46e..77a2a72066ef 100644 --- a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent.java +++ b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent.java @@ -24,7 +24,8 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.messagebus.MessageBus; @@ -39,7 +40,7 @@ @Component public class SampleManagerComponent { - private static final Logger s_logger = Logger.getLogger(SampleManagerComponent.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private MessageBus _eventBus; @@ -88,12 +89,12 @@ void testRpc() { .addCallbackListener(new RpcCallbackListener() { @Override public void onSuccess(SampleStoragePrepareAnswer result) { - s_logger.info("StoragePrepare return result: " + result.getResult()); + logger.info("StoragePrepare return result: " + result.getResult()); } @Override public void onFailure(RpcException e) { - 
s_logger.info("StoragePrepare failed"); + logger.info("StoragePrepare failed"); } }) .apply(); diff --git a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent2.java b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent2.java index 13040c1c9351..294d1c979eb9 100644 --- a/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent2.java +++ b/framework/ipc/src/test/java/org/apache/cloudstack/framework/sampleserver/SampleManagerComponent2.java @@ -21,7 +21,8 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.messagebus.MessageBus; @@ -34,7 +35,7 @@ @Component public class SampleManagerComponent2 { - private static final Logger s_logger = Logger.getLogger(SampleManagerComponent2.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private MessageBus _eventBus; @@ -55,10 +56,10 @@ public void init() { @RpcServiceHandler(command = "StoragePrepare") void onStartCommand(RpcServerCall call) { - s_logger.info("Reevieved StoragePrpare call"); + logger.info("Reevieved StoragePrpare call"); SampleStoragePrepareCommand cmd = call.getCommandArgument(); - s_logger.info("StoragePrepare command arg. pool: " + cmd.getStoragePool() + ", vol: " + cmd.getVolumeId()); + logger.info("StoragePrepare command arg. 
pool: " + cmd.getStoragePool() + ", vol: " + cmd.getVolumeId()); SampleStoragePrepareAnswer answer = new SampleStoragePrepareAnswer(); answer.setResult("Successfully executed StoragePrepare command"); diff --git a/framework/ipc/src/test/java/org/apache/cloudstack/messagebus/TestMessageBus.java b/framework/ipc/src/test/java/org/apache/cloudstack/messagebus/TestMessageBus.java index 3ee48803e298..5dd38642258a 100644 --- a/framework/ipc/src/test/java/org/apache/cloudstack/messagebus/TestMessageBus.java +++ b/framework/ipc/src/test/java/org/apache/cloudstack/messagebus/TestMessageBus.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.framework.messagebus.MessageDetector; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -37,7 +36,6 @@ @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = "classpath:/MessageBusTestContext.xml") public class TestMessageBus extends TestCase { - private static final Logger s_logger = Logger.getLogger(TestMessageBus.class); @Inject MessageBus _messageBus; @@ -129,7 +127,6 @@ public void run() { try { Thread.sleep(3000); } catch (InterruptedException e) { - s_logger.debug("[ignored] ."); } _messageBus.publish(null, "Host", PublishScope.GLOBAL, null); } @@ -150,7 +147,6 @@ public void run() { try { thread.join(); } catch (InterruptedException e) { - s_logger.debug("[ignored] ."); } } } diff --git a/framework/ipc/src/test/resources/log4j-cloud.xml b/framework/ipc/src/test/resources/log4j-cloud.xml index e9b1918b6e60..6bd50829d825 100644 --- a/framework/ipc/src/test/resources/log4j-cloud.xml +++ b/framework/ipc/src/test/resources/log4j-cloud.xml @@ -17,78 +17,57 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
--> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java index 5575ab394ee6..465a80b62c7f 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java @@ -16,7 +16,8 @@ // under the License. package org.apache.cloudstack.framework.jobs; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.jobs.dao.AsyncJobJoinMapDao; @@ -33,7 +34,7 @@ import com.cloud.user.User; public class AsyncJobExecutionContext { - private static final Logger s_logger = Logger.getLogger(AsyncJobExecutionContext.class); + protected static Logger LOGGER = LogManager.getLogger(AsyncJobExecutionContext.class); private AsyncJob _job; @@ -132,24 +133,24 @@ public void disjoinJob(long joinedJobId) throws InsufficientCapacityException, Object exception = JobSerializerHelper.fromObjectSerializedString(record.getJoinResult()); if (exception != null && exception instanceof Exception) { if (exception instanceof InsufficientCapacityException) { - s_logger.error("Job " + joinedJobId + " failed with InsufficientCapacityException"); + LOGGER.error("Job " + joinedJobId + " failed with InsufficientCapacityException"); throw (InsufficientCapacityException)exception; } else if (exception instanceof ConcurrentOperationException) { - s_logger.error("Job " + joinedJobId + " failed with 
ConcurrentOperationException"); + LOGGER.error("Job " + joinedJobId + " failed with ConcurrentOperationException"); throw (ConcurrentOperationException)exception; } else if (exception instanceof ResourceUnavailableException) { - s_logger.error("Job " + joinedJobId + " failed with ResourceUnavailableException"); + LOGGER.error("Job " + joinedJobId + " failed with ResourceUnavailableException"); throw (ResourceUnavailableException)exception; } else { - s_logger.error("Job " + joinedJobId + " failed with exception"); + LOGGER.error("Job " + joinedJobId + " failed with exception"); throw new RuntimeException((Exception)exception); } } } else { - s_logger.error("Job " + joinedJobId + " failed without providing an error object"); + LOGGER.error("Job " + joinedJobId + " failed without providing an error object"); throw new RuntimeException("Job " + joinedJobId + " failed without providing an error object"); } } @@ -172,7 +173,7 @@ public static AsyncJobExecutionContext getCurrentExecutionContext() { // TODO, this has security implications, operations carried from API layer should always // set its context, otherwise, the fall-back here will use system security context // - s_logger.warn("Job is executed without a context, setup psudo job for the executing thread"); + LOGGER.warn("Job is executed without a context, setup psudo job for the executing thread"); if (CallContext.current() != null) context = registerPseudoExecutionContext(CallContext.current().getCallingAccountId(), CallContext.current().getCallingUserId()); diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java index 1914ff714602..a2f1f36b8637 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java @@ -22,7 +22,6 @@ import 
java.util.List; import org.apache.cloudstack.api.ApiConstants; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.jobs.JobInfo; @@ -37,7 +36,6 @@ import com.cloud.utils.db.TransactionLegacy; public class AsyncJobDaoImpl extends GenericDaoBase implements AsyncJobDao { - private static final Logger s_logger = Logger.getLogger(AsyncJobDaoImpl.class.getName()); private final SearchBuilder pendingAsyncJobSearch; private final SearchBuilder pendingAsyncJobsSearch; @@ -121,7 +119,7 @@ public AsyncJobVO findInstancePendingAsyncJob(String instanceType, long instance List l = listIncludingRemovedBy(sc); if (l != null && l.size() > 0) { if (l.size() > 1) { - s_logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job"); + logger.warn("Instance " + instanceType + "-" + instanceId + " has multiple pending async-job"); } return l.get(0); @@ -208,9 +206,9 @@ public void resetJobProcess(long msid, int jobResultCode, String jobResultMessag pstmt.setLong(6, msid); pstmt.execute(); } catch (SQLException e) { - s_logger.warn("Unable to reset job status for management server " + msid, e); + logger.warn("Unable to reset job status for management server " + msid, e); } catch (Throwable e) { - s_logger.warn("Unable to reset job status for management server " + msid, e); + logger.warn("Unable to reset job status for management server " + msid, e); } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java index d70864c755b6..da7ba36c919f 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.TimeZone; -import 
org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.AsyncJobJoinMapVO; import org.apache.cloudstack.jobs.JobInfo; @@ -39,7 +38,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class AsyncJobJoinMapDaoImpl extends GenericDaoBase implements AsyncJobJoinMapDao { - public static final Logger s_logger = Logger.getLogger(AsyncJobJoinMapDaoImpl.class); private final SearchBuilder RecordSearch; private final SearchBuilder RecordSearchByOwner; @@ -202,7 +200,7 @@ public void completeJoin(long joinJobId, JobInfo.Status joinStatus, String joinR // // txn.commit(); // } catch (SQLException e) { -// s_logger.error("Unexpected exception", e); +// logger.error("Unexpected exception", e); // } // // return standaloneList; diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java index 00bd08d0a2ab..18a9160b6da4 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java @@ -22,7 +22,6 @@ import java.util.Date; import java.util.TimeZone; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.SyncQueueVO; @@ -33,7 +32,6 @@ import com.cloud.utils.db.TransactionLegacy; public class SyncQueueDaoImpl extends GenericDaoBase implements SyncQueueDao { - private static final Logger s_logger = Logger.getLogger(SyncQueueDaoImpl.class.getName()); SearchBuilder TypeIdSearch = createSearchBuilder(); @@ -60,9 +58,9 @@ public void ensureQueue(String syncObjType, long syncObjId) { pstmt.setString(4, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), dt)); pstmt.execute(); } catch (SQLException e) { - s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); + logger.warn("Unable to create sync queue 
" + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); } catch (Throwable e) { - s_logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); + logger.warn("Unable to create sync queue " + syncObjType + "-" + syncObjId + ":" + e.getMessage(), e); } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java index 29c3f1b289fb..756cbb7efb0f 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.TimeZone; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.SyncQueueItemVO; @@ -42,7 +41,6 @@ @DB public class SyncQueueItemDaoImpl extends GenericDaoBase implements SyncQueueItemDao { - private static final Logger s_logger = Logger.getLogger(SyncQueueItemDaoImpl.class); final GenericSearchBuilder queueIdSearch; final GenericSearchBuilder queueActiveItemSearch; @@ -116,9 +114,9 @@ public List getNextQueueItems(int maxItems) { l.add(item); } } catch (SQLException e) { - s_logger.error("Unexpected sql exception, ", e); + logger.error("Unexpected sql exception, ", e); } catch (Throwable e) { - s_logger.error("Unexpected exception, ", e); + logger.error("Unexpected exception, ", e); } return l; } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index 4a10727546ea..e66221cc8fe0 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,7 +24,6 @@ 
import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; @@ -43,7 +42,6 @@ import com.cloud.vm.VirtualMachine; public class VmWorkJobDaoImpl extends GenericDaoBase implements VmWorkJobDao { - private static final Logger s_logger = Logger.getLogger(VmWorkJobDaoImpl.class); protected SearchBuilder PendingWorkJobSearch; protected SearchBuilder PendingWorkJobByCommandSearch; @@ -159,8 +157,8 @@ public void expungeCompletedWorkJobs(final Date cutDate) { sc.setParameters("dispatcher", "VmWorkJobDispatcher"); List expungeList = listBy(sc); for (VmWorkJobVO job : expungeList) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Expunge completed work job-" + job.getId()); + if (logger.isDebugEnabled()) + logger.debug("Expunge completed work job-" + job.getId()); expunge(job.getId()); _baseJobDao.expunge(job.getId()); } @@ -190,10 +188,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { pstmt.execute(); } catch (SQLException e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "SQL failed to delete vm work job: " + e.getLocalizedMessage()); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "caught an error during delete vm work job: " + e.getLocalizedMessage()); } @@ -205,10 +203,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { pstmt.execute(); } catch (SQLException e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "SQL failed to delete async job: " + e.getLocalizedMessage()); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "caught an error during delete async job: " + e.getLocalizedMessage()); } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java 
b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java index 3c0f81d0bc19..92a2acb9d4f3 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java @@ -64,9 +64,6 @@ import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.management.ManagementServerHost; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; -import org.apache.log4j.MDC; -import org.apache.log4j.NDC; import com.cloud.cluster.ClusterManagerListener; import com.cloud.network.Network; @@ -109,6 +106,8 @@ import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.logging.log4j.ThreadContext; + public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, ClusterManagerListener, Configurable { // Advanced public static final ConfigKey JobExpireMinutes = new ConfigKey("Advanced", Long.class, "job.expire.minutes", "1440", @@ -120,7 +119,6 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, "Time in seconds to wait in acquiring lock to submit a vm worker job", false); private static final ConfigKey HidePassword = new ConfigKey("Advanced", Boolean.class, "log.hide.password", "true", "If set to true, the password is hidden", true, ConfigKey.Scope.Global); - private static final Logger s_logger = Logger.getLogger(AsyncJobManagerImpl.class); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds @@ -240,8 +238,8 @@ public long submitAsyncJob(AsyncJob job, boolean scheduleJobExecutionInContext) publishOnEventBus(job, "submit"); scheduleExecution(job, scheduleJobExecutionInContext); - if (s_logger.isDebugEnabled()) { - s_logger.debug("submit async job-" + job.getId() + ", details: " + 
StringUtils.cleanString(job.toString())); + if (logger.isDebugEnabled()) { + logger.debug("submit async job-" + job.getId() + ", details: " + StringUtils.cleanString(job.toString())); } return job.getId(); } @@ -283,7 +281,7 @@ public Long doInTransaction(TransactionStatus status) { } } catch (Exception e) { String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception."; - s_logger.warn(errMsg, e); + logger.warn(errMsg, e); throw new CloudRuntimeException(errMsg); } } @@ -292,16 +290,16 @@ public Long doInTransaction(TransactionStatus status) { @DB public void completeAsyncJob(final long jobId, final Status jobStatus, final int resultCode, final String resultObject) { String resultObj = null; - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { resultObj = convertHumanReadableJson(obfuscatePassword(resultObject, HidePassword.value())); - s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObj); + logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObj); } final AsyncJobVO job = _jobDao.findById(jobId); if (job == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus + ", resultCode: " + resultCode + ", result: " + + if (logger.isDebugEnabled()) { + logger.debug("job-" + jobId + " no longer exists, we just log completion info here. 
" + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObj); } // still purge item from queue to avoid any blocking @@ -310,8 +308,8 @@ public void completeAsyncJob(final long jobId, final Status jobStatus, final int } if (job.getStatus() != JobInfo.Status.IN_PROGRESS) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " is already completed."); + if (logger.isDebugEnabled()) { + logger.debug("job-" + jobId + " is already completed."); } // still purge item from queue to avoid any blocking _queueMgr.purgeAsyncJobQueueItemId(jobId); @@ -322,18 +320,18 @@ public void completeAsyncJob(final long jobId, final Status jobStatus, final int job.setResult(resultObject); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Publish async job-" + jobId + " complete on message bus"); + if (logger.isDebugEnabled()) { + logger.debug("Publish async job-" + jobId + " complete on message bus"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Wake up jobs related to job-" + jobId); + if (logger.isDebugEnabled()) { + logger.debug("Wake up jobs related to job-" + jobId); } final List wakeupList = Transaction.execute(new TransactionCallback>() { @Override public List doInTransaction(final TransactionStatus status) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Update db status for job-" + jobId); + if (logger.isDebugEnabled()) { + logger.debug("Update db status for job-" + jobId); } job.setCompleteMsid(getMsid()); job.setStatus(jobStatus); @@ -351,8 +349,8 @@ public List doInTransaction(final TransactionStatus status) { job.setExecutingMsid(null); _jobDao.update(jobId, job); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Wake up jobs joined with job-" + jobId + " and disjoin all subjobs created from job- " + jobId); + if (logger.isDebugEnabled()) { + logger.debug("Wake up jobs joined with job-" + jobId + " and disjoin all subjobs created from job- " + jobId); } final List wakeupList = wakeupByJoinedJobCompletion(jobId); 
_joinMapDao.disjoinAllJobs(jobId); @@ -392,14 +390,14 @@ private String convertHumanReadableJson(String resultObj) { @Override @DB public void updateAsyncJobStatus(final long jobId, final int processStatus, final String resultObject) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + ", result: " + resultObject); + if (logger.isDebugEnabled()) { + logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + ", result: " + resultObject); } final AsyncJobVO job = _jobDao.findById(jobId); if (job == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. progress status: " + processStatus); + if (logger.isDebugEnabled()) { + logger.debug("job-" + jobId + " no longer exists, we just log progress info here. progress status: " + processStatus); } return; @@ -422,8 +420,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @Override @DB public void updateAsyncJobAttachment(final long jobId, final String instanceType, final Long instanceId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId); + if (logger.isDebugEnabled()) { + logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId); } final AsyncJobVO job = _jobDao.findById(jobId); @@ -488,8 +486,8 @@ public void completeJoin(long joinJobId, JobInfo.Status joinStatus, String joinR @Override public void syncAsyncJobExecution(AsyncJob job, String syncObjType, long syncObjId, long queueSizeLimit) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sync job-" + job.getId() + " execution on object " + syncObjType + "." 
+ syncObjId); + if (logger.isDebugEnabled()) { + logger.debug("Sync job-" + job.getId() + " execution on object " + syncObjType + "." + syncObjId); } SyncQueueVO queue = null; @@ -565,7 +563,7 @@ private AsyncJobDispatcher findWakeupDispatcher(AsyncJob job) { return dispatcher; } } else { - s_logger.warn("job-" + job.getId() + " is scheduled for wakeup run, but there is no joining info anymore"); + logger.warn("job-" + job.getId() + " is scheduled for wakeup run, but there is no joining info anymore"); } } return null; @@ -589,19 +587,19 @@ public void run() { String related = job.getRelated(); String logContext = job.getShortUuid(); if (related != null && !related.isEmpty()) { - NDC.push("job-" + related + "/" + "job-" + job.getId()); + ThreadContext.push("job-" + related + "/" + "job-" + job.getId()); AsyncJob relatedJob = _jobDao.findByIdIncludingRemoved(Long.parseLong(related)); if (relatedJob != null) { logContext = relatedJob.getShortUuid(); } } else { - NDC.push("job-" + job.getId()); + ThreadContext.push("job-" + job.getId()); } - MDC.put("logcontextid", logContext); + ThreadContext.put("logcontextid", logContext); try { super.run(); } finally { - NDC.pop(); + ThreadContext.pop(); } } @@ -618,8 +616,8 @@ protected void runInContext() { } catch (Exception e) { // Due to co-existence of normal-dispatched-job/wakeup-dispatched-job, MBean register() call // is expected to fail under situations - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); + if (logger.isTraceEnabled()) + logger.trace("Unable to register active job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); } _jobMonitor.registerActiveTask(runNumber, job.getId()); @@ -632,11 +630,11 @@ protected void runInContext() { logContext = relatedJob.getShortUuid(); } } - MDC.put("logcontextid", logContext); + ThreadContext.put("logcontextid", logContext); // 
execute the job - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing " + StringUtils.cleanString(job.toString())); + if (logger.isDebugEnabled()) { + logger.debug("Executing " + StringUtils.cleanString(job.toString())); } if ((getAndResetPendingSignals(job) & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0) { @@ -645,25 +643,25 @@ protected void runInContext() { jobDispatcher.runJob(job); } else { // TODO, job wakeup is not in use yet - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to find a wakeup dispatcher from the joined job: " + job); + if (logger.isTraceEnabled()) + logger.trace("Unable to find a wakeup dispatcher from the joined job: " + job); } } else { AsyncJobDispatcher jobDispatcher = getDispatcher(job.getDispatcher()); if (jobDispatcher != null) { jobDispatcher.runJob(job); } else { - s_logger.error("Unable to find job dispatcher, job will be cancelled"); + logger.error("Unable to find job dispatcher, job will be cancelled"); completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Done executing " + job.getCmd() + " for job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Done executing " + job.getCmd() + " for job-" + job.getId()); } } catch (Throwable e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); completeAsyncJob(job.getId(), JobInfo.Status.FAILED, ApiErrorCode.INTERNAL_ERROR.getHttpCode(), null); } finally { // guard final clause as well @@ -678,8 +676,8 @@ protected void runInContext() { } catch (Exception e) { // Due to co-existence of normal-dispatched-job/wakeup-dispatched-job, MBean unregister() call // is expected to fail under situations - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to unregister job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); + if (logger.isTraceEnabled()) + logger.trace("Unable to 
unregister job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); } // @@ -689,7 +687,7 @@ protected void runInContext() { _jobMonitor.unregisterActiveTask(runNumber); } catch (Throwable e) { - s_logger.error("Double exception", e); + logger.error("Double exception", e); } } } @@ -709,8 +707,8 @@ private int getAndResetPendingSignals(AsyncJob job) { private void executeQueueItem(SyncQueueItemVO item, boolean fromPreviousSession) { AsyncJobVO job = _jobDao.findById(item.getContentId()); if (job != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Schedule queued job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Schedule queued job-" + job.getId()); } job.setSyncSource(item); @@ -724,37 +722,37 @@ private void executeQueueItem(SyncQueueItemVO item, boolean fromPreviousSession) job.setExecutingMsid(getMsid()); _jobDao.update(job.getId(), job); } catch (Exception e) { - s_logger.warn("Unexpected exception while dispatching job-" + item.getContentId(), e); + logger.warn("Unexpected exception while dispatching job-" + item.getContentId(), e); try { _queueMgr.returnItem(item.getId()); } catch (Throwable thr) { - s_logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", thr); + logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", thr); } } try { scheduleExecution(job); } catch (RejectedExecutionException e) { - s_logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn"); + logger.warn("Execution for job-" + job.getId() + " is rejected, return it to the queue for next turn"); try { _queueMgr.returnItem(item.getId()); } catch (Exception e2) { - s_logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", e2); + logger.error("Unexpected exception while returning job-" + item.getContentId() + " to queue", e2); } try { job.setExecutingMsid(null); 
_jobDao.update(job.getId(), job); } catch (Exception e3) { - s_logger.warn("Unexpected exception while update job-" + item.getContentId() + " msid for bookkeeping"); + logger.warn("Unexpected exception while update job-" + item.getContentId() + " msid for bookkeeping"); } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find related job for queue item: " + item.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find related job for queue item: " + item.toString()); } _queueMgr.purgeItem(item.getId()); @@ -767,8 +765,8 @@ public void releaseSyncSource() { assert (executionContext != null); if (executionContext.getSyncSource() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Release sync source for job-" + executionContext.getJob().getId() + " sync source: " + executionContext.getSyncSource().getContentType() + + if (logger.isDebugEnabled()) { + logger.debug("Release sync source for job-" + executionContext.getJob().getId() + " sync source: " + executionContext.getSyncSource().getContentType() + "-" + executionContext.getSyncSource().getContentId()); } @@ -825,8 +823,8 @@ private void checkQueue(long queueId) { try { SyncQueueItemVO item = _queueMgr.dequeueFromOne(queueId, getMsid()); if (item != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing sync queue item: " + item.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Executing sync queue item: " + item.toString()); } executeQueueItem(item, false); @@ -834,7 +832,7 @@ private void checkQueue(long queueId) { break; } } catch (Throwable e) { - s_logger.error("Unexpected exception when kicking sync queue-" + queueId, e); + logger.error("Unexpected exception when kicking sync queue-" + queueId, e); break; } } @@ -862,15 +860,15 @@ protected void runInContext() { protected void reallyRun() { try { if (!isAsyncJobsEnabled()) { - s_logger.info("A shutdown has been triggered. 
Not executing any async job"); + logger.info("A shutdown has been triggered. Not executing any async job"); return; } List l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE); if (l != null && l.size() > 0) { for (SyncQueueItemVO item : l) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Execute sync-queue item: " + item.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Execute sync-queue item: " + item.toString()); } executeQueueItem(item, false); } @@ -884,7 +882,7 @@ protected void reallyRun() { scheduleExecution(job, false); } } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to execute queue item, ", e); + logger.error("Unexpected exception when trying to execute queue item, ", e); } } }; @@ -911,7 +909,7 @@ protected void runInContext() { public void reallyRun() { try { - s_logger.trace("Begin cleanup expired async-jobs"); + logger.trace("Begin cleanup expired async-jobs"); // forcefully cancel blocking queue items if they've been staying there for too long List blockItems = _queueMgr.getBlockedQueueItems(JobCancelThresholdMinutes.value() * 60000, false); @@ -919,7 +917,7 @@ public void reallyRun() { for (SyncQueueItemVO item : blockItems) { try { if (item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) { - s_logger.info("Remove Job-" + item.getContentId() + " from Queue-" + item.getId() + " since it has been blocked for too long"); + logger.info("Remove Job-" + item.getContentId() + " from Queue-" + item.getId() + " since it has been blocked for too long"); completeAsyncJob(item.getContentId(), JobInfo.Status.FAILED, 0, "Job is cancelled as it has been blocking others for too long"); _jobMonitor.unregisterByJobId(item.getContentId()); @@ -928,7 +926,7 @@ public void reallyRun() { // purge the item and resume queue processing _queueMgr.purgeItem(item.getId()); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to remove job from sync queue, ", e); + 
logger.error("Unexpected exception when trying to remove job from sync queue, ", e); } } } @@ -940,12 +938,12 @@ public void reallyRun() { List unfinishedJobs = _jobDao.getExpiredUnfinishedJobs(cutTime, 100); for (AsyncJobVO job : unfinishedJobs) { try { - s_logger.info("Expunging unfinished job-" + job.getId()); + logger.info("Expunging unfinished job-" + job.getId()); _jobMonitor.unregisterByJobId(job.getId()); expungeAsyncJob(job); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); + logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); } } @@ -953,17 +951,17 @@ public void reallyRun() { List completedJobs = _jobDao.getExpiredCompletedJobs(cutTime, 100); for (AsyncJobVO job : completedJobs) { try { - s_logger.info("Expunging completed job-" + job.getId()); + logger.info("Expunging completed job-" + job.getId()); expungeAsyncJob(job); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); + logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e); } } - s_logger.trace("End cleanup expired async-jobs"); + logger.trace("End cleanup expired async-jobs"); } catch (Throwable e) { - s_logger.error("Unexpected exception when trying to execute queue item, ", e); + logger.error("Unexpected exception when trying to execute queue item, ", e); } } }; @@ -1058,10 +1056,10 @@ public boolean configure(String name, Map params) throws Configu int apiPoolSize = cloudMaxActive / 2; int workPoolSize = (cloudMaxActive * 2) / 3; - s_logger.info("Start AsyncJobManager API executor thread pool in size " + apiPoolSize); + logger.info("Start AsyncJobManager API executor thread pool in size " + apiPoolSize); _apiJobExecutor = Executors.newFixedThreadPool(apiPoolSize, new NamedThreadFactory(AsyncJobManager.API_JOB_POOL_THREAD_PREFIX)); - s_logger.info("Start AsyncJobManager Work executor thread pool in size " + 
workPoolSize); + logger.info("Start AsyncJobManager Work executor thread pool in size " + workPoolSize); _workerJobExecutor = Executors.newFixedThreadPool(workPoolSize, new NamedThreadFactory(AsyncJobManager.WORK_JOB_POOL_THREAD_PREFIX)); } catch (final Exception e) { throw new ConfigurationException("Unable to load db.properties to configure AsyncJobManagerImpl"); @@ -1108,8 +1106,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // reset job status for all jobs running on this ms node final List jobs = _jobDao.getResetJobs(msid); for (final AsyncJobVO job : jobs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cancel left-over job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Cancel left-over job-" + job.getId()); } cleanupResources(job); job.setStatus(JobInfo.Status.FAILED); @@ -1120,8 +1118,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { job.setLastUpdated(currentGMTTime); job.setRemoved(currentGMTTime); _jobDao.update(job.getId(), job); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Purge queue item for cancelled job-" + job.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Purge queue item for cancelled job-" + job.getId()); } _queueMgr.purgeAsyncJobQueueItemId(job.getId()); } @@ -1129,7 +1127,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Throwable e) { - s_logger.warn("Unexpected exception in cleaning up left over jobs for mamagement server node " + msid, e); + logger.warn("Unexpected exception in cleaning up left over jobs for mamagement server node " + msid, e); } } @@ -1141,7 +1139,7 @@ protected boolean cleanupResources(AsyncJobVO job) { try { ApiCommandResourceType resourceType = ApiCommandResourceType.fromString(job.getInstanceType()); if (resourceType == null) { - s_logger.warn("Unknown ResourceType. Skip Cleanup: " + job.getInstanceType()); + logger.warn("Unknown ResourceType. 
Skip Cleanup: " + job.getInstanceType()); return true; } switch (resourceType) { @@ -1153,7 +1151,7 @@ protected boolean cleanupResources(AsyncJobVO job) { return cleanupNetwork(job.getInstanceId()); } } catch (Exception e) { - s_logger.warn("Error while cleaning up resource: [" + job.getInstanceType().toString() + "] with Id: " + job.getInstanceId(), e); + logger.warn("Error while cleaning up resource: [" + job.getInstanceType().toString() + "] with Id: " + job.getInstanceId(), e); return false; } return true; @@ -1162,49 +1160,49 @@ protected boolean cleanupResources(AsyncJobVO job) { private boolean cleanupVolume(final long volumeId) { VolumeInfo vol = volFactory.getVolume(volumeId); if (vol == null) { - s_logger.warn("Volume not found. Skip Cleanup. VolumeId: " + volumeId); + logger.warn("Volume not found. Skip Cleanup. VolumeId: " + volumeId); return true; } if (vol.getState().isTransitional()) { - s_logger.debug("Cleaning up volume with Id: " + volumeId); + logger.debug("Cleaning up volume with Id: " + volumeId); boolean status = vol.stateTransit(Volume.Event.OperationFailed); cleanupFailedVolumesCreatedFromSnapshots(volumeId); return status; } - s_logger.debug("Volume not in transition state. Skip cleanup. VolumeId: " + volumeId); + logger.debug("Volume not in transition state. Skip cleanup. VolumeId: " + volumeId); return true; } private boolean cleanupVirtualMachine(final long vmId) throws Exception { VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vmId); if (vmInstanceVO == null) { - s_logger.warn("Instance not found. Skip Cleanup. InstanceId: " + vmId); + logger.warn("Instance not found. Skip Cleanup. 
InstanceId: " + vmId); return true; } if (vmInstanceVO.getState().isTransitional()) { - s_logger.debug("Cleaning up Instance with Id: " + vmId); + logger.debug("Cleaning up Instance with Id: " + vmId); return virtualMachineManager.stateTransitTo(vmInstanceVO, VirtualMachine.Event.OperationFailed, vmInstanceVO.getHostId()); } - s_logger.debug("Instance not in transition state. Skip cleanup. InstanceId: " + vmId); + logger.debug("Instance not in transition state. Skip cleanup. InstanceId: " + vmId); return true; } private boolean cleanupNetwork(final long networkId) throws Exception { NetworkVO networkVO = networkDao.findById(networkId); if (networkVO == null) { - s_logger.warn("Network not found. Skip Cleanup. NetworkId: " + networkId); + logger.warn("Network not found. Skip Cleanup. NetworkId: " + networkId); return true; } if (Network.State.Implementing.equals(networkVO.getState())) { try { - s_logger.debug("Cleaning up Network with Id: " + networkId); + logger.debug("Cleaning up Network with Id: " + networkId); return networkOrchestrationService.stateTransitTo(networkVO, Network.Event.OperationFailed); } catch (final NoTransitionException e) { networkVO.setState(Network.State.Shutdown); networkDao.update(networkVO.getId(), networkVO); } } - s_logger.debug("Network not in transition state. Skip cleanup. NetworkId: " + networkId); + logger.debug("Network not in transition state. Skip cleanup. 
NetworkId: " + networkId); return true; } @@ -1216,7 +1214,7 @@ private void cleanupFailedVolumesCreatedFromSnapshots(final long volumeId) { _volsDao.remove(volumeId); } } catch (Exception e) { - s_logger.error("Unexpected exception while removing concurrent request meta data :" + e.getLocalizedMessage()); + logger.error("Unexpected exception while removing concurrent request meta data :" + e.getLocalizedMessage()); } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java index b1cac3e79a53..b2216cb75025 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; @@ -37,7 +36,6 @@ import com.cloud.utils.component.ManagerBase; public class AsyncJobMonitor extends ManagerBase { - public static final Logger s_logger = Logger.getLogger(AsyncJobMonitor.class); @Inject private MessageBus _messageBus; @@ -86,7 +84,7 @@ private void heartbeat() { synchronized (this) { for (Map.Entry entry : _activeTasks.entrySet()) { if (entry.getValue().millisSinceLastJobHeartbeat() > _inactivityWarningThresholdMs) { - s_logger.warn("Task (job-" + entry.getValue().getJobId() + ") has been pending for " + logger.warn("Task (job-" + entry.getValue().getJobId() + ") has been pending for " + entry.getValue().millisSinceLastJobHeartbeat() / 1000 + " seconds"); } } @@ -110,7 +108,7 @@ protected void runInContext() { public void registerActiveTask(long runNumber, long jobId) { synchronized (this) { - s_logger.info("Add job-" + jobId + " into job monitoring"); + logger.info("Add 
job-" + jobId + " into job monitoring"); assert (_activeTasks.get(runNumber) == null); @@ -130,7 +128,7 @@ public void unregisterActiveTask(long runNumber) { ActiveTaskRecord record = _activeTasks.get(runNumber); assert (record != null); if (record != null) { - s_logger.info("Remove job-" + record.getJobId() + " from job monitoring"); + logger.info("Remove job-" + record.getJobId() + " from job monitoring"); if (record.isPoolThread()) _activePoolThreads.decrementAndGet(); @@ -148,7 +146,7 @@ public void unregisterByJobId(long jobId) { while (it.hasNext()) { Map.Entry entry = it.next(); if (entry.getValue().getJobId() == jobId) { - s_logger.info("Remove Job-" + entry.getValue().getJobId() + " from job monitoring due to job cancelling"); + logger.info("Remove Job-" + entry.getValue().getJobId() + " from job monitoring due to job cancelling"); if (entry.getValue().isPoolThread()) _activePoolThreads.decrementAndGet(); diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/JobSerializerHelper.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/JobSerializerHelper.java index 735d7cf73e20..fa1d175c45f9 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/JobSerializerHelper.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/JobSerializerHelper.java @@ -27,7 +27,8 @@ import java.lang.reflect.Type; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.exception.CloudRuntimeException; import com.google.gson.Gson; @@ -45,14 +46,14 @@ * Note: toPairList and appendPairList only support simple POJO objects currently */ public class JobSerializerHelper { - private static final Logger s_logger = Logger.getLogger(JobSerializerHelper.class); + protected static Logger LOGGER = LogManager.getLogger(JobSerializerHelper.class); public 
static final String token = "/"; private static Gson s_gson; static { GsonBuilder gsonBuilder = new GsonBuilder(); gsonBuilder.setVersion(1.5); - s_logger.debug("Job GSON Builder initialized."); + LOGGER.debug("Job GSON Builder initialized."); gsonBuilder.registerTypeAdapter(Class.class, new ClassTypeAdapter()); gsonBuilder.registerTypeAdapter(Throwable.class, new ThrowableTypeAdapter()); s_gson = gsonBuilder.create(); diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java index 2f97991e3e31..3397daa58191 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.dao.SyncQueueDao; import org.apache.cloudstack.framework.jobs.dao.SyncQueueItemDao; @@ -36,7 +35,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManager { - public static final Logger s_logger = Logger.getLogger(SyncQueueManagerImpl.class.getName()); @Inject private SyncQueueDao _syncQueueDao; @@ -70,7 +68,7 @@ public SyncQueueVO doInTransaction(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } return null; } @@ -84,7 +82,7 @@ public SyncQueueItemVO dequeueFromOne(final long queueId, final Long msid) { public SyncQueueItemVO doInTransaction(TransactionStatus status) { SyncQueueVO queueVO = _syncQueueDao.findById(queueId); if(queueVO == null) { - s_logger.error("Sync queue(id: " + queueId + ") does not exist"); + logger.error("Sync queue(id: " + queueId + ") does not exist"); return null; } @@ 
-109,19 +107,19 @@ public SyncQueueItemVO doInTransaction(TransactionStatus status) { return itemVO; } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Sync queue (" + queueId + ") is currently empty"); + if (logger.isDebugEnabled()) + logger.debug("Sync queue (" + queueId + ") is currently empty"); } } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")"); + if (logger.isDebugEnabled()) + logger.debug("There is a pending process in sync queue(id: " + queueId + ")"); } return null; } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } return null; @@ -169,7 +167,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { return resultList; } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } return null; @@ -200,14 +198,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } } @Override @DB public void returnItem(final long queueItemId) { - s_logger.info("Returning queue item " + queueItemId + " back to queue for second try in case of DB deadlock"); + logger.info("Returning queue item " + queueItemId + " back to queue for second try in case of DB deadlock"); try { Transaction.execute(new TransactionCallbackNoReturn() { @Override @@ -228,7 +226,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); } } @@ -247,8 +245,8 @@ private boolean queueReadyToProcess(SyncQueueVO queueVO) { if (nActiveItems < queueVO.getQueueSizeLimit()) return true; - if (s_logger.isDebugEnabled()) - s_logger.debug("Queue (queue id, sync type, sync id) - (" + queueVO.getId() + if 
(logger.isDebugEnabled()) + logger.debug("Queue (queue id, sync type, sync id) - (" + queueVO.getId() + "," + queueVO.getSyncObjType() + ", " + queueVO.getSyncObjId() + ") is reaching concurrency limit " + queueVO.getQueueSizeLimit()); return false; @@ -266,8 +264,8 @@ public void purgeAsyncJobQueueItemId(long asyncJobId) { public void cleanupActiveQueueItems(Long msid, boolean exclusive) { List l = getActiveQueueItems(msid, false); for (SyncQueueItemVO item : l) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Discard left-over queue item: " + item.toString()); + if (logger.isInfoEnabled()) { + logger.info("Discard left-over queue item: " + item.toString()); } purgeItem(item.getId()); } diff --git a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java index eb30a804978a..604eae74afc3 100644 --- a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java +++ b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/AsyncJobTestDispatcher.java @@ -20,15 +20,12 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.jobs.JobInfo.Status; import com.cloud.utils.component.AdapterBase; public class AsyncJobTestDispatcher extends AdapterBase implements AsyncJobDispatcher { - private static final Logger s_logger = - Logger.getLogger(AsyncJobTestDispatcher.class); @Inject private AsyncJobManager _asyncJobMgr; @@ -45,14 +42,14 @@ public AsyncJobTestDispatcher() { public void runJob(final AsyncJob job) { _testDashboard.increaseConcurrency(); - s_logger.info("Execute job " + job.getId() + ", current concurrency " + _testDashboard.getConcurrencyCount()); + logger.info("Execute job " + job.getId() + ", current concurrency " + _testDashboard.getConcurrencyCount()); int interval = 3000; try { Thread.sleep(interval); } catch (InterruptedException e) { - 
s_logger.debug("[ignored] ."); + logger.debug("[ignored] ."); } _asyncJobMgr.completeAsyncJob(job.getId(), Status.SUCCEEDED, 0, null); diff --git a/framework/managed-context/pom.xml b/framework/managed-context/pom.xml index 864e68af7fa6..bc7fa17940bf 100644 --- a/framework/managed-context/pom.xml +++ b/framework/managed-context/pom.xml @@ -29,9 +29,12 @@ - ch.qos.reload4j - reload4j - ${cs.reload4j.version} + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java index fed4e180ff8e..be0ddceebb60 100644 --- a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java @@ -18,7 +18,8 @@ */ package org.apache.cloudstack.managed.context; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.managed.context.impl.DefaultManagedContext; @@ -26,7 +27,7 @@ public abstract class ManagedContextRunnable implements Runnable { private static final int SLEEP_COUNT = 120; - private static final Logger log = Logger.getLogger(ManagedContextRunnable.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final ManagedContext DEFAULT_MANAGED_CONTEXT = new DefaultManagedContext(); private static ManagedContext context; private static boolean managedContext = false; @@ -62,7 +63,7 @@ protected ManagedContext getContext() { Thread.sleep(1000); if (context == null) - log.info("Sleeping until ManagedContext becomes available"); + logger.info("Sleeping until ManagedContext becomes available"); } catch (InterruptedException e) { throw new RuntimeException(e); } diff 
--git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java index 76e6d4580287..33d181b3af14 100644 --- a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java @@ -23,7 +23,8 @@ import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.managed.context.ManagedContext; import org.apache.cloudstack.managed.context.ManagedContextListener; @@ -32,7 +33,7 @@ public class DefaultManagedContext implements ManagedContext { - private static final Logger log = Logger.getLogger(DefaultManagedContext.class); + protected Logger logger = LogManager.getLogger(getClass()); List> listeners = new CopyOnWriteArrayList>(); @@ -87,7 +88,7 @@ public T callWithContext(Callable callable) throws Exception { if (firstError == null) { firstError = t; } - log.error("Failed onEnterContext for listener: " + listener, t); + logger.error("Failed onEnterContext for listener: " + listener, t); } /* Stack data structure is used because in between onEnter and onLeave @@ -113,7 +114,7 @@ public T callWithContext(Callable callable) throws Exception { invocation.listener.onLeaveContext(invocation.data, reentry); } catch (Throwable t) { lastError = t; - log.error("Failed onLeaveContext for listener: [" + invocation.listener + "]", t); + logger.error("Failed onLeaveContext for listener: [" + invocation.listener + "]", t); } } diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java 
b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java index f323d9a212a1..96b9ad80324b 100644 --- a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java @@ -21,7 +21,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.managed.context.ManagedContextUtils; @@ -35,7 +36,7 @@ protected Map initialValue() { }; private static boolean s_validateContext = false; - private static final Logger log = Logger.getLogger(ManagedThreadLocal.class); + protected static Logger LOGGER = LogManager.getLogger(ManagedThreadLocal.class); @SuppressWarnings("unchecked") @Override @@ -71,7 +72,7 @@ public void remove() { private static void validateInContext(Object tl) { if (s_validateContext && !ManagedContextUtils.isInContext()) { String msg = "Using a managed thread local in a non managed context this WILL cause errors at runtime. 
TL [" + tl + "]"; - log.error(msg, new IllegalStateException(msg)); + LOGGER.error(msg, new IllegalStateException(msg)); } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java index 555757ec8471..8d35bb9f4342 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java @@ -40,7 +40,6 @@ import org.apache.commons.lang.text.StrSubstitutor; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.ObjectUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.domain.DomainVO; @@ -64,7 +63,6 @@ @Component public class QuotaAlertManagerImpl extends ManagerBase implements QuotaAlertManager { - private static final Logger s_logger = Logger.getLogger(QuotaAlertManagerImpl.class); @Inject private AccountDao _accountDao; @@ -126,16 +124,16 @@ public boolean configure(String name, Map params) throws Configu @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Starting Alert Manager"); + if (logger.isInfoEnabled()) { + logger.info("Starting Alert Manager"); } return true; } @Override public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stopping Alert Manager"); + if (logger.isInfoEnabled()) { + logger.info("Stopping Alert Manager"); } return true; } @@ -145,8 +143,8 @@ public void checkAndSendQuotaAlertEmails() { List deferredQuotaEmailList = new ArrayList(); final BigDecimal zeroBalance = new BigDecimal(0); for (final QuotaAccountVO quotaAccount : _quotaAcc.listAllQuotaAccount()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("checkAndSendQuotaAlertEmails accId=" + quotaAccount.getId()); + if (logger.isDebugEnabled()) { + logger.debug("checkAndSendQuotaAlertEmails accId=" + 
quotaAccount.getId()); } BigDecimal accountBalance = quotaAccount.getQuotaBalance(); Date balanceDate = quotaAccount.getQuotaBalanceDate(); @@ -158,23 +156,23 @@ public void checkAndSendQuotaAlertEmails() { if (account == null) { continue; // the account is removed } - if (s_logger.isDebugEnabled()) { - s_logger.debug("checkAndSendQuotaAlertEmails: Check id=" + account.getId() + " bal=" + accountBalance + ", alertDate=" + alertDate + ", lockable=" + lockable); + if (logger.isDebugEnabled()) { + logger.debug("checkAndSendQuotaAlertEmails: Check id=" + account.getId() + " bal=" + accountBalance + ", alertDate=" + alertDate + ", lockable=" + lockable); } if (accountBalance.compareTo(zeroBalance) < 0) { if (_lockAccountEnforcement && (lockable == 1)) { if (_quotaManager.isLockable(account)) { - s_logger.info("Locking account " + account.getAccountName() + " due to quota < 0."); + logger.info("Locking account " + account.getAccountName() + " due to quota < 0."); lockAccount(account.getId()); } } if (alertDate == null || (balanceDate.after(alertDate) && getDifferenceDays(alertDate, new Date()) > 1)) { - s_logger.info("Sending alert " + account.getAccountName() + " due to quota < 0."); + logger.info("Sending alert " + account.getAccountName() + " due to quota < 0."); deferredQuotaEmailList.add(new DeferredQuotaEmail(account, quotaAccount, QuotaConfig.QuotaEmailTemplateTypes.QUOTA_EMPTY)); } } else if (accountBalance.compareTo(thresholdBalance) < 0) { if (alertDate == null || (balanceDate.after(alertDate) && getDifferenceDays(alertDate, new Date()) > 1)) { - s_logger.info("Sending alert " + account.getAccountName() + " due to quota below threshold."); + logger.info("Sending alert " + account.getAccountName() + " due to quota below threshold."); deferredQuotaEmailList.add(new DeferredQuotaEmail(account, quotaAccount, QuotaConfig.QuotaEmailTemplateTypes.QUOTA_LOW)); } } @@ -182,8 +180,8 @@ public void checkAndSendQuotaAlertEmails() { } for (DeferredQuotaEmail emailToBeSent : 
deferredQuotaEmailList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("checkAndSendQuotaAlertEmails: Attempting to send quota alert email to users of account: " + emailToBeSent.getAccount().getAccountName()); + if (logger.isDebugEnabled()) { + logger.debug("checkAndSendQuotaAlertEmails: Attempting to send quota alert email to users of account: " + emailToBeSent.getAccount().getAccountName()); } sendQuotaAlert(emailToBeSent); } @@ -222,8 +220,8 @@ public void sendQuotaAlert(DeferredQuotaEmail emailToBeSent) { final Map subjectOptionMap = generateOptionMap(account, userNames, accountDomain, balanceStr, usageStr, emailType, false); final Map bodyOptionMap = generateOptionMap(account, userNames, accountDomain, balanceStr, usageStr, emailType, true); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Sending quota alert with values: accountName [%s], accountID [%s], accountUsers [%s], domainName [%s], domainID [%s].", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Sending quota alert with values: accountName [%s], accountID [%s], accountUsers [%s], domainName [%s], domainID [%s].", account.getAccountName(), account.getUuid(), userNames, accountDomain.getName(), accountDomain.getUuid())); } @@ -237,14 +235,14 @@ public void sendQuotaAlert(DeferredQuotaEmail emailToBeSent) { sendQuotaAlert(account, emailRecipients, subject, body); emailToBeSent.sentSuccessfully(_quotaAcc); } catch (Exception e) { - s_logger.error(String.format("Unable to send quota alert email (subject=%s; body=%s) to account %s (%s) recipients (%s) due to error (%s)", subject, body, account.getAccountName(), + logger.error(String.format("Unable to send quota alert email (subject=%s; body=%s) to account %s (%s) recipients (%s) due to error (%s)", subject, body, account.getAccountName(), account.getUuid(), emailRecipients, e)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Exception", e); + if (logger.isDebugEnabled()) { + logger.debug("Exception", e); } } } 
else { - s_logger.error(String.format("No quota email template found for type %s, cannot send quota alert email to account %s(%s)", emailType, account.getAccountName(), account.getUuid())); + logger.error(String.format("No quota email template found for type %s, cannot send quota alert email to account %s(%s)", emailType, account.getAccountName(), account.getUuid())); } } @@ -304,15 +302,15 @@ protected boolean lockAccount(long accountId) { acctForUpdate.setState(State.LOCKED); success = _accountDao.update(Long.valueOf(accountId), acctForUpdate); } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); + if (logger.isInfoEnabled()) { + logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); } } } else { - s_logger.warn("Failed to lock account " + accountId + ", account not found."); + logger.warn("Failed to lock account " + accountId + ", account not found."); } } catch (Exception e) { - s_logger.error("Exception occurred while locking account by Quota Alert Manager", e); + logger.error("Exception occurred while locking account by Quota Alert Manager", e); throw e; } finally { TransactionLegacy.open(opendb).close(); @@ -387,7 +385,7 @@ protected void sendQuotaAlert(Account account, List emails, String subje mailProperties.setContentType("text/html; charset=utf-8"); if (CollectionUtils.isEmpty(emails)) { - s_logger.warn(String.format("Account [%s] does not have users with email registered, " + logger.warn(String.format("Account [%s] does not have users with email registered, " + "therefore we are unable to send quota alert email with subject [%s] and content [%s].", account.getUuid(), subject, body)); return; } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java 
b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java index 56a6edf5db47..4293415755a7 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java @@ -54,7 +54,6 @@ import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.math.NumberUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.usage.UsageVO; @@ -67,7 +66,6 @@ @Component public class QuotaManagerImpl extends ManagerBase implements QuotaManager { - private static final Logger s_logger = Logger.getLogger(QuotaManagerImpl.class.getName()); @Inject private AccountDao _accountDao; @@ -124,26 +122,26 @@ public boolean configure(String name, Map params) throws Configu _aggregationDuration = Integer.parseInt(aggregationRange); if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _aggregationDuration = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } - s_logger.info("Usage timezone = " + _usageTimezone + " AggregationDuration=" + _aggregationDuration); + logger.info("Usage timezone = " + _usageTimezone + " AggregationDuration=" + _aggregationDuration); return true; } @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Starting Quota Manager"); + if (logger.isInfoEnabled()) { + logger.info("Starting Quota Manager"); } return true; } @Override public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stopping Quota Manager"); + if (logger.isInfoEnabled()) { + logger.info("Stopping Quota Manager"); } return true; } @@ -152,7 +150,7 @@ 
protected void processQuotaBalanceForAccount(AccountVO accountVo, List creditsReceived = _quotaBalanceDao.findCreditBalance(accountId, domainId, startDate, endDate); - s_logger.debug(String.format("Account [%s] has [%s] credit entries before [%s].", accountToString, creditsReceived.size(), endDate)); + logger.debug(String.format("Account [%s] has [%s] credit entries before [%s].", accountToString, creditsReceived.size(), endDate)); BigDecimal aggregatedUsage = BigDecimal.ZERO; - s_logger.debug(String.format("Aggregating the account [%s] credit entries before [%s].", accountToString, endDate)); + logger.debug(String.format("Aggregating the account [%s] credit entries before [%s].", accountToString, endDate)); for (QuotaBalanceVO credit : creditsReceived) { aggregatedUsage = aggregatedUsage.add(credit.getCreditBalance()); } - s_logger.debug(String.format("The aggregation of the account [%s] credit entries before [%s] resulted in the value [%s].", accountToString, endDate, aggregatedUsage)); + logger.debug(String.format("The aggregation of the account [%s] credit entries before [%s] resulted in the value [%s].", accountToString, endDate, aggregatedUsage)); return aggregatedUsage; } @@ -268,7 +266,7 @@ public boolean calculateQuotaUsage() { List accounts = _accountDao.listAll(); String accountsToString = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(accounts, "id", "uuid", "accountName", "domainId"); - s_logger.info(String.format("Starting quota usage calculation for accounts [%s].", accountsToString)); + logger.info(String.format("Starting quota usage calculation for accounts [%s].", accountsToString)); Map, Boolean>> mapQuotaTariffsPerUsageType = createMapQuotaTariffsPerUsageType(); @@ -276,7 +274,7 @@ public boolean calculateQuotaUsage() { List usageRecords = getPendingUsageRecordsForQuotaAggregation(account); if (usageRecords == null) { - s_logger.debug(String.format("Account [%s] does not have pending usage records. 
Skipping to next account.", account.reflectionToString())); + logger.debug(String.format("Account [%s] does not have pending usage records. Skipping to next account.", account.reflectionToString())); continue; } @@ -284,7 +282,7 @@ public boolean calculateQuotaUsage() { processQuotaBalanceForAccount(account, quotaUsages); } - s_logger.info(String.format("Finished quota usage calculation for accounts [%s].", accountsToString)); + logger.info(String.format("Finished quota usage calculation for accounts [%s].", accountsToString)); return true; } @@ -300,7 +298,7 @@ protected List getPendingUsageRecordsForQuotaAggregation(AccountVO acco return null; } - s_logger.debug(String.format("Retrieved [%s] pending usage records for account [%s].", usageRecords.second(), account.reflectionToString())); + logger.debug(String.format("Retrieved [%s] pending usage records for account [%s].", usageRecords.second(), account.reflectionToString())); return records; } @@ -308,7 +306,7 @@ protected List getPendingUsageRecordsForQuotaAggregation(AccountVO acco protected List createQuotaUsagesAccordingToQuotaTariffs(AccountVO account, List usageRecords, Map, Boolean>> mapQuotaTariffsPerUsageType) { String accountToString = account.reflectionToString(); - s_logger.info(String.format("Calculating quota usage of [%s] usage records for account [%s].", usageRecords.size(), accountToString)); + logger.info(String.format("Calculating quota usage of [%s] usage records for account [%s].", usageRecords.size(), accountToString)); List> pairsUsageAndQuotaUsage = new ArrayList<>(); @@ -332,7 +330,7 @@ protected List createQuotaUsagesAccordingToQuotaTariffs(AccountVO pairsUsageAndQuotaUsage.add(new Pair<>(usageRecord, quotaUsage)); } } catch (Exception e) { - s_logger.error(String.format("Failed to calculate the quota usage for account [%s] due to [%s].", accountToString, e.getMessage()), e); + logger.error(String.format("Failed to calculate the quota usage for account [%s] due to [%s].", 
accountToString, e.getMessage()), e); return new ArrayList<>(); } @@ -341,7 +339,7 @@ protected List createQuotaUsagesAccordingToQuotaTariffs(AccountVO protected boolean shouldCalculateUsageRecord(AccountVO accountVO, UsageVO usageRecord) { if (Boolean.FALSE.equals(QuotaConfig.QuotaAccountEnabled.valueIn(accountVO.getAccountId()))) { - s_logger.debug(String.format("Considering usage record [%s] as calculated and skipping it because account [%s] has the quota plugin disabled.", + logger.debug(String.format("Considering usage record [%s] as calculated and skipping it because account [%s] has the quota plugin disabled.", usageRecord, accountVO.reflectionToString())); return false; } @@ -369,7 +367,7 @@ protected List persistUsagesAndQuotaUsagesAndRetrievePersistedQuot protected BigDecimal aggregateQuotaTariffsValues(UsageVO usageRecord, List quotaTariffs, boolean hasAnyQuotaTariffWithActivationRule, JsInterpreter jsInterpreter, String accountToString) { String usageRecordToString = usageRecord.toString(); - s_logger.debug(String.format("Validating usage record [%s] for account [%s] against [%s] quota tariffs.", usageRecordToString, accountToString, + logger.debug(String.format("Validating usage record [%s] for account [%s] against [%s] quota tariffs.", usageRecordToString, accountToString, quotaTariffs.size())); PresetVariables presetVariables = getPresetVariables(hasAnyQuotaTariffWithActivationRule, usageRecord); @@ -381,7 +379,7 @@ protected BigDecimal aggregateQuotaTariffsValues(UsageVO usageRecord, List params) throws Configu @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Starting Statement Manager"); + if (logger.isInfoEnabled()) { + logger.info("Starting Statement Manager"); } return true; } @Override public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stopping Statement Manager"); + if (logger.isInfoEnabled()) { + logger.info("Stopping Statement Manager"); } return true; } @@ -123,25 +121,25 @@ 
public void sendStatement() { if (account != null) { if (lastStatementDate == null || getDifferenceDays(lastStatementDate, new Date()) >= s_LAST_STATEMENT_SENT_DAYS + 1) { BigDecimal quotaUsage = _quotaUsage.findTotalQuotaUsage(account.getAccountId(), account.getDomainId(), null, interval[0].getTime(), interval[1].getTime()); - s_logger.info("For account=" + quotaAccount.getId() + ", quota used = " + quotaUsage); + logger.info("For account=" + quotaAccount.getId() + ", quota used = " + quotaUsage); // send statement deferredQuotaEmailList.add(new DeferredQuotaEmail(account, quotaAccount, quotaUsage, QuotaConfig.QuotaEmailTemplateTypes.QUOTA_STATEMENT)); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("For " + quotaAccount.getId() + " the statement has been sent recently"); + if (logger.isDebugEnabled()) { + logger.debug("For " + quotaAccount.getId() + " the statement has been sent recently"); } } } } else if (lastStatementDate != null) { - s_logger.info("For " + quotaAccount.getId() + " it is already more than " + getDifferenceDays(lastStatementDate, new Date()) + logger.info("For " + quotaAccount.getId() + " it is already more than " + getDifferenceDays(lastStatementDate, new Date()) + " days, will send statement in next cycle"); } } for (DeferredQuotaEmail emailToBeSent : deferredQuotaEmailList) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Attempting to send quota STATEMENT email to users of account: " + emailToBeSent.getAccount().getAccountName()); + if (logger.isDebugEnabled()) { + logger.debug("Attempting to send quota STATEMENT email to users of account: " + emailToBeSent.getAccount().getAccountName()); } _quotaAlert.sendQuotaAlert(emailToBeSent); } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java index 9723d3e5899f..afbcf346e0b0 100644 --- 
a/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/activationrule/presetvariables/PresetVariableHelper.java @@ -47,7 +47,8 @@ import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.apache.cloudstack.utils.jsinterpreter.JsInterpreter; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterVO; @@ -99,7 +100,7 @@ @Component public class PresetVariableHelper { - protected Logger logger = Logger.getLogger(PresetVariableHelper.class); + protected Logger logger = LogManager.getLogger(PresetVariableHelper.class); @Inject AccountDao accountDao; diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaAccountDaoImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaAccountDaoImpl.java index 084abcfc2b5d..b03b75f04f7a 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaAccountDaoImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaAccountDaoImpl.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.quota.constant.QuotaConfig; import org.apache.cloudstack.quota.vo.QuotaAccountVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.Pair; @@ -34,7 +33,6 @@ @Component public class QuotaAccountDaoImpl extends GenericDaoBase implements QuotaAccountDao { - public static final Logger s_logger = Logger.getLogger(QuotaAccountDaoImpl.class); @Override public List listAllQuotaAccount() { @@ -44,7 +42,7 @@ public List listAllQuotaAccount() { accountsWithQuotaEnabled.add(account); continue; } - s_logger.trace(String.format("Account [%s] has the quota plugin disabled. 
Thus, it will not receive quota emails.", account)); + logger.trace(String.format("Account [%s] has the quota plugin disabled. Thus, it will not receive quota emails.", account)); } return accountsWithQuotaEnabled; } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaBalanceDaoImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaBalanceDaoImpl.java index 0ca7d9dbf8d9..01272d1a6184 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaBalanceDaoImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaBalanceDaoImpl.java @@ -23,7 +23,6 @@ import java.util.List; import org.apache.cloudstack.quota.vo.QuotaBalanceVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Filter; @@ -37,7 +36,6 @@ @Component public class QuotaBalanceDaoImpl extends GenericDaoBase implements QuotaBalanceDao { - private static final Logger s_logger = Logger.getLogger(QuotaBalanceDaoImpl.class.getName()); @Override public QuotaBalanceVO findLastBalanceEntry(final Long accountId, final Long domainId, final Date beforeThis) { @@ -158,8 +156,8 @@ public List doInTransaction(final TransactionStatus status) { // get records before startDate to find start balance for (QuotaBalanceVO entry : quotaUsageRecords) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("FindQuotaBalance Entry=" + entry); + if (logger.isDebugEnabled()) { + logger.debug("FindQuotaBalance Entry=" + entry); } if (entry.getCreditsId() > 0) { trimmedRecords.add(entry); @@ -178,12 +176,12 @@ public BigDecimal lastQuotaBalance(final Long accountId, final Long domainId, Da List quotaBalance = lastQuotaBalanceVO(accountId, domainId, startDate); BigDecimal finalBalance = new BigDecimal(0); if (quotaBalance.isEmpty()) { - s_logger.info("There are no balance entries on or before the requested date."); + logger.info("There are no balance entries on or before the requested 
date."); return finalBalance; } for (QuotaBalanceVO entry : quotaBalance) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("lastQuotaBalance Entry=" + entry); + if (logger.isDebugEnabled()) { + logger.debug("lastQuotaBalance Entry=" + entry); } finalBalance = finalBalance.add(entry.getCreditBalance()); } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaEmailTemplatesDaoImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaEmailTemplatesDaoImpl.java index e774a52648ec..b44ace0a1ff4 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaEmailTemplatesDaoImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaEmailTemplatesDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; import org.apache.cloudstack.quota.vo.QuotaEmailTemplatesVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -33,7 +32,6 @@ @Component public class QuotaEmailTemplatesDaoImpl extends GenericDaoBase implements QuotaEmailTemplatesDao { - private static final Logger s_logger = Logger.getLogger(QuotaEmailTemplatesDaoImpl.class); protected SearchBuilder QuotaEmailTemplateSearch; diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java index 470c84ac4e17..2bd3409953a7 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaTariffDaoImpl.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.Pair; @@ -39,7 +38,6 @@ @Component public class 
QuotaTariffDaoImpl extends GenericDaoBase implements QuotaTariffDao { - private static final Logger s_logger = Logger.getLogger(QuotaTariffDaoImpl.class.getName()); private final SearchBuilder searchUsageType; private final SearchBuilder listAllIncludedUsageType; @@ -70,8 +68,8 @@ public QuotaTariffVO doInTransaction(final TransactionStatus status) { if (result != null && !result.isEmpty()) { return result.get(0); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("QuotaTariffDaoImpl::findTariffPlanByUsageType: Missing quota type " + quotaType); + if (logger.isDebugEnabled()) { + logger.debug("QuotaTariffDaoImpl::findTariffPlanByUsageType: Missing quota type " + quotaType); } return null; } @@ -124,8 +122,8 @@ public Pair, Integer> doInTransaction(final TransactionStatu List result = search(sc, filter); if (result != null && !result.isEmpty()) { tariffs.add(result.get(0)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("ListAllTariffPlans on or before " + effectiveDate + " quota type " + result.get(0).getUsageTypeDescription() + " , effective Date=" + if (logger.isDebugEnabled()) { + logger.debug("ListAllTariffPlans on or before " + effectiveDate + " quota type " + result.get(0).getUsageTypeDescription() + " , effective Date=" + result.get(0).getEffectiveOn() + " val=" + result.get(0).getCurrencyValue()); } } @@ -212,7 +210,7 @@ public QuotaTariffVO findByName(String name) { List quotaTariffs = pairQuotaTariffs.first(); if (CollectionUtils.isEmpty(quotaTariffs)) { - s_logger.debug(String.format("Could not find quota tariff with name [%s].", name)); + logger.debug(String.format("Could not find quota tariff with name [%s].", name)); return null; } @@ -225,7 +223,7 @@ public QuotaTariffVO findByUuid(String uuid) { List quotaTariffs = pairQuotaTariffs.first(); if (CollectionUtils.isEmpty(quotaTariffs)) { - s_logger.debug(String.format("Could not find quota tariff with UUID [%s].", uuid)); + logger.debug(String.format("Could not find quota tariff with 
UUID [%s].", uuid)); return null; } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaUsageDaoImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaUsageDaoImpl.java index 9134a4472b43..32b9c8d1d64c 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaUsageDaoImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/dao/QuotaUsageDaoImpl.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.cloudstack.quota.vo.QuotaUsageVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Filter; @@ -36,7 +35,6 @@ @Component public class QuotaUsageDaoImpl extends GenericDaoBase implements QuotaUsageDao { - private static final Logger s_logger = Logger.getLogger(QuotaUsageDaoImpl.class); @Override public BigDecimal findTotalQuotaUsage(final Long accountId, final Long domainId, final Integer usageType, final Date startDate, final Date endDate) { diff --git a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keys/KeysManagerImpl.java b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keys/KeysManagerImpl.java index 15bb49c65ff4..fa092ebdd3c6 100644 --- a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keys/KeysManagerImpl.java +++ b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keys/KeysManagerImpl.java @@ -23,7 +23,8 @@ import javax.net.ssl.KeyManager; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; @@ -50,7 +51,7 @@ * */ public class KeysManagerImpl implements KeysManager, Configurable { - private static final Logger s_logger = Logger.getLogger(KeysManagerImpl.class); + protected Logger 
logger = LogManager.getLogger(getClass()); @Inject ConfigurationDao _configDao; @@ -87,7 +88,7 @@ public String getEncryptionIV() { return EncryptionIV.value(); } - private static String getBase64EncodedRandomKey(int nBits) { + private String getBase64EncodedRandomKey(int nBits) { SecureRandom random; try { random = SecureRandom.getInstance("SHA1PRNG"); @@ -95,7 +96,7 @@ private static String getBase64EncodedRandomKey(int nBits) { random.nextBytes(keyBytes); return Base64.encodeBase64URLSafeString(keyBytes); } catch (NoSuchAlgorithmException e) { - s_logger.error("Unhandled exception: ", e); + logger.error("Unhandled exception: ", e); } return null; } diff --git a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java index 03a91fecc8ef..3fc2ff3702e1 100644 --- a/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java +++ b/framework/security/src/main/java/org/apache/cloudstack/framework/security/keystore/KeystoreManagerImpl.java @@ -31,7 +31,6 @@ import javax.inject.Inject; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.Ternary; @@ -41,7 +40,6 @@ @Component public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager { - private static final Logger s_logger = Logger.getLogger(KeystoreManagerImpl.class); @Inject private KeystoreDao _ksDao; @@ -49,7 +47,7 @@ public class KeystoreManagerImpl extends ManagerBase implements KeystoreManager @Override public boolean validateCertificate(String certificate, String key, String domainSuffix) { if (StringUtils.isAnyEmpty(certificate, key, domainSuffix)) { - s_logger.error("Invalid parameter found in (certificate, key, domainSuffix) tuple for domain: " + domainSuffix); + logger.error("Invalid 
parameter found in (certificate, key, domainSuffix) tuple for domain: " + domainSuffix); return false; } @@ -60,9 +58,9 @@ public boolean validateCertificate(String certificate, String key, String domain if (ks != null) return true; - s_logger.error("Unabled to construct keystore for domain: " + domainSuffix); + logger.error("Unable to construct keystore for domain: " + domainSuffix); } catch (Exception e) { - s_logger.error("Certificate validation failed due to exception for domain: " + domainSuffix, e); + logger.error("Certificate validation failed due to exception for domain: " + domainSuffix, e); } return false; } @@ -109,9 +107,9 @@ public byte[] getKeystoreBits(String name, String aliasForCertificateInStore, St return CertificateHelper.buildAndSaveKeystore(certs, storePassword); } catch (KeyStoreException | CertificateException | NoSuchAlgorithmException | InvalidKeySpecException | IOException e) { String msg = String.format("Unable to build keystore for %s due to %s", name, e.getClass().getSimpleName()); - s_logger.warn(msg); - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg, e); + logger.warn(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg, e); } } return null; diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java index ad26fb19fc37..beb535cb2965 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java @@ -18,10 +18,13 @@ */ package org.apache.cloudstack.spring.lifecycle; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.context.SmartLifecycle; public abstract class AbstractSmartLifeCycle implements SmartLifecycle { + protected
Logger logger = LogManager.getLogger(getClass()); boolean running = false; @Override diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java index b0c1dcc0760e..15c1ccac5d97 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java @@ -29,7 +29,6 @@ import javax.management.NotCompliantMBeanException; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.utils.component.ComponentLifecycle; import com.cloud.utils.component.SystemIntegrityChecker; @@ -39,7 +38,6 @@ public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { - private static final Logger log = Logger.getLogger(CloudStackExtendedLifeCycle.class); Map> sorted = new TreeMap>(); @@ -59,14 +57,14 @@ public void start() { protected void checkIntegrity() { for (SystemIntegrityChecker checker : getBeans(SystemIntegrityChecker.class)) { - log.info("Running system integrity checker " + checker); + logger.info("Running system integrity checker " + checker); checker.check(); } } public void startBeans() { - log.info("Starting CloudStack Components"); + logger.info("Starting CloudStack Components"); with(new WithComponentLifeCycle() { @Override @@ -78,34 +76,34 @@ public void with(ComponentLifecycle lifecycle) { try { JmxUtil.registerMBean(mbean); } catch (MalformedObjectNameException e) { - log.warn("Unable to register MBean: " + mbean.getName(), e); + logger.warn("Unable to register MBean: " + mbean.getName(), e); } catch (InstanceAlreadyExistsException e) { - log.warn("Unable to register MBean: " + mbean.getName(), e); + logger.warn("Unable to register MBean: " + mbean.getName(), e); } catch 
(MBeanRegistrationException e) { - log.warn("Unable to register MBean: " + mbean.getName(), e); + logger.warn("Unable to register MBean: " + mbean.getName(), e); } catch (NotCompliantMBeanException e) { - log.warn("Unable to register MBean: " + mbean.getName(), e); + logger.warn("Unable to register MBean: " + mbean.getName(), e); } - log.info("Registered MBean: " + mbean.getName()); + logger.info("Registered MBean: " + mbean.getName()); } } }); - log.info("Done Starting CloudStack Components"); + logger.info("Done Starting CloudStack Components"); } public void stopBeans() { with(new WithComponentLifeCycle() { @Override public void with(ComponentLifecycle lifecycle) { - log.info("stopping bean " + lifecycle.getName()); + logger.info("stopping bean " + lifecycle.getName()); lifecycle.stop(); } }); } private void configure() { - log.info("Configuring CloudStack Components"); + logger.info("Configuring CloudStack Components"); with(new WithComponentLifeCycle() { @Override @@ -113,13 +111,13 @@ public void with(ComponentLifecycle lifecycle) { try { lifecycle.configure(lifecycle.getName(), lifecycle.getConfigParams()); } catch (ConfigurationException e) { - log.error("Failed to configure " + lifecycle.getName(), e); + logger.error("Failed to configure " + lifecycle.getName(), e); throw new CloudRuntimeException(e); } } }); - log.info("Done Configuring CloudStack Components"); + logger.info("Done Configuring CloudStack Components"); } private void sortBeans() { diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java index 5c5e9165b172..3a9bb04ce967 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java @@ -22,7 +22,6 @@ import 
javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.utils.component.ComponentLifecycleBase; import com.cloud.utils.component.Named; @@ -30,7 +29,6 @@ public class DumpRegistry extends ComponentLifecycleBase { - private static final Logger log = Logger.getLogger(DumpRegistry.class); List> registries; @@ -55,7 +53,7 @@ public boolean start() { buffer.append(getName(o)); } - log.info("Registry [" + registry.getName() + "] contains [" + buffer + "]"); + logger.info("Registry [" + registry.getName() + "] contains [" + buffer + "]"); } return super.start(); diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java index a077bc8f4f01..47aa82b9dc02 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java @@ -28,7 +28,8 @@ import javax.annotation.PostConstruct; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.BeanNameAware; import org.apache.cloudstack.framework.config.ConfigKey; @@ -38,7 +39,7 @@ public class ExtensionRegistry implements Registry, Configurable, BeanNameAware { - private static final Logger log = Logger.getLogger(ExtensionRegistry.class); + protected Logger logger = LogManager.getLogger(getClass()); String name; String beanName; @@ -111,7 +112,7 @@ public boolean register(Object item) { registered.add(item); } - log.debug("Registering extension [" + name + "] in [" + this.name + "]"); + logger.debug("Registering extension [" + name + "] in [" + this.name + "]"); return true; } diff --git 
a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java index 43efd8461840..19d1fe3acc52 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java @@ -23,7 +23,8 @@ import java.util.Properties; import java.util.Set; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.BeansException; import org.springframework.beans.factory.config.BeanPostProcessor; import org.springframework.context.ApplicationContext; @@ -35,7 +36,7 @@ public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, ApplicationContextAware { - private static final Logger log = Logger.getLogger(RegistryLifecycle.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final String EXTENSION_EXCLUDE = "extensions.exclude"; public static final String EXTENSION_INCLUDE_PREFIX = "extensions.include."; @@ -70,7 +71,7 @@ protected synchronized boolean isExcluded(Object bean) { boolean result = excludes.contains(name); if (result) { - log.info("Excluding extension [" + name + "] based on configuration"); + logger.info("Excluding extension [" + name + "] based on configuration"); } return result; @@ -109,7 +110,7 @@ public void start() { while (iter.hasNext()) { Object next = iter.next(); if (registry.register(next)) { - log.debug("Registered " + next); + logger.debug("Registered " + next); } else { iter.remove(); } diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java 
b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java index f054d3998ac5..8bbbc35f7e5f 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java @@ -24,7 +24,8 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.context.ApplicationContext; import org.springframework.context.ConfigurableApplicationContext; import org.springframework.core.io.Resource; @@ -36,7 +37,7 @@ public class CloudStackSpringContext { - private static final Logger log = Logger.getLogger(CloudStackSpringContext.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final String CLOUDSTACK_CONTEXT_SERVLET_KEY = CloudStackSpringContext.class.getSimpleName(); public static final String CLOUDSTACK_CONTEXT = "META-INF/cloudstack"; @@ -76,7 +77,7 @@ public void registerShutdownHook() { for (String appName : contextMap.keySet()) { ApplicationContext contex = contextMap.get(appName); if (contex instanceof ConfigurableApplicationContext) { - log.trace("registering shutdown hook for bean "+ appName); + logger.trace("registering shutdown hook for bean "+ appName); ((ConfigurableApplicationContext)contex).registerShutdownHook(); } } @@ -129,7 +130,7 @@ public String[] getConfigLocationsForWeb(String name, String[] configured) { String urlString = r.getURL().toExternalForm(); urlList.add(urlString); } catch (IOException e) { - log.error("Failed to create URL for " + r.getDescription(), e); + logger.error("Failed to create URL for " + r.getDescription(), e); } } diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java 
b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java index 6c03c3ce9e16..d61e26fc3a8c 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@ -33,7 +33,8 @@ import java.util.Stack; import org.apache.commons.io.IOUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.BeansException; import org.springframework.context.ApplicationContext; import org.springframework.context.annotation.Bean; @@ -48,7 +49,7 @@ public class DefaultModuleDefinitionSet implements ModuleDefinitionSet { - private static final Logger log = Logger.getLogger(DefaultModuleDefinitionSet.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final String DEFAULT_CONFIG_RESOURCES = "DefaultConfigResources"; public static final String DEFAULT_CONFIG_PROPERTIES = "DefaultConfigProperties"; @@ -98,26 +99,26 @@ protected void startContexts() { public void with(ModuleDefinition def, Stack parents) { try { String moduleDefinitionName = def.getName(); - log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); + logger.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); ApplicationContext context = getApplicationContext(moduleDefinitionName); try { if (context.containsBean("moduleStartup")) { Runnable runnable = context.getBean("moduleStartup", Runnable.class); - log.info(String.format("Starting module [%s].", moduleDefinitionName)); + logger.info(String.format("Starting module [%s].", moduleDefinitionName)); runnable.run(); } else { - log.debug(String.format("Could not get module [%s] context bean.", moduleDefinitionName)); + 
logger.debug(String.format("Could not get module [%s] context bean.", moduleDefinitionName)); } } catch (BeansException e) { - log.warn(String.format("Failed to start module [%s] due to: [%s].", moduleDefinitionName, e.getMessage())); - if (log.isDebugEnabled()) { - log.debug(String.format("module start failure of module [%s] was due to: ", moduleDefinitionName), e); + logger.warn(String.format("Failed to start module [%s] due to: [%s].", moduleDefinitionName, e.getMessage())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("module start failure of module [%s] was due to: ", moduleDefinitionName), e); } } } catch (EmptyStackException e) { - log.warn(String.format("Failed to obtain module context due to [%s]. Using root context instead.", e.getMessage())); - if (log.isDebugEnabled()) { - log.debug("Failed to obtain module context: ", e); + logger.warn(String.format("Failed to obtain module context due to [%s]. Using root context instead.", e.getMessage())); + if (logger.isDebugEnabled()) { + logger.debug("Failed to obtain module context: ", e); } } } @@ -131,22 +132,22 @@ public void with(ModuleDefinition def, Stack parents) { try { String moduleDefinitionName = def.getName(); if (parents.isEmpty()) { - log.debug(String.format("Could not find module [%s] context as they have no parents.", moduleDefinitionName)); + logger.debug(String.format("Could not find module [%s] context as they have no parents.", moduleDefinitionName)); return; } - log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); + logger.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); ApplicationContext parent = getApplicationContext(parents.peek().getName()); - log.debug(String.format("Trying to load module [%s] context.", moduleDefinitionName)); + logger.debug(String.format("Trying to load module [%s] context.", moduleDefinitionName)); loadContext(def, parent); } catch (EmptyStackException e) { - 
log.warn(String.format("Failed to obtain module context due to [%s]. Using root context instead.", e.getMessage())); - if (log.isDebugEnabled()) { - log.debug("Failed to obtain module context: ", e); + logger.warn(String.format("Failed to obtain module context due to [%s]. Using root context instead.", e.getMessage())); + if (logger.isDebugEnabled()) { + logger.debug("Failed to obtain module context: ", e); } } catch (BeansException e) { - log.warn(String.format("Failed to start module [%s] due to: [%s].", def.getName(), e.getMessage())); - if (log.isDebugEnabled()) { - log.debug(String.format("module start failure of module [%s] was due to: ", def.getName()), e); + logger.warn(String.format("Failed to start module [%s] due to: [%s].", def.getName(), e.getMessage())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("module start failure of module [%s] was due to: ", def.getName()), e); } } } @@ -163,13 +164,13 @@ protected ApplicationContext loadContext(ModuleDefinition def, ApplicationContex context.setClassLoader(def.getClassLoader()); long start = System.currentTimeMillis(); - if (log.isInfoEnabled()) { + if (logger.isInfoEnabled()) { for (Resource resource : resources) { - log.info("Loading module context [" + def.getName() + "] from " + resource); + logger.info("Loading module context [" + def.getName() + "] from " + resource); } } context.refresh(); - log.info("Loaded module context [" + def.getName() + "] in " + (System.currentTimeMillis() - start) + " ms"); + logger.info("Loaded module context [" + def.getName() + "] in " + (System.currentTimeMillis() - start) + " ms"); contexts.put(def.getName(), context); @@ -249,7 +250,7 @@ protected void printHierarchy() { withModule(new WithModule() { @Override public void with(ModuleDefinition def, Stack parents) { - log.info(String.format("Module Hierarchy:%" + ((parents.size() * 2) + 1) + "s%s", "", def.getName())); + logger.info(String.format("Module Hierarchy:%" + ((parents.size() * 2) + 1) + "s%s", 
"", def.getName())); } }); } @@ -264,7 +265,7 @@ protected void withModule(ModuleDefinition def, Stack parents, return; if (!shouldLoad(def)) { - log.info("Excluding context [" + def.getName() + "] based on configuration"); + logger.info("Excluding context [" + def.getName() + "] based on configuration"); return; } diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java index 549c69d5da9b..3b6133b91b4a 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java @@ -23,7 +23,8 @@ import javax.servlet.ServletContext; import javax.servlet.ServletContextEvent; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.context.ApplicationContext; import org.springframework.web.context.ConfigurableWebApplicationContext; import org.springframework.web.context.ContextLoaderListener; @@ -35,7 +36,7 @@ public class CloudStackContextLoaderListener extends ContextLoaderListener { public static final String WEB_PARENT_MODULE = "parentModule"; public static final String WEB_PARENT_MODULE_DEFAULT = "web"; - private static final Logger log = Logger.getLogger(CloudStackContextLoaderListener.class); + protected Logger logger = LogManager.getLogger(getClass()); CloudStackSpringContext cloudStackContext; String configuredParentName; @@ -47,13 +48,13 @@ protected ApplicationContext loadParentContext(ServletContext servletContext) { @Override public void contextInitialized(ServletContextEvent event) { - log.trace("context initialized"); + logger.trace("context initialized"); try { cloudStackContext = new CloudStackSpringContext(); 
cloudStackContext.registerShutdownHook(); event.getServletContext().setAttribute(CloudStackSpringContext.CLOUDSTACK_CONTEXT_SERVLET_KEY, cloudStackContext); } catch (IOException e) { - log.error("Failed to start CloudStack", e); + logger.error("Failed to start CloudStack", e); throw new RuntimeException("Failed to initialize CloudStack Spring modules", e); } @@ -67,7 +68,7 @@ public void contextInitialized(ServletContextEvent event) { @Override protected void customizeContext(ServletContext servletContext, ConfigurableWebApplicationContext applicationContext) { - log.trace("customize context"); + logger.trace("customize context"); super.customizeContext(servletContext, applicationContext); String[] newLocations = cloudStackContext.getConfigLocationsForWeb(configuredParentName, applicationContext.getConfigLocations()); diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index 0fe1a81a6cf2..8c3b0f4651dc 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -298,7 +298,7 @@ do cp client/target/conf/$name ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name done -ln -sf log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/log4j.xml +ln -sf log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/log4j2.xml install python/bindir/cloud-external-ipallocator.py ${RPM_BUILD_ROOT}%{_bindir}/%{name}-external-ipallocator.py install -D client/target/pythonlibs/jasypt-1.9.3.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/lib/jasypt-1.9.3.jar @@ -591,7 +591,7 @@ pip3 install --upgrade urllib3 %config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/server.properties %config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/config.json %config(noreplace) %{_sysconfdir}/%{name}/management/log4j-cloud.xml -%config(noreplace) %{_sysconfdir}/%{name}/management/log4j.xml +%config(noreplace) %{_sysconfdir}/%{name}/management/log4j2.xml %config(noreplace) 
%{_sysconfdir}/%{name}/management/environment.properties %config(noreplace) %{_sysconfdir}/%{name}/management/java.security.ciphers %attr(0644,root,root) %{_unitdir}/%{name}-management.service diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index ce425d381652..1c62efad5e8a 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -280,7 +280,7 @@ do cp client/target/conf/$name ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name done -ln -sf log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/log4j.xml +ln -sf log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/log4j2.xml install python/bindir/cloud-external-ipallocator.py ${RPM_BUILD_ROOT}%{_bindir}/%{name}-external-ipallocator.py install -D client/target/pythonlibs/jasypt-1.9.3.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/lib/jasypt-1.9.3.jar @@ -570,7 +570,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/server.properties %config(noreplace) %attr(0640,root,cloud) %{_sysconfdir}/%{name}/management/config.json %config(noreplace) %{_sysconfdir}/%{name}/management/log4j-cloud.xml -%config(noreplace) %{_sysconfdir}/%{name}/management/log4j.xml +%config(noreplace) %{_sysconfdir}/%{name}/management/log4j2.xml %config(noreplace) %{_sysconfdir}/%{name}/management/environment.properties %config(noreplace) %{_sysconfdir}/%{name}/management/java.security.ciphers %attr(0644,root,root) %{_unitdir}/%{name}-management.service diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index cca9e3388687..94b763d013f9 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ 
b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -27,7 +27,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.APICommand; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RolePermissionEntity.Permission; import com.cloud.exception.PermissionDeniedException; @@ -49,7 +48,6 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API private List services; private Map> annotationRoleBasedApisMap = new HashMap>(); - private static final Logger LOGGER = Logger.getLogger(DynamicRoleBasedAPIAccessChecker.class.getName()); protected DynamicRoleBasedAPIAccessChecker() { super(); @@ -92,8 +90,8 @@ public boolean checkApiPermissionByRole(Role role, String apiName, List services; - private static final Logger LOGGER = Logger.getLogger(ProjectRoleBasedApiAccessChecker.class.getName()); protected ProjectRoleBasedApiAccessChecker() { super(); } @@ -61,9 +59,7 @@ private void denyApiAccess(final String commandName) throws PermissionDeniedExce @Override public boolean isEnabled() { if (!roleService.isEnabled()) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("RoleService is disabled. We will not use ProjectRoleBasedApiAccessChecker."); - } + logger.trace("RoleService is disabled. 
We will not use ProjectRoleBasedApiAccessChecker."); } return roleService.isEnabled(); } @@ -76,7 +72,7 @@ public List getApisAllowedToUser(Role role, User user, List apiN Project project = CallContext.current().getProject(); if (project == null) { - LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); return apiNames; } @@ -86,8 +82,8 @@ public List getApisAllowedToUser(Role role, User user, List apiN if (projectUser.getAccountRole() != ProjectAccount.Role.Admin) { apiNames.removeIf(apiName -> !isPermitted(project, projectUser, apiName)); } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Returning APIs [%s] as allowed for user [%s].", apiNames, user)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Returning APIs [%s] as allowed for user [%s].", apiNames, user)); } return apiNames; } @@ -100,8 +96,8 @@ public List getApisAllowedToUser(Role role, User user, List apiN if (projectAccount.getAccountRole() != ProjectAccount.Role.Admin) { apiNames.removeIf(apiName -> !isPermitted(project, projectAccount, apiName)); } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Returning APIs [%s] as allowed for user [%s].", apiNames, user)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Returning APIs [%s] as allowed for user [%s].", apiNames, user)); } return apiNames; } @@ -114,16 +110,14 @@ public boolean checkAccess(User user, String apiCommandName) throws PermissionDe Project project = CallContext.current().getProject(); if (project == null) { - LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, + 
logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, user)); return true; } Account userAccount = accountService.getAccount(user.getAccountId()); if (accountService.isRootAdmin(userAccount.getId()) || accountService.isDomainAdmin(userAccount.getAccountId())) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); - } + logger.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", userAccount.getAccountName())); return true; } diff --git a/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java b/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java index 7d12178f0f38..3444f967d784 100644 --- a/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java +++ b/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java @@ -26,7 +26,6 @@ import javax.naming.ConfigurationException; import com.cloud.exception.UnavailableCommandException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @Deprecated public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIAclChecker { - protected static final Logger LOGGER = Logger.getLogger(StaticRoleBasedAPIAccessChecker.class); private Set commandPropertyFiles = new HashSet(); private Set commandNames = new HashSet(); @@ -74,7 +72,7 @@ public StaticRoleBasedAPIAccessChecker() { @Override public boolean isEnabled() { if (roleService.isEnabled()) { - LOGGER.debug("RoleService is enabled. We will use it instead of StaticRoleBasedAPIAccessChecker."); + logger.debug("RoleService is enabled. 
We will use it instead of StaticRoleBasedAPIAccessChecker."); } return !roleService.isEnabled(); } @@ -180,7 +178,7 @@ private void processMapping(Map configMap) { commandsPropertiesRoleBasedApisMap.get(roleType).add(apiName); } } catch (NumberFormatException nfe) { - LOGGER.error(String.format("Malformed key=value pair for entry: [%s].", entry)); + logger.error(String.format("Malformed key=value pair for entry: [%s].", entry)); } } } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index 952830284410..ec6674477b07 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; -import org.apache.log4j.Logger; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; @@ -56,7 +55,6 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - private static final Logger s_logger = Logger.getLogger(ExplicitDedicationProcessor.class); @Inject protected UserVmDao _vmDao; @Inject @@ -96,8 +94,8 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) { if (vmGroupMapping != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing affinity group " + vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Processing affinity group " + 
vmGroupMapping.getAffinityGroupId() + "of type 'ExplicitDedication' for VM Id: " + vm.getId()); } long affinityGroupId = vmGroupMapping.getAffinityGroupId(); @@ -234,13 +232,13 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud avoid = updateAvoidList(resourceList, avoid, dc); } else { avoid.addDataCenter(dc.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("No dedicated resources available for this domain or account under this group"); + if (logger.isDebugEnabled()) { + logger.debug("No dedicated resources available for this domain or account under this group"); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("ExplicitDedicationProcessor returns Avoid List as: Deploy avoids pods: " + avoid.getPodsToAvoid() + ", clusters: " + + if (logger.isDebugEnabled()) { + logger.debug("ExplicitDedicationProcessor returns Avoid List as: Deploy avoids pods: " + avoid.getPodsToAvoid() + ", clusters: " + avoid.getClustersToAvoid() + ", hosts: " + avoid.getHostsToAvoid()); } } @@ -305,8 +303,8 @@ private ExcludeList updateAvoidList(List dedicatedResources for (HostPodVO pod : podList) { DedicatedResourceVO dPod = _dedicatedDao.findByPodId(pod.getId()); if (dPod != null && !dedicatedResources.contains(dPod)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Avoiding POD %s [%s] because it is not dedicated.", pod.getName(), pod.getUuid())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Avoiding POD %s [%s] because it is not dedicated.", pod.getName(), pod.getUuid())); } avoidList.addPod(pod.getId()); } else { @@ -346,8 +344,8 @@ private ExcludeList updateAvoidList(List dedicatedResources for (HostPodVO pod : pods) { if (podsInIncludeList != null && !podsInIncludeList.contains(pod.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Avoiding POD %s [%s], as it is not in include list.", pod.getName(), pod.getUuid())); + if (logger.isDebugEnabled()) { + 
logger.debug(String.format("Avoiding POD %s [%s], as it is not in include list.", pod.getName(), pod.getUuid())); } avoidList.addPod(pod.getId()); } @@ -413,8 +411,8 @@ public void handleDeleteGroup(final AffinityGroup group) { if (group != null) { List dedicatedResources = _dedicatedDao.listByAffinityGroupId(group.getId()); if (!dedicatedResources.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing the dedicated resources under group: " + group); + if (logger.isDebugEnabled()) { + logger.debug("Releasing the dedicated resources under group: " + group); } Transaction.execute(new TransactionCallbackNoReturn() { @@ -431,8 +429,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No dedicated resources to releease under group: " + group); + if (logger.isDebugEnabled()) { + logger.debug("No dedicated resources to release under group: " + group); } } } diff --git a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java index 07c1dd5ff884..7f316fe7a91d 100644 --- a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -42,7 +41,6 @@ public class HostAffinityProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - private static final Logger s_logger = Logger.getLogger(HostAffinityProcessor.class); @Inject
protected VMInstanceDao _vmInstanceDao; @@ -68,7 +66,7 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud */ protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, DeploymentPlan plan, VirtualMachine vm, List vmList) { AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - s_logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); groupVMIds.remove(vm.getId()); diff --git a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java index 2a3c5796dda4..9feeeed2b6da 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java +++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -46,7 +45,6 @@ public class HostAntiAffinityProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - private static final Logger s_logger = Logger.getLogger(HostAntiAffinityProcessor.class); @Inject protected UserVmDao _vmDao; @Inject @@ -71,8 +69,8 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud if (vmGroupMapping != null) { AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId()); - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId()); } List groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId()); @@ -83,15 +81,15 @@ public void process(VirtualMachineProfile vmProfile, DeploymentPlan plan, Exclud if (groupVM != null && !groupVM.isRemoved()) { if (groupVM.getHostId() != null) { avoid.addHost(groupVM.getHostId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); + if (logger.isDebugEnabled()) { + logger.debug("Added host " + groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host"); } } else if (Arrays.asList(VirtualMachine.State.Starting, VirtualMachine.State.Stopped).contains(groupVM.getState()) && groupVM.getLastHostId() != null) { long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { avoid.addHost(groupVM.getLastHostId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + + if (logger.isDebugEnabled()) { + logger.debug("Added host " + groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() + " is present on the host, in Stopped state but has reserved capacity"); } } @@ -131,8 +129,8 @@ public boolean check(VirtualMachineProfile vmProfile, DeployDestination plannedD for (Long groupVMId : groupVMIds) { VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId); if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Planned destination 
for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + + if (logger.isDebugEnabled()) { + logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() + " reserved on the same host " + plannedHostId); } return false; diff --git a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java index cdb3447f5a52..f227a3ffc8db 100644 --- a/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java +++ b/plugins/affinity-group-processors/non-strict-host-affinity/src/main/java/org/apache/cloudstack/affinity/NonStrictHostAffinityProcessor.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -45,7 +44,6 @@ public class NonStrictHostAffinityProcessor extends AffinityProcessorBase implements AffinityGroupProcessor { - private final Logger logger = Logger.getLogger(this.getClass().getName()); @Inject protected UserVmDao vmDao; @Inject diff --git a/plugins/alert-handlers/snmp-alerts/pom.xml b/plugins/alert-handlers/snmp-alerts/pom.xml index 527031cc215d..fad47d426f2e 100644 --- a/plugins/alert-handlers/snmp-alerts/pom.xml +++ b/plugins/alert-handlers/snmp-alerts/pom.xml @@ -33,8 +33,12 @@ org.apache.servicemix.bundles.snmp4j - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api diff --git a/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java 
b/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java index 5761e7041e76..cf9e18bf9cf7 100644 --- a/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java +++ b/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayout.java @@ -17,42 +17,27 @@ package org.apache.cloudstack.alert.snmp; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.message.Message; + import java.util.Date; import java.util.StringTokenizer; -import org.apache.log4j.EnhancedPatternLayout; -import org.apache.log4j.spi.LoggingEvent; - -public class SnmpEnhancedPatternLayout extends EnhancedPatternLayout { +public class SnmpEnhancedPatternLayout { private String _pairDelimiter = "//"; private String _keyValueDelimiter = "::"; private static final int LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER = 9; private static final int LENGTH_OF_STRING_MESSAGE = 8; - public String getKeyValueDelimeter() { - return _keyValueDelimiter; - } - - public void setKeyValueDelimiter(String keyValueDelimiter) { - this._keyValueDelimiter = keyValueDelimiter; - } - - public String getPairDelimiter() { - return _pairDelimiter; - } - - public void setPairDelimiter(String pairDelimiter) { - this._pairDelimiter = pairDelimiter; - } - - public SnmpTrapInfo parseEvent(LoggingEvent event) { + public SnmpTrapInfo parseEvent(LogEvent event) { SnmpTrapInfo snmpTrapInfo = null; - final String message = event.getRenderedMessage(); - if (message.contains("alertType") && message.contains("message")) { + Message message = event.getMessage(); + final String formattedMessage = message.getFormattedMessage(); + if (formattedMessage.contains("alertType") && formattedMessage.contains("message")) { snmpTrapInfo = new SnmpTrapInfo(); - final StringTokenizer messageSplitter = new StringTokenizer(message, _pairDelimiter); + final StringTokenizer 
messageSplitter = new StringTokenizer(formattedMessage, _pairDelimiter); while (messageSplitter.hasMoreTokens()) { final String pairToken = messageSplitter.nextToken(); final StringTokenizer pairSplitter = new StringTokenizer(pairToken, _keyValueDelimiter); @@ -80,11 +65,11 @@ public SnmpTrapInfo parseEvent(LoggingEvent event) { } else if (keyToken.equalsIgnoreCase("clusterId") && !valueToken.equalsIgnoreCase("null")) { snmpTrapInfo.setClusterId(Long.parseLong(valueToken)); } else if (keyToken.equalsIgnoreCase("message") && !valueToken.equalsIgnoreCase("null")) { - snmpTrapInfo.setMessage(getSnmpMessage(message)); + snmpTrapInfo.setMessage(getSnmpMessage(formattedMessage)); } } - snmpTrapInfo.setGenerationTime(new Date(event.getTimeStamp())); + snmpTrapInfo.setGenerationTime(new Date(event.getTimeMillis())); } return snmpTrapInfo; } diff --git a/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java b/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java index 5374e39916d4..d91c60db31d2 100644 --- a/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java +++ b/plugins/alert-handlers/snmp-alerts/src/main/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppender.java @@ -17,21 +17,32 @@ package org.apache.cloudstack.alert.snmp; +import java.io.Serializable; import java.util.ArrayList; import java.util.List; import java.util.StringTokenizer; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.spi.ErrorCode; -import org.apache.log4j.spi.LoggingEvent; - import com.cloud.utils.net.NetUtils; - -public class SnmpTrapAppender extends AppenderSkeleton { +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.Filter; +import org.apache.logging.log4j.core.Layout; +import org.apache.logging.log4j.core.LogEvent; +import 
org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.config.Property; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.config.plugins.PluginElement; +import org.apache.logging.log4j.core.config.plugins.PluginAttribute; +import org.apache.logging.log4j.core.config.plugins.PluginFactory; +import org.apache.logging.log4j.core.layout.PatternLayout; + +@Plugin(name = "SnmpTrapAppender", category = "Core", elementType = "appender", printObject = true) +public class SnmpTrapAppender extends AbstractAppender { + protected static Logger LOGGER = LogManager.getLogger(SnmpTrapAppender.class); private String _delimiter = ","; - private String _snmpManagerIpAddresses; - private String _snmpManagerPorts; - private String _snmpManagerCommunities; + private String snmpManagerIpAddresses; + private String snmpManagerPorts; + private String snmpManagerCommunities; private String _oldSnmpManagerIpAddresses = null; private String _oldSnmpManagerPorts = null; @@ -41,27 +52,21 @@ public class SnmpTrapAppender extends AppenderSkeleton { private List _communities = null; private List _ports = null; - List _snmpHelpers = new ArrayList(); + private SnmpEnhancedPatternLayout snmpEnhancedPatternLayout; - @Override - protected void append(LoggingEvent event) { - SnmpEnhancedPatternLayout snmpEnhancedPatternLayout; - - if (getLayout() == null) { - errorHandler.error("No layout set for the Appender named [" + getName() + ']', null, ErrorCode.MISSING_LAYOUT); - return; - } - - if (getLayout() instanceof SnmpEnhancedPatternLayout) { - snmpEnhancedPatternLayout = (SnmpEnhancedPatternLayout)getLayout(); - } else { - return; - } + List _snmpHelpers = new ArrayList<>(); - if (!isAsSevereAsThreshold(event.getLevel())) { - return; - } + protected SnmpTrapAppender(String name, Filter filter, Layout layout, final boolean ignoreExceptions, final Property[] properties, + String snmpManagerIpAddresses, String 
snmpManagerPorts, String snmpManagerCommunities) { + super(name, filter, layout, ignoreExceptions, properties); + this.snmpEnhancedPatternLayout = new SnmpEnhancedPatternLayout(); + this.snmpManagerIpAddresses = snmpManagerIpAddresses; + this.snmpManagerPorts = snmpManagerPorts; + this.snmpManagerCommunities = snmpManagerCommunities; + } + @Override + public void append(LogEvent event) { SnmpTrapInfo snmpTrapInfo = snmpEnhancedPatternLayout.parseEvent(event); if (snmpTrapInfo != null && !_snmpHelpers.isEmpty()) { @@ -69,41 +74,57 @@ protected void append(LoggingEvent event) { try { helper.sendSnmpTrap(snmpTrapInfo); } catch (Exception e) { - errorHandler.error(e.getMessage()); + getHandler().error(e.getMessage()); } } } } + @PluginFactory + public static SnmpTrapAppender createAppender(@PluginAttribute("name") String name, @PluginElement("Layout") Layout layout, + @PluginElement("Filter") final Filter filter, @PluginAttribute("ignoreExceptions") boolean ignoreExceptions, @PluginElement("properties") final Property[] properties, + @PluginAttribute("SnmpManagerIpAddresses") String snmpManagerIpAddresses, @PluginAttribute("SnmpManagerPorts") String snmpManagerPorts, + @PluginAttribute("SnmpManagerCommunities") String snmpManagerCommunities) { + + if (name == null) { + LOGGER.error("No name provided for SnmpTrapAppender"); + return null; + } + if (layout == null) { + layout = PatternLayout.createDefaultLayout(); + } + return new SnmpTrapAppender(name, filter, layout, ignoreExceptions, properties, snmpManagerIpAddresses, snmpManagerPorts, snmpManagerCommunities); + } + void setSnmpHelpers() { - if (_snmpManagerIpAddresses == null || _snmpManagerIpAddresses.trim().isEmpty() || _snmpManagerCommunities == null || _snmpManagerCommunities.trim().isEmpty() || - _snmpManagerPorts == null || _snmpManagerPorts.trim().isEmpty()) { + if (snmpManagerIpAddresses == null || snmpManagerIpAddresses.trim().isEmpty() || snmpManagerCommunities == null || 
snmpManagerCommunities.trim().isEmpty() || + snmpManagerPorts == null || snmpManagerPorts.trim().isEmpty()) { reset(); return; } - if (_oldSnmpManagerIpAddresses != null && _oldSnmpManagerIpAddresses.equals(_snmpManagerIpAddresses) && - _oldSnmpManagerCommunities.equals(_snmpManagerCommunities) && _oldSnmpManagerPorts.equals(_snmpManagerPorts)) { + if (_oldSnmpManagerIpAddresses != null && _oldSnmpManagerIpAddresses.equals(snmpManagerIpAddresses) && + _oldSnmpManagerCommunities.equals(snmpManagerCommunities) && _oldSnmpManagerPorts.equals(snmpManagerPorts)) { return; } - _oldSnmpManagerIpAddresses = _snmpManagerIpAddresses; - _oldSnmpManagerPorts = _snmpManagerPorts; - _oldSnmpManagerCommunities = _snmpManagerCommunities; + _oldSnmpManagerIpAddresses = snmpManagerIpAddresses; + _oldSnmpManagerPorts = snmpManagerPorts; + _oldSnmpManagerCommunities = snmpManagerCommunities; - _ipAddresses = parse(_snmpManagerIpAddresses); - _communities = parse(_snmpManagerCommunities); - _ports = parse(_snmpManagerPorts); + _ipAddresses = parse(snmpManagerIpAddresses); + _communities = parse(snmpManagerCommunities); + _ports = parse(snmpManagerPorts); if (!(_ipAddresses.size() == _communities.size() && _ipAddresses.size() == _ports.size())) { reset(); - errorHandler.error(" size of ip addresses , communities, " + "and ports list doesn't match, " + "setting all to null"); + getHandler().error(" size of ip addresses , communities, " + "and ports list doesn't match, " + "setting all to null"); return; } if (!validateIpAddresses() || !validatePorts()) { reset(); - errorHandler.error(" Invalid format for the IP Addresses or Ports parameter "); + getHandler().error(" Invalid format for the IP Addresses or Ports parameter "); return; } @@ -114,7 +135,7 @@ void setSnmpHelpers() { try { _snmpHelpers.add(new SnmpHelper(address, _communities.get(i))); } catch (Exception e) { - errorHandler.error(e.getMessage()); + getHandler().error(e.getMessage()); } } } @@ -126,17 +147,6 @@ private void 
reset() { _snmpHelpers.clear(); } - @Override - public void close() { - if (!closed) - closed = true; - } - - @Override - public boolean requiresLayout() { - return true; - } - private List parse(String str) { List result = new ArrayList(); @@ -168,38 +178,20 @@ private boolean validateIpAddresses() { return true; } - public String getSnmpManagerIpAddresses() { - return _snmpManagerIpAddresses; - } - public void setSnmpManagerIpAddresses(String snmpManagerIpAddresses) { - this._snmpManagerIpAddresses = snmpManagerIpAddresses; + this.snmpManagerIpAddresses = snmpManagerIpAddresses; setSnmpHelpers(); } - public String getSnmpManagerPorts() { - return _snmpManagerPorts; - } public void setSnmpManagerPorts(String snmpManagerPorts) { - this._snmpManagerPorts = snmpManagerPorts; + this.snmpManagerPorts = snmpManagerPorts; setSnmpHelpers(); } - public String getSnmpManagerCommunities() { - return _snmpManagerCommunities; - } - public void setSnmpManagerCommunities(String snmpManagerCommunities) { - this._snmpManagerCommunities = snmpManagerCommunities; + this.snmpManagerCommunities = snmpManagerCommunities; setSnmpHelpers(); } - public String getDelimiter() { - return _delimiter; - } - - public void setDelimiter(String delimiter) { - this._delimiter = delimiter; - } } diff --git a/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java b/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java index a04d36bede8b..adfc0e27b0e9 100644 --- a/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java +++ b/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpEnhancedPatternLayoutTest.java @@ -20,56 +20,57 @@ import static junit.framework.Assert.assertEquals; import static junit.framework.Assert.assertNotNull; import static junit.framework.Assert.assertNull; -import 
static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.message.Message; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; -import javax.naming.ConfigurationException; - -import org.apache.log4j.spi.LoggingEvent; -import org.junit.Before; import org.junit.Test; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +@RunWith(MockitoJUnitRunner.class) public class SnmpEnhancedPatternLayoutTest { - SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = new SnmpEnhancedPatternLayout(); - @Before - public void setUp() throws ConfigurationException { - _snmpEnhancedPatternLayout.setKeyValueDelimiter("::"); - _snmpEnhancedPatternLayout.setPairDelimiter("//"); - } + @Mock + Message messageMock; + @Mock + LogEvent eventMock; + + @Spy + @InjectMocks + SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = new SnmpEnhancedPatternLayout(); @Test public void parseAlertTest() { - LoggingEvent event = mock(LoggingEvent.class); setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" - + " network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); - SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + + " network CIDR is not configured originally. Set it default to 10.102.192.0/22", eventMock); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(eventMock); commonAssertions(info, "Management network CIDR is not configured originally. Set it default to 10.102.192" + ".0/22"); } @Test public void ParseAlertWithPairDelimeterInMessageTest() { - LoggingEvent event = mock(LoggingEvent.class); setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" - + " //network CIDR is not configured originally. 
Set it default to 10.102.192.0/22", event); - SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + + " //network CIDR is not configured originally. Set it default to 10.102.192.0/22", eventMock); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(eventMock); commonAssertions(info, "Management //network CIDR is not configured originally. Set it default to 10.102.192" + ".0/22"); } @Test public void ParseAlertWithKeyValueDelimeterInMessageTest() { - LoggingEvent event = mock(LoggingEvent.class); setMessage(" alertType:: 14 // dataCenterId:: 1 // podId:: 1 // " + "clusterId:: null // message:: Management" - + " ::network CIDR is not configured originally. Set it default to 10.102.192.0/22", event); - SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(event); + + " ::network CIDR is not configured originally. Set it default to 10.102.192.0/22", eventMock); + SnmpTrapInfo info = _snmpEnhancedPatternLayout.parseEvent(eventMock); commonAssertions(info, "Management ::network CIDR is not configured originally. 
Set it default to 10.102.192" + ".0/22"); } @Test public void parseRandomTest() { - LoggingEvent event = mock(LoggingEvent.class); - when(event.getRenderedMessage()).thenReturn("Problem clearing email alert"); - assertNull(" Null value was expected ", _snmpEnhancedPatternLayout.parseEvent(event)); + setMessage("Problem clearing email alert", eventMock); + assertNull(" Null value was expected ", _snmpEnhancedPatternLayout.parseEvent(eventMock)); } private void commonAssertions(SnmpTrapInfo info, String message) { @@ -81,7 +82,8 @@ private void commonAssertions(SnmpTrapInfo info, String message) { assertEquals(" message is not as expected ", message, info.getMessage()); } - private void setMessage(String message, LoggingEvent event) { - when(event.getRenderedMessage()).thenReturn(message); + private void setMessage(String message, LogEvent eventMock) { + Mockito.doReturn(messageMock).when(eventMock).getMessage(); + Mockito.doReturn(message).when(messageMock).getFormattedMessage(); } } diff --git a/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java b/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java index 36fb0c9e5c24..ce207e08266a 100644 --- a/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java +++ b/plugins/alert-handlers/snmp-alerts/src/test/java/org/apache/cloudstack/alert/snmp/SnmpTrapAppenderTest.java @@ -19,20 +19,11 @@ import static junit.framework.Assert.assertEquals; import static junit.framework.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import java.util.List; - -import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; -import org.mockito.Mock; public class SnmpTrapAppenderTest { - SnmpTrapAppender _appender = new SnmpTrapAppender(); - LoggingEvent _event = mock(LoggingEvent.class); - SnmpEnhancedPatternLayout _snmpEnhancedPatternLayout = 
mock(SnmpEnhancedPatternLayout.class); - @Mock - List snmpHelpers; + SnmpTrapAppender _appender = new SnmpTrapAppender("test", null, null, false, null, null, null, null); @Test public void appendTest() { diff --git a/plugins/alert-handlers/syslog-alerts/pom.xml b/plugins/alert-handlers/syslog-alerts/pom.xml index 1fdbc597faf7..54641bd8a8a8 100644 --- a/plugins/alert-handlers/syslog-alerts/pom.xml +++ b/plugins/alert-handlers/syslog-alerts/pom.xml @@ -29,8 +29,12 @@ - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api diff --git a/plugins/alert-handlers/syslog-alerts/src/main/java/org/apache/cloudstack/syslog/AlertsSyslogAppender.java b/plugins/alert-handlers/syslog-alerts/src/main/java/org/apache/cloudstack/syslog/AlertsSyslogAppender.java index b73da2fd4219..a6f511558dc1 100644 --- a/plugins/alert-handlers/syslog-alerts/src/main/java/org/apache/cloudstack/syslog/AlertsSyslogAppender.java +++ b/plugins/alert-handlers/syslog-alerts/src/main/java/org/apache/cloudstack/syslog/AlertsSyslogAppender.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.syslog; +import java.io.Serializable; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; @@ -26,20 +27,31 @@ import java.util.Map; import java.util.StringTokenizer; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.net.SyslogAppender; -import org.apache.log4j.spi.LoggingEvent; import com.cloud.utils.net.NetUtils; - -public class AlertsSyslogAppender extends AppenderSkeleton { - String _syslogHosts = null; - String _delimiter = ","; - List _syslogHostsList = null; - List _syslogAppenders = null; - private String _facility; - private String _pairDelimiter = "//"; - private String _keyValueDelimiter = "::"; +import org.apache.logging.log4j.core.Filter; +import org.apache.logging.log4j.core.Layout; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; 
+import org.apache.logging.log4j.core.appender.SyslogAppender; +import org.apache.logging.log4j.core.config.Property; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.config.plugins.PluginAttribute; +import org.apache.logging.log4j.core.config.plugins.PluginElement; +import org.apache.logging.log4j.core.config.plugins.PluginFactory; +import org.apache.logging.log4j.core.impl.Log4jLogEvent; +import org.apache.logging.log4j.core.net.Facility; +import org.apache.logging.log4j.message.SimpleMessage; + +@Plugin(name = "AlertSyslogAppender", category = "Core", elementType = "appender", printObject = true) +public class AlertsSyslogAppender extends AbstractAppender { + String syslogHosts; + String delimiter = ","; + List syslogHostsList = null; + List syslogAppenders = null; + private String facility; + private String pairDelimiter = "//"; + private String keyValueDelimiter = "::"; private int alertType = -1; private long dataCenterId = 0; private long podId = 0; @@ -53,7 +65,7 @@ public class AlertsSyslogAppender extends AppenderSkeleton { private static final Map alertsMap; static { - Map aMap = new HashMap(27); + Map aMap = new HashMap<>(27); aMap.put(0, "availableMemory"); aMap.put(1, "availableCpu"); aMap.put(2, "availableStorage"); @@ -86,70 +98,68 @@ public class AlertsSyslogAppender extends AppenderSkeleton { alertsMap = Collections.unmodifiableMap(aMap); } - @Override - protected void append(LoggingEvent event) { - if (!isAsSevereAsThreshold(event.getLevel())) { - return; - } + protected AlertsSyslogAppender(String name, Filter filter, Layout layout, final boolean ignoreExceptions, final Property[] properties, String facility, + String syslogHosts){ + super(name, filter, layout, ignoreExceptions, properties); + this.facility = facility; + this.syslogHosts = syslogHosts; + } - if (_syslogAppenders != null && !_syslogAppenders.isEmpty()) { + @Override + public void append(LogEvent event) { + if (syslogAppenders != 
null && !syslogAppenders.isEmpty()) { try { - String logMessage = event.getRenderedMessage(); + String logMessage = event.getMessage().getFormattedMessage(); if (logMessage.contains("alertType") && logMessage.contains("message")) { parseMessage(logMessage); String syslogMessage = createSyslogMessage(); - LoggingEvent syslogEvent = new LoggingEvent(event.getFQNOfLoggerClass(), event.getLogger(), event.getLevel(), syslogMessage, null); + LogEvent syslogEvent = new Log4jLogEvent(event.getLoggerName(), event.getMarker(), event.getLoggerFqcn(), event.getLevel(), new SimpleMessage(syslogMessage), event.getThrown()); - for (SyslogAppender syslogAppender : _syslogAppenders) { + for (SyslogAppender syslogAppender : syslogAppenders) { syslogAppender.append(syslogEvent); } } } catch (Exception e) { - errorHandler.error(e.getMessage()); + getHandler().error(e.getMessage()); } } } - @Override - synchronized public void close() { - for (SyslogAppender syslogAppender : _syslogAppenders) { - syslogAppender.close(); - } - } - - @Override - public boolean requiresLayout() { - return true; + @PluginFactory + public static AlertsSyslogAppender createAppender(@PluginAttribute("name") String name, @PluginElement("Layout") Layout layout, + @PluginElement("Filter") final Filter filter, @PluginAttribute("ignoreExceptions") boolean ignoreExceptions, @PluginElement("properties") final Property[] properties, + @PluginAttribute("facility") String facility, @PluginAttribute("syslogHosts") String syslogHosts) { + return new AlertsSyslogAppender(name, filter, layout, ignoreExceptions, properties, facility, syslogHosts); } void setSyslogAppenders() { - if (_syslogAppenders == null) { - _syslogAppenders = new ArrayList(); + if (syslogAppenders == null) { + syslogAppenders = new ArrayList(); } - if (_syslogHosts == null || _syslogHosts.trim().isEmpty()) { + if (syslogHosts == null || syslogHosts.trim().isEmpty()) { reset(); return; } - _syslogHostsList = parseSyslogHosts(_syslogHosts); + 
syslogHostsList = parseSyslogHosts(syslogHosts); if (!validateIpAddresses()) { reset(); - errorHandler.error(" Invalid format for the IP Addresses parameter "); + getHandler().error(" Invalid format for the IP Addresses parameter "); return; } - for (String syslogHost : _syslogHostsList) { - _syslogAppenders.add(new SyslogAppender(getLayout(), syslogHost, SyslogAppender.getFacility(_facility))); + for (String syslogHost : syslogHostsList) { + syslogAppenders.add(SyslogAppender.newSyslogAppenderBuilder().setFacility(Facility.toFacility(facility)).setHost(syslogHost).setLayout(getLayout()).build()); } } private List parseSyslogHosts(String syslogHosts) { List result = new ArrayList(); - final StringTokenizer tokenizer = new StringTokenizer(syslogHosts, _delimiter); + final StringTokenizer tokenizer = new StringTokenizer(syslogHosts, delimiter); while (tokenizer.hasMoreTokens()) { result.add(tokenizer.nextToken().trim()); } @@ -157,7 +167,7 @@ private List parseSyslogHosts(String syslogHosts) { } private boolean validateIpAddresses() { - for (String ipAddress : _syslogHostsList) { + for (String ipAddress : syslogHostsList) { String[] hostTokens = (ipAddress.trim()).split(":"); String ip = hostTokens[0]; @@ -181,10 +191,10 @@ private boolean validateIpAddresses() { } void parseMessage(String logMessage) { - final StringTokenizer messageSplitter = new StringTokenizer(logMessage, _pairDelimiter); + final StringTokenizer messageSplitter = new StringTokenizer(logMessage, pairDelimiter); while (messageSplitter.hasMoreTokens()) { final String pairToken = messageSplitter.nextToken(); - final StringTokenizer pairSplitter = new StringTokenizer(pairToken, _keyValueDelimiter); + final StringTokenizer pairSplitter = new StringTokenizer(pairToken, keyValueDelimiter); String keyToken; String valueToken; @@ -231,60 +241,47 @@ String createSyslogMessage() { } if (alertType >= 0) { - message.append("alertType").append(_keyValueDelimiter).append(" 
").append(alertsMap.containsKey(alertType) ? alertsMap.get(alertType) : "unknown") + message.append("alertType").append(keyValueDelimiter).append(" ").append(alertsMap.containsKey(alertType) ? alertsMap.get(alertType) : "unknown") .append(MESSAGE_DELIMITER_STRING); if (dataCenterId != 0) { - message.append("dataCenterId").append(_keyValueDelimiter).append(" ").append(dataCenterId).append(MESSAGE_DELIMITER_STRING); + message.append("dataCenterId").append(keyValueDelimiter).append(" ").append(dataCenterId).append(MESSAGE_DELIMITER_STRING); } if (podId != 0) { - message.append("podId").append(_keyValueDelimiter).append(" ").append(podId).append(MESSAGE_DELIMITER_STRING); + message.append("podId").append(keyValueDelimiter).append(" ").append(podId).append(MESSAGE_DELIMITER_STRING); } if (clusterId != 0) { - message.append("clusterId").append(_keyValueDelimiter).append(" ").append(clusterId).append(MESSAGE_DELIMITER_STRING); + message.append("clusterId").append(keyValueDelimiter).append(" ").append(clusterId).append(MESSAGE_DELIMITER_STRING); } if (sysMessage != null) { - message.append("message").append(_keyValueDelimiter).append(" ").append(sysMessage); + message.append("message").append(keyValueDelimiter).append(" ").append(sysMessage); } else { - errorHandler.error("What is the use of alert without message "); + getHandler().error("What is the use of alert without message "); } } else { - errorHandler.error("Invalid alert Type "); + getHandler().error("Invalid alert Type "); } return message.toString(); } private String getSyslogMessage(String message) { - int lastIndexOfKeyValueDelimiter = message.lastIndexOf(_keyValueDelimiter); + int lastIndexOfKeyValueDelimiter = message.lastIndexOf(keyValueDelimiter); int lastIndexOfMessageInString = message.lastIndexOf("message"); if (lastIndexOfKeyValueDelimiter - lastIndexOfMessageInString <= LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER) { - return message.substring(lastIndexOfKeyValueDelimiter + 
_keyValueDelimiter.length()).trim(); + return message.substring(lastIndexOfKeyValueDelimiter + keyValueDelimiter.length()).trim(); } else if (lastIndexOfMessageInString < lastIndexOfKeyValueDelimiter) { - return message.substring(lastIndexOfMessageInString + _keyValueDelimiter.length() + LENGTH_OF_STRING_MESSAGE).trim(); + return message.substring(lastIndexOfMessageInString + keyValueDelimiter.length() + LENGTH_OF_STRING_MESSAGE).trim(); } - return message.substring(message.lastIndexOf("message" + _keyValueDelimiter) + LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER).trim(); + return message.substring(message.lastIndexOf("message" + keyValueDelimiter) + LENGTH_OF_STRING_MESSAGE_AND_KEY_VALUE_DELIMITER).trim(); } private void reset() { - _syslogAppenders.clear(); - } - - public void setFacility(String facility) { - if (facility == null) { - return; - } - - _facility = facility; - if (_syslogAppenders != null && !_syslogAppenders.isEmpty()) { - for (SyslogAppender syslogAppender : _syslogAppenders) { - syslogAppender.setFacility(facility); - } - } + syslogAppenders.clear(); } private String severityOfAlert(int alertType) { @@ -304,40 +301,9 @@ private boolean isCritical(int alertType) { return false; } - public String getFacility() { - return _facility; - } - - public String getSyslogHosts() { - return _syslogHosts; - } - public void setSyslogHosts(String syslogHosts) { - _syslogHosts = syslogHosts; + this.syslogHosts = syslogHosts; setSyslogAppenders(); } - public String getDelimiter() { - return _delimiter; - } - - public void setDelimiter(String delimiter) { - _delimiter = delimiter; - } - - public String getPairDelimiter() { - return _pairDelimiter; - } - - public void setPairDelimiter(String pairDelimiter) { - _pairDelimiter = pairDelimiter; - } - - public String getKeyValueDelimiter() { - return _keyValueDelimiter; - } - - public void setKeyValueDelimiter(String keyValueDelimiter) { - _keyValueDelimiter = keyValueDelimiter; - } } diff --git 
a/plugins/alert-handlers/syslog-alerts/src/test/java/org/apache/cloudstack/syslog/AlertsSyslogAppenderTest.java b/plugins/alert-handlers/syslog-alerts/src/test/java/org/apache/cloudstack/syslog/AlertsSyslogAppenderTest.java index fe071a6c3277..b76a259409c5 100644 --- a/plugins/alert-handlers/syslog-alerts/src/test/java/org/apache/cloudstack/syslog/AlertsSyslogAppenderTest.java +++ b/plugins/alert-handlers/syslog-alerts/src/test/java/org/apache/cloudstack/syslog/AlertsSyslogAppenderTest.java @@ -17,41 +17,32 @@ package org.apache.cloudstack.syslog; +import org.apache.logging.log4j.core.config.Property; +import org.apache.logging.log4j.core.layout.PatternLayout; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import javax.naming.ConfigurationException; - -import org.apache.log4j.PatternLayout; -import org.junit.Before; import org.junit.Test; public class AlertsSyslogAppenderTest { - AlertsSyslogAppender _appender = new AlertsSyslogAppender(); - - @Before - public void setUp() throws ConfigurationException { - _appender.setLayout(new PatternLayout("%-5p [%c{3}] (%t:%x) %m%n")); - _appender.setFacility("LOCAL6"); - } - + AlertsSyslogAppender _appender = new AlertsSyslogAppender("test", null, PatternLayout.createDefaultLayout(), true, Property.EMPTY_ARRAY, "LOCAL6", null); @Test public void setSyslogAppendersTest() { _appender.setSyslogHosts("10.1.1.1,10.1.1.2"); - assertEquals(" error Syslog Appenders list size not as expected ", 2, _appender._syslogAppenders.size()); + assertEquals(" error Syslog Appenders list size not as expected ", 2, _appender.syslogAppenders.size()); } @Test public void setSyslogAppendersWithPortTest() { _appender.setSyslogHosts("10.1.1.1:897,10.1.1.2"); - assertEquals(" error Syslog Appenders list size not as expected ", 2, _appender._syslogAppenders.size()); + assertEquals(" error Syslog Appenders list size not as expected ", 2, _appender.syslogAppenders.size()); } @Test public void 
setSyslogAppendersNegativeTest() { //setting invalid IP for Syslog Hosts _appender.setSyslogHosts("10.1.1."); - assertTrue(" list was expected to be empty", _appender._syslogAppenders.isEmpty()); + assertTrue(" list was expected to be empty", _appender.syslogAppenders.isEmpty()); } @Test diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java index 8cf643edb04b..aa78a725a34f 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/command/user/discovery/ListApisCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; import org.apache.cloudstack.acl.RoleType; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListApisCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListApisCmd.class.getName()); @Inject ApiDiscoveryService _apiDiscoveryService; diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index 3bdf2e9ce922..239bc49a65a2 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -44,7 +44,6 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; -import 
org.apache.log4j.Logger; import org.reflections.ReflectionUtils; import org.springframework.stereotype.Component; @@ -60,7 +59,6 @@ @Component public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements ApiDiscoveryService { - private static final Logger s_logger = Logger.getLogger(ApiDiscoveryServiceImpl.class); List _apiAccessCheckers = null; List _services = null; @@ -83,13 +81,13 @@ public boolean start() { s_apiNameDiscoveryResponseMap = new HashMap(); Set> cmdClasses = new LinkedHashSet>(); for (PluggableService service : _services) { - s_logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); + logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); cmdClasses.addAll(service.getCommands()); } cmdClasses.addAll(this.getCommands()); cacheResponseMap(cmdClasses); long endTime = System.nanoTime(); - s_logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); + logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); } return true; @@ -108,8 +106,8 @@ protected Map> cacheResponseMap(Set> cmdClasses) { } String apiName = apiCmdAnnotation.name(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found api: " + apiName); + if (logger.isTraceEnabled()) { + logger.trace("Found api: " + apiName); } ApiDiscoveryResponse response = getCmdRequestMap(cmdClass, apiCmdAnnotation); @@ -258,7 +256,7 @@ public ListResponse listApis(User user, String name) { try { apiChecker.checkAccess(user, name); } catch (Exception ex) { - s_logger.error(String.format("API discovery access check failed for [%s] with error [%s].", name, ex.getMessage()), ex); + logger.error(String.format("API discovery access check failed for [%s] with error [%s].", name, ex.getMessage()), ex); return null; } } @@ -277,7 +275,7 @@ public 
ListResponse listApis(User user, String name) { } if (role.getRoleType() == RoleType.Admin && role.getId() == RoleType.Admin.getId()) { - s_logger.info(String.format("Account [%s] is Root Admin, all APIs are allowed.", + logger.info(String.format("Account [%s] is Root Admin, all APIs are allowed.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(account, "accountName", "uuid"))); } else { for (APIChecker apiChecker : _apiAccessCheckers) { diff --git a/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java b/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java index 5be0dfcb678c..8f5624f5b951 100644 --- a/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java +++ b/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; @@ -39,7 +38,6 @@ @APICommand(name = "resetApiLimit", responseObject = SuccessResponse.class, description = "Reset api count", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ResetApiLimitCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(ResetApiLimitCmd.class.getName()); @Inject diff --git a/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java b/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java index 027d9bef459c..eafe2782d33f 100644 --- a/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java +++ b/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; 
-import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -35,7 +34,6 @@ @APICommand(name = "getApiLimit", responseObject = ApiLimitResponse.class, description = "Get API limit count for the caller", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetApiLimitCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(GetApiLimitCmd.class.getName()); @Inject diff --git a/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java b/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java index 3192727fbeb4..917cd7bb2b46 100644 --- a/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java +++ b/plugins/api/rate-limit/src/main/java/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.acl.Role; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.acl.APIChecker; @@ -47,7 +46,6 @@ @Component public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, ApiRateLimitService { - private static final Logger s_logger = Logger.getLogger(ApiRateLimitServiceImpl.class); /** * True if api rate limiting is enabled @@ -100,7 +98,7 @@ public boolean configure(String name, Map params) throws Configu CacheManager cm = CacheManager.create(); Cache cache = new Cache("api-limit-cache", maxElements, false, false, timeToLive, timeToLive); cm.addCache(cache); - s_logger.info("Limit Cache created with timeToLive=" + timeToLive + ", maxAllowed=" + maxAllowed + ", maxElements=" + maxElements); + logger.info("Limit Cache created with timeToLive=" + timeToLive + ", maxAllowed=" + maxAllowed + ", maxElements=" + maxElements); 
cacheStore.setCache(cache); _store = cacheStore; @@ -158,7 +156,7 @@ public List getApisAllowedToUser(Role role, User user, List apiN public void throwExceptionDueToApiRateLimitReached(Long accountId) throws RequestLimitException { long expireAfter = _store.get(accountId).getExpireDuration(); String msg = String.format("The given user has reached his/her account api limit, please retry after [%s] ms.", expireAfter); - s_logger.warn(msg); + logger.warn(msg); throw new RequestLimitException(msg); } @@ -176,7 +174,7 @@ public boolean checkAccess(User user, String apiCommandName) throws PermissionDe public boolean checkAccess(Account account, String commandName) { Long accountId = account.getAccountId(); if (_accountService.isRootAdmin(accountId)) { - s_logger.info(String.format("Account [%s] is Root Admin, in this case, API limit does not apply.", + logger.info(String.format("Account [%s] is Root Admin, in this case, API limit does not apply.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(account, "accountName", "uuid"))); return true; } @@ -203,7 +201,7 @@ public boolean hasApiRateLimitBeenExceeded(Long accountId) { int current = entry.incrementAndGet(); if (current <= maxAllowed) { - s_logger.trace(String.format("Account %s has current count [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(account, "uuid", "accountName"), current)); + logger.trace(String.format("Account %s has current count [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(account, "uuid", "accountName"), current)); return false; } return true; @@ -212,7 +210,7 @@ public boolean hasApiRateLimitBeenExceeded(Long accountId) { @Override public boolean isEnabled() { if (!enabled) { - s_logger.debug("API rate limiting is disabled. We will not use ApiRateLimitService."); + logger.debug("API rate limiting is disabled. 
We will not use ApiRateLimitService."); } return enabled; } diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java index ff3c307a87bf..5ff7f82a7b0e 100644 --- a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetPathForVolumeCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; @@ -29,7 +28,6 @@ @APICommand(name = "getPathForVolume", responseObject = ApiPathForVolumeResponse.class, description = "Get the path associated with the provided volume UUID", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetPathForVolumeCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(GetPathForVolumeCmd.class.getName()); @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "CloudStack Volume UUID", required = true) private String _volumeUuid; @@ -47,7 +45,7 @@ public long getEntityOwnerId() { @Override public void execute() { - LOGGER.info("'GetPathForVolumeIdCmd.execute' method invoked"); + logger.info("'GetPathForVolumeIdCmd.execute' method invoked"); String pathForVolume = _util.getPathForVolumeUuid(_volumeUuid); diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java index af6400ce1c5f..baedb0389d55 100644 --- 
a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireAccountIdCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; @@ -31,7 +30,6 @@ @APICommand(name = "getSolidFireAccountId", responseObject = ApiSolidFireAccountIdResponse.class, description = "Get SolidFire Account ID", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetSolidFireAccountIdCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(GetSolidFireAccountIdCmd.class.getName()); @Parameter(name = ApiConstants.ACCOUNT_ID, type = CommandType.STRING, description = "CloudStack Account UUID", required = true) private String csAccountUuid; @@ -52,7 +50,7 @@ public long getEntityOwnerId() { @Override public void execute() { - LOGGER.info("'GetSolidFireAccountIdCmd.execute' method invoked"); + logger.info("'GetSolidFireAccountIdCmd.execute' method invoked"); long sfAccountId = manager.getSolidFireAccountId(csAccountUuid, storagePoolUuid); diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdsCmd.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdsCmd.java index 31330f066e55..c250c870f8d7 100644 --- a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdsCmd.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeAccessGroupIdsCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; @@ -34,7 +33,6 @@ @APICommand(name = "getSolidFireVolumeAccessGroupIds", responseObject = ApiSolidFireVolumeAccessGroupIdsResponse.class, description = "Get the SF Volume Access Group IDs", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetSolidFireVolumeAccessGroupIdsCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(GetSolidFireVolumeAccessGroupIdsCmd.class.getName()); @Parameter(name = ApiConstants.CLUSTER_ID, type = CommandType.STRING, description = "Cluster UUID", required = true) private String clusterUuid; @@ -61,7 +59,7 @@ public long getEntityOwnerId() { @Override public void execute() { - LOGGER.info("'GetSolidFireVolumeAccessGroupIdsCmd.execute' method invoked"); + logger.info("'GetSolidFireVolumeAccessGroupIdsCmd.execute' method invoked"); long[] sfVagIds = manager.getSolidFireVolumeAccessGroupIds(clusterUuid, storagePoolUuid); diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java index 9179ec26a8b3..10af3be25b06 100644 --- a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetSolidFireVolumeSizeCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; @@ -31,7 +30,6 @@ @APICommand(name = "getSolidFireVolumeSize", responseObject = ApiSolidFireVolumeSizeResponse.class, description = "Get the SF volume size including Hypervisor Snapshot Reserve", requestHasSensitiveInfo = false, responseHasSensitiveInfo = 
false) public class GetSolidFireVolumeSizeCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(GetSolidFireVolumeSizeCmd.class.getName()); @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "Volume UUID", required = true) private String volumeUuid; @@ -50,7 +48,7 @@ public long getEntityOwnerId() { @Override public void execute() { - LOGGER.info("'GetSolidFireVolumeSizeCmd.execute' method invoked"); + logger.info("'GetSolidFireVolumeSizeCmd.execute' method invoked"); long sfVolumeSize = manager.getSolidFireVolumeSize(volumeUuid); diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java index 91fe6bba1cb0..bbb86bec31ed 100644 --- a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeSnapshotDetailsCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; @@ -34,7 +33,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetVolumeSnapshotDetailsCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(GetVolumeSnapshotDetailsCmd.class.getName()); @Parameter(name = ApiConstants.SNAPSHOT_ID, type = CommandType.STRING, description = "CloudStack Snapshot UUID", required = true) private String snapshotUuid; @@ -52,7 +50,7 @@ public long getEntityOwnerId() { @Override public void execute() { - LOGGER.info("'" + GetVolumeSnapshotDetailsCmd.class.getSimpleName() + ".execute' method invoked"); + logger.info("'" + 
GetVolumeSnapshotDetailsCmd.class.getSimpleName() + ".execute' method invoked"); List responses = util.getSnapshotDetails(snapshotUuid); diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java index 41ed1069418d..e2063ce9d69a 100644 --- a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/command/admin/solidfire/GetVolumeiScsiNameCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; @@ -30,7 +29,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetVolumeiScsiNameCmd extends BaseCmd { - private static final Logger LOGGER = Logger.getLogger(GetVolumeiScsiNameCmd.class.getName()); @Parameter(name = ApiConstants.VOLUME_ID, type = CommandType.STRING, description = "CloudStack Volume UUID", required = true) private String volumeUuid; @@ -48,7 +46,7 @@ public long getEntityOwnerId() { @Override public void execute() { - LOGGER.info("'GetVolumeiScsiNameCmd.execute' method invoked"); + logger.info("'GetVolumeiScsiNameCmd.execute' method invoked"); String volume_iScsiName = _util.getVolume_iScsiName(volumeUuid); diff --git a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java index 4adcbbe89d8d..91868f4ef8f0 100644 --- 
a/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java +++ b/plugins/api/solidfire-intg-test/src/main/java/org/apache/cloudstack/api/solidfire/ApiSolidFireIntegrationTestServiceImpl.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import org.apache.cloudstack.api.command.admin.solidfire.GetPathForVolumeCmd; -// import org.apache.log4j.Logger; import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireAccountIdCmd; import org.apache.cloudstack.api.command.admin.solidfire.GetSolidFireVolumeAccessGroupIdsCmd; import org.apache.cloudstack.api.command.admin.solidfire.GetVolumeSnapshotDetailsCmd; diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java index 84504d13e4c9..b9dd659905b0 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/api/command/admin/sioc/UpdateSiocInfoCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,7 +36,6 @@ since = "4.11.0", authorized = {RoleType.Admin}) public class UpdateSiocInfoCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(UpdateSiocInfoCmd.class.getName()); ///////////////////////////////////////////////////// @@ -78,7 +76,7 @@ public long getEntityOwnerId() { @Override public void execute() { - s_logger.info("'UpdateSiocInfoCmd.execute' method invoked"); + logger.info("'UpdateSiocInfoCmd.execute' method invoked"); String msg = "Success"; diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java 
b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java index f012dbfa9725..c87ff3dfc827 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/sioc/SiocManagerImpl.java @@ -30,7 +30,8 @@ import org.apache.cloudstack.util.LoginInfo; import org.apache.cloudstack.util.vmware.VMwareUtil; import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterVO; @@ -63,7 +64,7 @@ @Component public class SiocManagerImpl implements SiocManager { - private static final Logger LOGGER = Logger.getLogger(SiocManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int LOCK_TIME_IN_SECONDS = 3; private static final int ONE_GB_IN_BYTES = 1000000000; private static final int LOWEST_SHARES_PER_VIRTUAL_DISK = 2000; // We want this to be greater than 1,000, which is the VMware default value. 
@@ -82,7 +83,7 @@ public class SiocManagerImpl implements SiocManager { @Override public void updateSiocInfo(long zoneId, long storagePoolId, int sharesPerGB, int limitIopsPerGB, int iopsNotifyThreshold) throws Exception { - LOGGER.info("'SiocManagerImpl.updateSiocInfo(long, long, int, int, int)' method invoked"); + logger.info("'SiocManagerImpl.updateSiocInfo(long, long, int, int, int)' method invoked"); DataCenterVO zone = zoneDao.findById(zoneId); @@ -250,7 +251,7 @@ private ResultWrapper updateSiocInfo(VMwareUtil.VMwareConnection connection, Map tasks.add(task); - LOGGER.info(getInfoMsg(volumeVO, newShares, newLimitIops)); + logger.info(getInfoMsg(volumeVO, newShares, newLimitIops)); } catch (Exception ex) { throw new Exception("Error: " + ex.getMessage()); } @@ -321,7 +322,7 @@ private ResultWrapper updateSiocInfoForWorkerVM(VMwareUtil.VMwareConnection conn tasks.add(task); - LOGGER.info(getInfoMsgForWorkerVm(newLimitIops)); + logger.info(getInfoMsgForWorkerVm(newLimitIops)); } catch (Exception ex) { throw new Exception("Error: " + ex.getMessage()); } diff --git a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/util/vmware/VMwareUtil.java b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/util/vmware/VMwareUtil.java index 209945fa4714..ae93c3431fc9 100644 --- a/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/util/vmware/VMwareUtil.java +++ b/plugins/api/vmware-sioc/src/main/java/org/apache/cloudstack/util/vmware/VMwareUtil.java @@ -35,7 +35,8 @@ import javax.xml.ws.WebServiceException; import org.apache.cloudstack.util.LoginInfo; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; import com.vmware.vim25.DynamicProperty; @@ -71,7 +72,7 @@ import com.vmware.vim25.VirtualSCSIController; public class VMwareUtil { - private static final Logger s_logger = Logger.getLogger(VMwareUtil.class); + 
protected static Logger LOGGER = LogManager.getLogger(VMwareUtil.class); private VMwareUtil() {} @@ -315,7 +316,7 @@ public static boolean waitForTask(VMwareConnection connection, ManagedObjectRefe throw new Exception(((LocalizedMethodFault)result[1]).getLocalizedMessage()); } } catch (WebServiceException we) { - s_logger.debug("Cancelling vCenter task because the task failed with the following error: " + we.getLocalizedMessage()); + LOGGER.debug("Cancelling vCenter task because the task failed with the following error: " + we.getLocalizedMessage()); connection.getVimPortType().cancelTask(task); diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index fabc9821fd3e..fa376f992ed1 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import org.apache.cloudstack.backup.dao.BackupDao; -import org.apache.log4j.Logger; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; @@ -35,7 +34,6 @@ public class DummyBackupProvider extends AdapterBase implements BackupProvider { - private static final Logger s_logger = Logger.getLogger(DummyBackupProvider.class); @Inject private BackupDao backupDao; @@ -52,7 +50,7 @@ public String getDescription() { @Override public List listBackupOfferings(Long zoneId) { - s_logger.debug("Listing backup policies on Dummy B&R Plugin"); + logger.debug("Listing backup policies on Dummy B&R Plugin"); BackupOffering policy1 = new BackupOfferingVO(1, "gold-policy", "dummy", "Golden Policy", "Gold description", true); BackupOffering policy2 = new BackupOfferingVO(1, "silver-policy", "dummy", "Silver Policy", "Silver description", true); return Arrays.asList(policy1, policy2); @@ -60,26 +58,26 @@ public List 
listBackupOfferings(Long zoneId) { @Override public boolean isValidProviderOffering(Long zoneId, String uuid) { - s_logger.debug("Checking if backup offering exists on the Dummy Backup Provider"); + logger.debug("Checking if backup offering exists on the Dummy Backup Provider"); return true; } @Override public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) { - s_logger.debug("Creating VM backup for VM " + vm.getInstanceName() + " from backup offering " + backupOffering.getName()); + logger.debug("Creating VM backup for VM " + vm.getInstanceName() + " from backup offering " + backupOffering.getName()); ((VMInstanceVO) vm).setBackupExternalId("dummy-external-backup-id"); return true; } @Override public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { - s_logger.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring vm " + vm.getUuid() + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); return true; } @Override public Pair restoreBackedUpVolume(Backup backup, String volumeUuid, String hostIp, String dataStoreUuid) { - s_logger.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); + logger.debug("Restoring volume " + volumeUuid + "from backup " + backup.getUuid() + " on the Dummy Backup Provider"); throw new CloudRuntimeException("Dummy plugin does not support this feature"); } @@ -100,7 +98,7 @@ public Map getBackupMetrics(Long zoneId, List NetworkerUrl = new ConfigKey<>("Advanced", String.class, "backup.plugin.networker.url", "https://localhost:9090/nwrestapi/v3", diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java index 8bb89b635e98..8aecaa26023e 100644 --- 
a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/networker/NetworkerClient.java @@ -42,7 +42,8 @@ import org.apache.http.conn.ssl.NoopHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.net.ssl.SSLContext; import javax.net.ssl.X509TrustManager; @@ -64,7 +65,7 @@ import static org.apache.cloudstack.backup.NetworkerBackupProvider.BACKUP_IDENTIFIER; public class NetworkerClient { - private static final Logger LOG = Logger.getLogger(NetworkerClient.class); + private static final Logger LOG = LogManager.getLogger(NetworkerClient.class); private final URI apiURI; private final String apiName; private final String apiPassword; diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index e20f67995b9f..0e4537390186 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -65,7 +64,6 @@ public class VeeamBackupProvider extends AdapterBase implements BackupProvider, Configurable { - private static final Logger LOG = Logger.getLogger(VeeamBackupProvider.class); public static final String BACKUP_IDENTIFIER = "-CSBKP-"; public ConfigKey VeeamUrl = new ConfigKey<>("Advanced", String.class, @@ 
-120,7 +118,7 @@ protected VeeamClient getClient(final Long zoneId) { } catch (URISyntaxException e) { throw new CloudRuntimeException("Failed to parse Veeam API URL: " + e.getMessage()); } catch (NoSuchAlgorithmException | KeyManagementException e) { - LOG.error("Failed to build Veeam API client due to: ", e); + logger.error("Failed to build Veeam API client due to: ", e); } throw new CloudRuntimeException("Failed to build Veeam API client"); } @@ -175,7 +173,7 @@ public boolean assignVMToBackupOffering(final VirtualMachine vm, final BackupOff final String clonedJobName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); if (!client.cloneVeeamJob(parentJob, clonedJobName)) { - LOG.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " but will check the list of jobs again if it was eventually succeeded."); + logger.error("Failed to clone pre-defined Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " but will check the list of jobs again if it was eventually succeeded."); } for (final BackupOffering job : client.listJobs()) { @@ -184,7 +182,7 @@ public boolean assignVMToBackupOffering(final VirtualMachine vm, final BackupOff if (BooleanUtils.isTrue(clonedJob.getScheduleConfigured()) && !clonedJob.getScheduleEnabled()) { client.toggleJobSchedule(clonedJob.getId()); } - LOG.debug("Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " found, now trying to assign the VM to the job."); + logger.debug("Veeam job (backup offering) for backup offering ID: " + backupOffering.getExternalId() + " found, now trying to assign the VM to the job."); final VmwareDatacenter vmwareDC = findVmwareDatacenterForVM(vm); if (client.addVMToVeeamJob(job.getExternalId(), vm.getInstanceName(), vmwareDC.getVcenterHost())) { ((VMInstanceVO) vm).setBackupExternalId(job.getExternalId()); @@ -201,15 +199,15 @@ public boolean 
removeVMFromBackupOffering(final VirtualMachine vm) { final VmwareDatacenter vmwareDC = findVmwareDatacenterForVM(vm); try { if (!client.removeVMFromVeeamJob(vm.getBackupExternalId(), vm.getInstanceName(), vmwareDC.getVcenterHost())) { - LOG.warn("Failed to remove VM from Veeam Job id: " + vm.getBackupExternalId()); + logger.warn("Failed to remove VM from Veeam Job id: " + vm.getBackupExternalId()); } } catch (Exception e) { - LOG.debug("VM was removed from the job so could not remove again, trying to delete the veeam job now.", e); + logger.debug("VM was removed from the job so could not remove again, trying to delete the veeam job now.", e); } final String clonedJobName = getGuestBackupName(vm.getInstanceName(), vm.getUuid()); if (!client.deleteJobAndBackup(clonedJobName)) { - LOG.warn("Failed to remove Veeam job and backup for job: " + clonedJobName); + logger.warn("Failed to remove Veeam job and backup for job: " + clonedJobName); throw new CloudRuntimeException("Failed to delete Veeam B&R job and backup, an operation may be in progress. Please try again after some time."); } client.syncBackupRepository(); @@ -234,7 +232,7 @@ public boolean deleteBackup(Backup backup, boolean forced) { throw new CloudRuntimeException(String.format("Could not find any VM associated with the Backup [uuid: %s, externalId: %s].", backup.getUuid(), backup.getExternalId())); } if (!forced) { - LOG.debug(String.format("Veeam backup provider does not have a safe way to remove a single restore point, which results in all backup chain being removed. " + logger.debug(String.format("Veeam backup provider does not have a safe way to remove a single restore point, which results in all backup chain being removed. 
" + "More information about this limitation can be found in the links: [%s, %s].", "https://forums.veeam.com/powershell-f26/removing-a-single-restorepoint-t21061.html", "https://helpcenter.veeam.com/docs/backup/vsphere/retention_separate_vms.html?ver=110")); throw new CloudRuntimeException("Veeam backup provider does not have a safe way to remove a single restore point, which results in all backup chain being removed. " @@ -263,7 +261,7 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { try { return getClient(vm.getDataCenterId()).restoreFullVM(vm.getInstanceName(), restorePointId); } catch (Exception ex) { - LOG.error(String.format("Failed to restore Full VM due to: %s. Retrying after some preparation", ex.getMessage())); + logger.error(String.format("Failed to restore Full VM due to: %s. Retrying after some preparation", ex.getMessage())); prepareForBackupRestoration(vm); return getClient(vm.getDataCenterId()).restoreFullVM(vm.getInstanceName(), restorePointId); } @@ -273,7 +271,7 @@ private void prepareForBackupRestoration(VirtualMachine vm) { if (!Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType())) { return; } - LOG.info("Preparing for restoring VM " + vm); + logger.info("Preparing for restoring VM " + vm); PrepareForBackupRestorationCommand command = new PrepareForBackupRestorationCommand(vm.getInstanceName()); Long hostId = virtualMachineManager.findClusterAndHostIdForVm(vm.getId()).second(); if (hostId == null) { @@ -282,7 +280,7 @@ private void prepareForBackupRestoration(VirtualMachine vm) { try { Answer answer = agentMgr.easySend(hostId, command); if (answer != null && answer.getResult()) { - LOG.info("Succeeded to prepare for restoring VM " + vm); + logger.info("Succeeded to prepare for restoring VM " + vm); } else { throw new CloudRuntimeException(String.format("Failed to prepare for restoring VM %s. details: %s", vm, (answer != null ? 
answer.getDetails() : null))); @@ -303,12 +301,12 @@ public Pair restoreBackedUpVolume(Backup backup, String volumeU public Map getBackupMetrics(final Long zoneId, final List vms) { final Map metrics = new HashMap<>(); if (CollectionUtils.isEmpty(vms)) { - LOG.warn("Unable to get VM Backup Metrics because the list of VMs is empty."); + logger.warn("Unable to get VM Backup Metrics because the list of VMs is empty."); return metrics; } List vmUuids = vms.stream().filter(Objects::nonNull).map(VirtualMachine::getUuid).collect(Collectors.toList()); - LOG.debug(String.format("Get Backup Metrics for VMs: [%s].", String.join(", ", vmUuids))); + logger.debug(String.format("Get Backup Metrics for VMs: [%s].", String.join(", ", vmUuids))); final Map backendMetrics = getClient(zoneId).getBackupMetrics(); for (final VirtualMachine vm : vms) { @@ -317,7 +315,7 @@ public Map getBackupMetrics(final Long zoneId, fi } Metric metric = backendMetrics.get(vm.getUuid()); - LOG.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), + logger.debug(String.format("Metrics for VM [uuid: %s, name: %s] is [backup size: %s, data size: %s].", vm.getUuid(), vm.getInstanceName(), metric.getBackupSize(), metric.getDataSize())); metrics.put(vm, metric); } @@ -333,7 +331,7 @@ private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(List bac for (final Backup backup : backupsInDb) { if (restorePoint.getId().equals(backup.getExternalId())) { if (metric != null) { - LOG.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", + logger.debug(String.format("Update backup with [uuid: %s, external id: %s] from [size: %s, protected size: %s] to [size: %s, protected size: %s].", backup.getUuid(), backup.getExternalId(), backup.getSize(), backup.getProtectedSize(), metric.getBackupSize(), metric.getDataSize())); ((BackupVO) backup).setSize(metric.getBackupSize()); @@ 
-350,7 +348,7 @@ private Backup checkAndUpdateIfBackupEntryExistsForRestorePoint(List bac public void syncBackups(VirtualMachine vm, Backup.Metric metric) { List restorePoints = listRestorePoints(vm); if (CollectionUtils.isEmpty(restorePoints)) { - LOG.debug(String.format("Can't find any restore point to VM: [uuid: %s, name: %s].", vm.getUuid(), vm.getInstanceName())); + logger.debug(String.format("Can't find any restore point to VM: [uuid: %s, name: %s].", vm.getUuid(), vm.getInstanceName())); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -381,7 +379,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { backup.setDomainId(vm.getDomainId()); backup.setZoneId(vm.getDataCenterId()); - LOG.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " + logger.debug(String.format("Creating a new entry in backups: [uuid: %s, vm_id: %s, external_id: %s, type: %s, date: %s, backup_offering_id: %s, account_id: %s, " + "domain_id: %s, zone_id: %s].", backup.getUuid(), backup.getVmId(), backup.getExternalId(), backup.getType(), backup.getDate(), backup.getBackupOfferingId(), backup.getAccountId(), backup.getDomainId(), backup.getZoneId())); backupDao.persist(backup); @@ -392,7 +390,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } for (final Long backupIdToRemove : removeList) { - LOG.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove)); + logger.warn(String.format("Removing backup with ID: [%s].", backupIdToRemove)); backupDao.remove(backupIdToRemove); } } diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java index 701c45f1a9d0..8a193c1ce80f 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java +++ 
b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java @@ -79,7 +79,8 @@ import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; @@ -93,7 +94,7 @@ import org.apache.commons.lang3.StringUtils; public class VeeamClient { - private static final Logger LOG = Logger.getLogger(VeeamClient.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String FAILED_TO_DELETE = "Failed to delete"; private final URI apiURI; @@ -193,24 +194,24 @@ protected Integer getVeeamServerVersion() { ); Pair response = executePowerShellCommands(cmds); if (response == null || !response.first() || response.second() == null || StringUtils.isBlank(response.second().trim())) { - LOG.error("Failed to get veeam server version, using default version"); + logger.error("Failed to get veeam server version, using default version"); return 0; } else { Integer majorVersion = NumbersUtil.parseInt(response.second().trim().split("\\.")[0], 0); - LOG.info(String.format("Veeam server full version is %s, major version is %s", response.second().trim(), majorVersion)); + logger.info(String.format("Veeam server full version is %s, major version is %s", response.second().trim(), majorVersion)); return majorVersion; } } private void checkResponseOK(final HttpResponse response) { if (response.getStatusLine().getStatusCode() == HttpStatus.SC_NO_CONTENT) { - LOG.debug("Requested Veeam resource does not exist"); + logger.debug("Requested Veeam resource does not exist"); return; } if (!(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK || response.getStatusLine().getStatusCode() == HttpStatus.SC_ACCEPTED) && response.getStatusLine().getStatusCode() != 
HttpStatus.SC_NO_CONTENT) { - LOG.debug(String.format("HTTP request failed, status code is [%s], response is: [%s].", response.getStatusLine().getStatusCode(), response.toString())); + logger.debug(String.format("HTTP request failed, status code is [%s], response is: [%s].", response.getStatusLine().getStatusCode(), response.toString())); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Got invalid API status code returned by the Veeam server"); } } @@ -228,7 +229,7 @@ protected HttpResponse get(final String path) throws IOException { final HttpResponse response = httpClient.execute(request); checkAuthFailure(response); - LOG.debug(String.format("Response received in GET request is: [%s] for URL: [%s].", response.toString(), url)); + logger.debug(String.format("Response received in GET request is: [%s] for URL: [%s].", response.toString(), url)); return response; } @@ -254,7 +255,7 @@ private HttpResponse post(final String path, final Object obj) throws IOExceptio final HttpResponse response = httpClient.execute(request); checkAuthFailure(response); - LOG.debug(String.format("Response received in POST request with body [%s] is: [%s] for URL [%s].", xml, response.toString(), url)); + logger.debug(String.format("Response received in POST request with body [%s] is: [%s] for URL [%s].", xml, response.toString(), url)); return response; } @@ -265,7 +266,7 @@ private HttpResponse delete(final String path) throws IOException { final HttpResponse response = httpClient.execute(request); checkAuthFailure(response); - LOG.debug(String.format("Response received in DELETE request is: [%s] for URL [%s].", response.toString(), url)); + logger.debug(String.format("Response received in DELETE request is: [%s] for URL [%s].", response.toString(), url)); return response; } @@ -274,7 +275,7 @@ private HttpResponse delete(final String path) throws IOException { /////////////////////////////////////////////////////////////////// private String findDCHierarchy(final String 
vmwareDcName) { - LOG.debug("Trying to find hierarchy ID for vmware datacenter: " + vmwareDcName); + logger.debug("Trying to find hierarchy ID for vmware datacenter: " + vmwareDcName); try { final HttpResponse response = get("/hierarchyRoots"); @@ -287,14 +288,14 @@ private String findDCHierarchy(final String vmwareDcName) { } } } catch (final IOException e) { - LOG.error("Failed to list Veeam jobs due to:", e); + logger.error("Failed to list Veeam jobs due to:", e); checkResponseTimeOut(e); } throw new CloudRuntimeException("Failed to find hierarchy reference for VMware datacenter " + vmwareDcName + " in Veeam, please ask administrator to check Veeam B&R manager configuration"); } private String lookupVM(final String hierarchyId, final String vmName) { - LOG.debug("Trying to lookup VM from veeam hierarchy:" + hierarchyId + " for vm name:" + vmName); + logger.debug("Trying to lookup VM from veeam hierarchy:" + hierarchyId + " for vm name:" + vmName); try { final HttpResponse response = get(String.format("/lookup?host=%s&type=Vm&name=%s", hierarchyId, vmName)); @@ -310,7 +311,7 @@ private String lookupVM(final String hierarchyId, final String vmName) { } } } catch (final IOException e) { - LOG.error("Failed to list Veeam jobs due to:", e); + logger.error("Failed to list Veeam jobs due to:", e); checkResponseTimeOut(e); } throw new CloudRuntimeException("Failed to lookup VM " + vmName + " in Veeam, please ask administrator to check Veeam B&R manager configuration"); @@ -336,7 +337,7 @@ private boolean checkTaskStatus(final HttpResponse response) throws IOException if (polledTask.getState().equals("Finished")) { final HttpResponse taskDeleteResponse = delete("/tasks/" + task.getTaskId()); if (taskDeleteResponse.getStatusLine().getStatusCode() != HttpStatus.SC_NO_CONTENT) { - LOG.warn("Operation failed for veeam task id=" + task.getTaskId()); + logger.warn("Operation failed for veeam task id=" + task.getTaskId()); } if 
(polledTask.getResult().getSuccess().equals("true")) { Pair pair = getRelatedLinkPair(polledTask.getLink()); @@ -355,7 +356,7 @@ private boolean checkTaskStatus(final HttpResponse response) throws IOException try { Thread.sleep(this.taskPollInterval * 1000); } catch (InterruptedException e) { - LOG.debug("Failed to sleep while polling for Veeam task status due to: ", e); + logger.debug("Failed to sleep while polling for Veeam task status due to: ", e); } } return false; @@ -375,7 +376,7 @@ protected boolean checkIfRestoreSessionFinished(String type, String path) throws try { Thread.sleep(1000); } catch (InterruptedException ignored) { - LOG.trace(String.format("Ignoring InterruptedException [%s] when waiting for restore session finishes.", ignored.getMessage())); + logger.trace(String.format("Ignoring InterruptedException [%s] when waiting for restore session finishes.", ignored.getMessage())); } } throw new CloudRuntimeException("Related job type: " + type + " was not successful"); @@ -395,7 +396,7 @@ private Pair getRelatedLinkPair(List links) { //////////////////////////////////////////////////////// public Ref listBackupRepository(final String backupServerId, final String backupName) { - LOG.debug(String.format("Trying to list backup repository for backup job [name: %s] in server [id: %s].", backupName, backupServerId)); + logger.debug(String.format("Trying to list backup repository for backup job [name: %s] in server [id: %s].", backupName, backupServerId)); try { String repositoryName = getRepositoryNameFromJob(backupName); final HttpResponse response = get(String.format("/backupServers/%s/repositories", backupServerId)); @@ -408,7 +409,7 @@ public Ref listBackupRepository(final String backupServerId, final String backup } } } catch (final IOException e) { - LOG.error(String.format("Failed to list Veeam backup repository used by backup job [name: %s] due to: [%s].", backupName, e.getMessage()), e); + logger.error(String.format("Failed to list Veeam backup 
repository used by backup job [name: %s] due to: [%s].", backupName, e.getMessage()), e); checkResponseTimeOut(e); } return null; @@ -433,23 +434,23 @@ protected String getRepositoryNameFromJob(String backupName) { } public void listAllBackups() { - LOG.debug("Trying to list Veeam backups"); + logger.debug("Trying to list Veeam backups"); try { final HttpResponse response = get("/backups"); checkResponseOK(response); final ObjectMapper objectMapper = new XmlMapper(); final EntityReferences entityReferences = objectMapper.readValue(response.getEntity().getContent(), EntityReferences.class); for (final Ref ref : entityReferences.getRefs()) { - LOG.debug("Veeam Backup found, name: " + ref.getName() + ", uid: " + ref.getUid() + ", type: " + ref.getType()); + logger.debug("Veeam Backup found, name: " + ref.getName() + ", uid: " + ref.getUid() + ", type: " + ref.getType()); } } catch (final IOException e) { - LOG.error("Failed to list Veeam backups due to:", e); + logger.error("Failed to list Veeam backups due to:", e); checkResponseTimeOut(e); } } public List listJobs() { - LOG.debug("Trying to list backup policies that are Veeam jobs"); + logger.debug("Trying to list backup policies that are Veeam jobs"); try { final HttpResponse response = get("/jobs"); checkResponseOK(response); @@ -464,14 +465,14 @@ public List listJobs() { } return policies; } catch (final IOException e) { - LOG.error("Failed to list Veeam jobs due to:", e); + logger.error("Failed to list Veeam jobs due to:", e); checkResponseTimeOut(e); } return new ArrayList<>(); } public Job listJob(final String jobId) { - LOG.debug("Trying to list veeam job id: " + jobId); + logger.debug("Trying to list veeam job id: " + jobId); try { final HttpResponse response = get(String.format("/jobs/%s?format=Entity", jobId.replace("urn:veeam:Job:", ""))); @@ -480,40 +481,40 @@ public Job listJob(final String jobId) { objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); return 
objectMapper.readValue(response.getEntity().getContent(), Job.class); } catch (final IOException e) { - LOG.error("Failed to list Veeam jobs due to:", e); + logger.error("Failed to list Veeam jobs due to:", e); checkResponseTimeOut(e); } catch (final ServerApiException e) { - LOG.error(e); + logger.error(e); } return null; } public boolean toggleJobSchedule(final String jobId) { - LOG.debug("Trying to toggle schedule for Veeam job: " + jobId); + logger.debug("Trying to toggle schedule for Veeam job: " + jobId); try { final HttpResponse response = post(String.format("/jobs/%s?action=toggleScheduleEnabled", jobId), null); return checkTaskStatus(response); } catch (final IOException e) { - LOG.error("Failed to toggle Veeam job schedule due to:", e); + logger.error("Failed to toggle Veeam job schedule due to:", e); checkResponseTimeOut(e); } return false; } public boolean startBackupJob(final String jobId) { - LOG.debug("Trying to start ad-hoc backup for Veeam job: " + jobId); + logger.debug("Trying to start ad-hoc backup for Veeam job: " + jobId); try { final HttpResponse response = post(String.format("/jobs/%s?action=start", jobId), null); return checkTaskStatus(response); } catch (final IOException e) { - LOG.error("Failed to list Veeam jobs due to:", e); + logger.error("Failed to list Veeam jobs due to:", e); checkResponseTimeOut(e); } return false; } public boolean cloneVeeamJob(final Job parentJob, final String clonedJobName) { - LOG.debug("Trying to clone veeam job: " + parentJob.getUid() + " with backup uuid: " + clonedJobName); + logger.debug("Trying to clone veeam job: " + parentJob.getUid() + " with backup uuid: " + clonedJobName); try { final Ref repositoryRef = listBackupRepository(parentJob.getBackupServerId(), parentJob.getName()); if (repositoryRef == null) { @@ -529,13 +530,13 @@ public boolean cloneVeeamJob(final Job parentJob, final String clonedJobName) { final HttpResponse response = post(String.format("/jobs/%s?action=clone", parentJob.getId()), 
cloneSpec); return checkTaskStatus(response); } catch (final Exception e) { - LOG.warn("Exception caught while trying to clone Veeam job:", e); + logger.warn("Exception caught while trying to clone Veeam job:", e); } return false; } public boolean addVMToVeeamJob(final String jobId, final String vmwareInstanceName, final String vmwareDcName) { - LOG.debug("Trying to add VM to backup offering that is Veeam job: " + jobId); + logger.debug("Trying to add VM to backup offering that is Veeam job: " + jobId); try { final String heirarchyId = findDCHierarchy(vmwareDcName); final String veeamVmRefId = lookupVM(heirarchyId, vmwareInstanceName); @@ -545,14 +546,14 @@ public boolean addVMToVeeamJob(final String jobId, final String vmwareInstanceNa final HttpResponse response = post(String.format("/jobs/%s/includes", jobId), vmToBackupJob); return checkTaskStatus(response); } catch (final IOException e) { - LOG.error("Failed to add VM to Veeam job due to:", e); + logger.error("Failed to add VM to Veeam job due to:", e); checkResponseTimeOut(e); } throw new CloudRuntimeException("Failed to add VM to backup offering likely due to timeout, please check Veeam tasks"); } public boolean removeVMFromVeeamJob(final String jobId, final String vmwareInstanceName, final String vmwareDcName) { - LOG.debug("Trying to remove VM from backup offering that is a Veeam job: " + jobId); + logger.debug("Trying to remove VM from backup offering that is a Veeam job: " + jobId); try { final String hierarchyId = findDCHierarchy(vmwareDcName); final String veeamVmRefId = lookupVM(hierarchyId, vmwareInstanceName); @@ -562,7 +563,7 @@ public boolean removeVMFromVeeamJob(final String jobId, final String vmwareInsta objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); final ObjectsInJob jobObjects = objectMapper.readValue(response.getEntity().getContent(), ObjectsInJob.class); if (jobObjects == null || jobObjects.getObjects() == null) { - LOG.warn("No objects found in the Veeam 
job " + jobId); + logger.warn("No objects found in the Veeam job " + jobId); return false; } for (final ObjectInJob jobObject : jobObjects.getObjects()) { @@ -571,22 +572,22 @@ public boolean removeVMFromVeeamJob(final String jobId, final String vmwareInsta return checkTaskStatus(deleteResponse); } } - LOG.warn(vmwareInstanceName + " VM was not found to be attached to Veaam job (backup offering): " + jobId); + logger.warn(vmwareInstanceName + " VM was not found to be attached to Veaam job (backup offering): " + jobId); return false; } catch (final IOException e) { - LOG.error("Failed to list Veeam jobs due to:", e); + logger.error("Failed to list Veeam jobs due to:", e); checkResponseTimeOut(e); } return false; } public boolean restoreFullVM(final String vmwareInstanceName, final String restorePointId) { - LOG.debug("Trying to restore full VM: " + vmwareInstanceName + " from backup"); + logger.debug("Trying to restore full VM: " + vmwareInstanceName + " from backup"); try { final HttpResponse response = post(String.format("/vmRestorePoints/%s?action=restore", restorePointId), null); return checkTaskStatus(response); } catch (final IOException e) { - LOG.error("Failed to restore full VM due to: ", e); + logger.error("Failed to restore full VM due to: ", e); checkResponseTimeOut(e); } throw new CloudRuntimeException("Failed to restore full VM from backup"); @@ -624,9 +625,9 @@ protected Pair executePowerShellCommands(List cmds) { commands, 120000, 120000, 3600000); if (response == null || !response.first()) { - LOG.error(String.format("Veeam PowerShell commands [%s] failed due to: [%s].", commands, response != null ? response.second() : "no PowerShell output returned")); + logger.error(String.format("Veeam PowerShell commands [%s] failed due to: [%s].", commands, response != null ? 
response.second() : "no PowerShell output returned")); } else { - LOG.debug(String.format("Veeam response for PowerShell commands [%s] is: [%s].", commands, response.second())); + logger.debug(String.format("Veeam response for PowerShell commands [%s] is: [%s].", commands, response.second())); } return response; @@ -654,7 +655,7 @@ public boolean deleteJobAndBackup(final String jobName) { } public boolean deleteBackup(final String restorePointId) { - LOG.debug(String.format("Trying to delete restore point [name: %s].", restorePointId)); + logger.debug(String.format("Trying to delete restore point [name: %s].", restorePointId)); Pair result = executePowerShellCommands(Arrays.asList( String.format("$restorePoint = Get-VBRRestorePoint ^| Where-Object { $_.Id -eq '%s' }", restorePointId), "if ($restorePoint) { Remove-VBRRestorePoint -Oib $restorePoint -Confirm:$false", @@ -667,13 +668,13 @@ public boolean deleteBackup(final String restorePointId) { } public boolean syncBackupRepository() { - LOG.debug("Trying to sync backup repository."); + logger.debug("Trying to sync backup repository."); Pair result = executePowerShellCommands(Arrays.asList( "$repo = Get-VBRBackupRepository", "$Syncs = Sync-VBRBackupRepository -Repository $repo", "while ((Get-VBRSession -ID $Syncs.ID).Result -ne 'Success') { Start-Sleep -Seconds 10 }" )); - LOG.debug("Done syncing backup repository."); + logger.debug("Done syncing backup repository."); return result != null && result.first(); } @@ -686,14 +687,14 @@ public Map getBackupMetrics() { } public Map getBackupMetricsViaVeeamAPI() { - LOG.debug("Trying to get backup metrics via Veeam B&R API"); + logger.debug("Trying to get backup metrics via Veeam B&R API"); try { final HttpResponse response = get(String.format("/backupFiles?format=Entity")); checkResponseOK(response); return processHttpResponseForBackupMetrics(response.getEntity().getContent()); } catch (final IOException e) { - LOG.error("Failed to get backup metrics via Veeam B&R API 
due to:", e); + logger.error("Failed to get backup metrics via Veeam B&R API due to:", e); checkResponseTimeOut(e); } return new HashMap<>(); @@ -744,7 +745,7 @@ protected Map processHttpResponseForBackupMetrics(final I metrics.put(vmUuid, new Backup.Metric(usedSize, dataSize)); } } catch (final IOException e) { - LOG.error("Failed to process response to get backup metrics via Veeam B&R API due to:", e); + logger.error("Failed to process response to get backup metrics via Veeam B&R API due to:", e); checkResponseTimeOut(e); } return metrics; @@ -782,7 +783,7 @@ public Map getBackupMetricsLegacy() { } protected Map processPowerShellResultForBackupMetrics(final String result) { - LOG.debug("Processing powershell result: " + result); + logger.debug("Processing powershell result: " + result); final String separator = "====="; final Map sizes = new HashMap<>(); @@ -801,7 +802,7 @@ protected Map processPowerShellResultForBackupMetrics(fin } private Backup.RestorePoint getRestorePointFromBlock(String[] parts) { - LOG.debug(String.format("Processing block of restore points: [%s].", StringUtils.join(parts, ", "))); + logger.debug(String.format("Processing block of restore points: [%s].", StringUtils.join(parts, ", "))); String id = null; Date created = null; String type = null; @@ -840,7 +841,7 @@ public List listRestorePointsLegacy(String backupName, Stri if (block.isEmpty()) { continue; } - LOG.debug(String.format("Found restore points from [backupName: %s, vmInternalName: %s] which is: [%s].", backupName, vmInternalName, block)); + logger.debug(String.format("Found restore points from [backupName: %s, vmInternalName: %s] which is: [%s].", backupName, vmInternalName, block)); final String[] parts = block.split("\r\n"); restorePoints.add(getRestorePointFromBlock(parts)); } @@ -856,14 +857,14 @@ public List listRestorePoints(String backupName, String vmI } public List listVmRestorePointsViaVeeamAPI(String vmInternalName) { - LOG.debug(String.format("Trying to list VM 
restore points via Veeam B&R API for VM %s: ", vmInternalName)); + logger.debug(String.format("Trying to list VM restore points via Veeam B&R API for VM %s: ", vmInternalName)); try { final HttpResponse response = get(String.format("/vmRestorePoints?format=Entity")); checkResponseOK(response); return processHttpResponseForVmRestorePoints(response.getEntity().getContent(), vmInternalName); } catch (final IOException e) { - LOG.error("Failed to list VM restore points via Veeam B&R API due to:", e); + logger.error("Failed to list VM restore points via Veeam B&R API due to:", e); checkResponseTimeOut(e); } return new ArrayList<>(); @@ -878,7 +879,7 @@ public List processHttpResponseForVmRestorePoints(InputStre throw new CloudRuntimeException("Could not get VM restore points via Veeam B&R API"); } for (final VmRestorePoint vmRestorePoint : vmRestorePoints.getVmRestorePoints()) { - LOG.debug(String.format("Processing VM restore point Name=%s, VmDisplayName=%s for vm name=%s", + logger.debug(String.format("Processing VM restore point Name=%s, VmDisplayName=%s for vm name=%s", vmRestorePoint.getName(), vmRestorePoint.getVmDisplayName(), vmInternalName)); if (!vmInternalName.equals(vmRestorePoint.getVmDisplayName())) { continue; @@ -887,7 +888,7 @@ public List processHttpResponseForVmRestorePoints(InputStre List links = vmRestorePoint.getLink(); for (Link link : links) { if (Arrays.asList(BACKUP_FILE_REFERENCE, RESTORE_POINT_REFERENCE).contains(link.getType()) && !link.getRel().equals("Up")) { - LOG.info(String.format("The VM restore point is not ready. Reference: %s, state: %s", link.getType(), link.getRel())); + logger.info(String.format("The VM restore point is not ready. 
Reference: %s, state: %s", link.getType(), link.getRel())); isReady = false; break; } @@ -898,11 +899,11 @@ public List processHttpResponseForVmRestorePoints(InputStre String vmRestorePointId = vmRestorePoint.getUid().substring(vmRestorePoint.getUid().lastIndexOf(':') + 1); Date created = formatDate(vmRestorePoint.getCreationTimeUtc()); String type = vmRestorePoint.getPointType(); - LOG.debug(String.format("Adding restore point %s, %s, %s", vmRestorePointId, created, type)); + logger.debug(String.format("Adding restore point %s, %s, %s", vmRestorePointId, created, type)); vmRestorePointList.add(new Backup.RestorePoint(vmRestorePointId, created, type)); } } catch (final IOException | ParseException e) { - LOG.error("Failed to process response to get VM restore points via Veeam B&R API due to:", e); + logger.error("Failed to process response to get VM restore points via Veeam B&R API due to:", e); checkResponseTimeOut(e); } return vmRestorePointList; diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java index 06804d68da27..b00455968c6e 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java @@ -38,6 +38,7 @@ import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.veeam.api.RestoreSession; import org.apache.http.HttpResponse; +import org.apache.logging.log4j.core.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -70,6 +71,7 @@ public void setUp() throws Exception { .withBody(""))); client = new VeeamClient("http://localhost:9399/api/", 12, adminUsername, adminPassword, true, 60, 600, 5, 120); mockClient = Mockito.mock(VeeamClient.class); + mockClient.logger = Mockito.mock(Logger.class); 
Mockito.when(mockClient.getRepositoryNameFromJob(Mockito.anyString())).thenCallRealMethod(); Mockito.when(mockClient.getVeeamServerVersion()).thenCallRealMethod(); } diff --git a/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCACustomTrustManager.java b/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCACustomTrustManager.java index fb5da50ce162..5ff036fef12f 100644 --- a/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCACustomTrustManager.java +++ b/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCACustomTrustManager.java @@ -27,13 +27,14 @@ import javax.net.ssl.X509TrustManager; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.certificate.dao.CrlDao; import org.apache.commons.lang3.StringUtils; public final class RootCACustomTrustManager implements X509TrustManager { - private static final Logger LOG = Logger.getLogger(RootCACustomTrustManager.class); + protected Logger logger = LogManager.getLogger(getClass()); private String clientAddress = "Unknown"; private boolean authStrictness = true; @@ -71,12 +72,12 @@ private void printCertificateChain(final X509Certificate[] certificates, final S builder.append("\n Issuer DN:" + certificate.getIssuerDN()); builder.append("\n Alternative Names:" + certificate.getSubjectAlternativeNames()); } - LOG.debug(builder.toString()); + logger.debug(builder.toString()); } @Override public void checkClientTrusted(final X509Certificate[] certificates, final String s) throws CertificateException { - if (LOG.isDebugEnabled()) { + if (logger.isDebugEnabled()) { printCertificateChain(certificates, s); } @@ -86,7 +87,7 @@ public void checkClientTrusted(final X509Certificate[] certificates, final Strin if (authStrictness && primaryClientCertificate == null) { throw new CertificateException("In strict auth mode, certificate(s) are expected from client:" + 
clientAddress); } else if (primaryClientCertificate == null) { - LOG.info("No certificate was received from client, but continuing since strict auth mode is disabled"); + logger.info("No certificate was received from client, but continuing since strict auth mode is disabled"); return; } @@ -95,7 +96,7 @@ public void checkClientTrusted(final X509Certificate[] certificates, final Strin if (serialNumber == null || crlDao.findBySerial(serialNumber) != null) { final String errorMsg = String.format("Client is using revoked certificate of serial=%x, subject=%s from address=%s", primaryClientCertificate.getSerialNumber(), primaryClientCertificate.getSubjectDN(), clientAddress); - LOG.error(errorMsg); + logger.error(errorMsg); exceptionMsg = (StringUtils.isEmpty(exceptionMsg)) ? errorMsg : (exceptionMsg + ". " + errorMsg); } @@ -105,7 +106,7 @@ public void checkClientTrusted(final X509Certificate[] certificates, final Strin } catch (final CertificateExpiredException | CertificateNotYetValidException e) { final String errorMsg = String.format("Client certificate has expired with serial=%x, subject=%s from address=%s", primaryClientCertificate.getSerialNumber(), primaryClientCertificate.getSubjectDN(), clientAddress); - LOG.error(errorMsg); + logger.error(errorMsg); if (!allowExpiredCertificate) { throw new CertificateException(errorMsg); } @@ -125,17 +126,17 @@ public void checkClientTrusted(final X509Certificate[] certificates, final Strin } if (!certMatchesOwnership) { final String errorMsg = "Certificate ownership verification failed for client: " + clientAddress; - LOG.error(errorMsg); + logger.error(errorMsg); exceptionMsg = (StringUtils.isEmpty(exceptionMsg)) ? errorMsg : (exceptionMsg + ". 
" + errorMsg); } if (authStrictness && StringUtils.isNotEmpty(exceptionMsg)) { throw new CertificateException(exceptionMsg); } - if (LOG.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (authStrictness) { - LOG.debug("Client/agent connection from ip=" + clientAddress + " has been validated and trusted."); + logger.debug("Client/agent connection from ip=" + clientAddress + " has been validated and trusted."); } else { - LOG.debug("Client/agent connection from ip=" + clientAddress + " accepted without certificate validation."); + logger.debug("Client/agent connection from ip=" + clientAddress + " accepted without certificate validation."); } } diff --git a/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java b/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java index 69df700cf606..d7001ce941aa 100644 --- a/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java +++ b/plugins/ca/root-ca/src/main/java/org/apache/cloudstack/ca/provider/RootCAProvider.java @@ -62,7 +62,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.utils.security.CertUtils; import org.apache.cloudstack.utils.security.KeyStoreUtils; -import org.apache.log4j.Logger; import org.bouncycastle.asn1.pkcs.Attribute; import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; import org.bouncycastle.asn1.x509.Extension; @@ -83,7 +82,6 @@ import org.apache.commons.lang3.StringUtils; public final class RootCAProvider extends AdapterBase implements CAProvider, Configurable { - private static final Logger LOG = Logger.getLogger(RootCAProvider.class); public static final Integer caValidityYears = 30; public static final String caAlias = "root"; @@ -168,7 +166,7 @@ private Certificate generateCertificateUsingCsr(final String csr, final List domainNames, final List domainN try { return generateCertificateUsingCsr(csr, domainNames, ipAddresses, validityDays); } 
catch (final CertificateException | IOException | SignatureException | NoSuchAlgorithmException | NoSuchProviderException | InvalidKeyException | OperatorCreationException e) { - LOG.error("Failed to generate certificate from CSR: ", e); + logger.error("Failed to generate certificate from CSR: ", e); throw new CloudRuntimeException("Failed to generate certificate using CSR due to:" + e.getMessage()); } } @@ -305,16 +303,16 @@ private int getCaValidityDays() { private boolean saveNewRootCAKeypair() { try { - LOG.debug("Generating root CA public/private keys"); + logger.debug("Generating root CA public/private keys"); final KeyPair keyPair = CertUtils.generateRandomKeyPair(2 * CAManager.CertKeySize.value()); if (!configDao.update(rootCAPublicKey.key(), rootCAPublicKey.category(), CertUtils.publicKeyToPem(keyPair.getPublic()))) { - LOG.error("Failed to save RootCA public key"); + logger.error("Failed to save RootCA public key"); } if (!configDao.update(rootCAPrivateKey.key(), rootCAPrivateKey.category(), CertUtils.privateKeyToPem(keyPair.getPrivate()))) { - LOG.error("Failed to save RootCA private key"); + logger.error("Failed to save RootCA private key"); } } catch (final NoSuchProviderException | NoSuchAlgorithmException | IOException e) { - LOG.error("Failed to generate/save RootCA private/public keys due to exception:", e); + logger.error("Failed to generate/save RootCA private/public keys due to exception:", e); } return loadRootCAKeyPair(); } @@ -324,16 +322,16 @@ private boolean saveNewRootCACertificate() { throw new CloudRuntimeException("Cannot issue self-signed root CA certificate as CA keypair is not initialized"); } try { - LOG.debug("Generating root CA certificate"); + logger.debug("Generating root CA certificate"); final X509Certificate rootCaCertificate = CertUtils.generateV3Certificate( null, caKeyPair, caKeyPair.getPublic(), rootCAIssuerDN.value(), CAManager.CertSignatureAlgorithm.value(), getCaValidityDays(), null, null); if 
(!configDao.update(rootCACertificate.key(), rootCACertificate.category(), CertUtils.x509CertificateToPem(rootCaCertificate))) { - LOG.error("Failed to update RootCA public/x509 certificate"); + logger.error("Failed to update RootCA public/x509 certificate"); } } catch (final CertificateException | NoSuchAlgorithmException | NoSuchProviderException | SignatureException | InvalidKeyException | OperatorCreationException | IOException e) { - LOG.error("Failed to generate RootCA certificate from private/public keys due to exception:", e); + logger.error("Failed to generate RootCA certificate from private/public keys due to exception:", e); return false; } return loadRootCACertificate(); @@ -346,7 +344,7 @@ private boolean loadRootCAKeyPair() { try { caKeyPair = new KeyPair(CertUtils.pemToPublicKey(rootCAPublicKey.value()), CertUtils.pemToPrivateKey(rootCAPrivateKey.value())); } catch (InvalidKeySpecException | IOException e) { - LOG.error("Failed to load saved RootCA private/public keys due to exception:", e); + logger.error("Failed to load saved RootCA private/public keys due to exception:", e); return false; } return caKeyPair.getPrivate() != null && caKeyPair.getPublic() != null; @@ -360,7 +358,7 @@ private boolean loadRootCACertificate() { caCertificate = CertUtils.pemToX509Certificate(rootCACertificate.value()); caCertificate.verify(caKeyPair.getPublic()); } catch (final IOException | CertificateException | NoSuchAlgorithmException | InvalidKeyException | SignatureException | NoSuchProviderException e) { - LOG.error("Failed to load saved RootCA certificate due to exception:", e); + logger.error("Failed to load saved RootCA certificate due to exception:", e); return false; } return caCertificate != null; @@ -379,7 +377,7 @@ private boolean loadManagementKeyStore() { if (serverCertificate == null || serverCertificate.getPrivateKey() == null) { throw new CloudRuntimeException("Failed to generate management server certificate and load management server keystore"); } - 
LOG.info("Creating new management server certificate and keystore"); + logger.info("Creating new management server certificate and keystore"); try { managementKeyStore = KeyStore.getInstance("JKS"); managementKeyStore.load(null, null); @@ -387,7 +385,7 @@ private boolean loadManagementKeyStore() { managementKeyStore.setKeyEntry(managementAlias, serverCertificate.getPrivateKey(), getKeyStorePassphrase(), new X509Certificate[]{serverCertificate.getClientCertificate(), caCertificate}); } catch (final CertificateException | NoSuchAlgorithmException | KeyStoreException | IOException e) { - LOG.error("Failed to load root CA management-server keystore due to exception: ", e); + logger.error("Failed to load root CA management-server keystore due to exception: ", e); return false; } return managementKeyStore != null; @@ -396,20 +394,20 @@ private boolean loadManagementKeyStore() { protected void addConfiguredManagementIp(List ipList) { String msNetworkCidr = configDao.getValue(Config.ManagementNetwork.key()); try { - LOG.debug(String.format("Trying to find management IP in CIDR range [%s].", msNetworkCidr)); + logger.debug(String.format("Trying to find management IP in CIDR range [%s].", msNetworkCidr)); Enumeration networkInterfaces = NetworkInterface.getNetworkInterfaces(); networkInterfaces.asIterator().forEachRemaining(networkInterface -> { networkInterface.getInetAddresses().asIterator().forEachRemaining(inetAddress -> { if (NetUtils.isIpWithInCidrRange(inetAddress.getHostAddress(), msNetworkCidr)) { ipList.add(inetAddress.getHostAddress()); - LOG.debug(String.format("Added IP [%s] to the list of IPs in the management server's certificate.", inetAddress.getHostAddress())); + logger.debug(String.format("Added IP [%s] to the list of IPs in the management server's certificate.", inetAddress.getHostAddress())); } }); }); } catch (SocketException e) { String msg = "Exception while trying to gather the management server's network interfaces."; - LOG.error(msg, e); + 
logger.error(msg, e); throw new CloudRuntimeException(msg, e); } } @@ -417,15 +415,15 @@ protected void addConfiguredManagementIp(List ipList) { private boolean setupCA() { if (!loadRootCAKeyPair() && !saveNewRootCAKeypair()) { - LOG.error("Failed to save and load root CA keypair"); + logger.error("Failed to save and load root CA keypair"); return false; } if (!loadRootCACertificate() && !saveNewRootCACertificate()) { - LOG.error("Failed to save and load root CA certificate"); + logger.error("Failed to save and load root CA certificate"); return false; } if (!loadManagementKeyStore()) { - LOG.error("Failed to check and configure management server keystore"); + logger.error("Failed to check and configure management server keystore"); return false; } return true; @@ -449,7 +447,7 @@ public boolean configure(String name, Map params) throws Configu caLock.unlock(); } } else { - LOG.error("Failed to grab lock and setup CA, startup method will try to load the CA certificate and keypair."); + logger.error("Failed to grab lock and setup CA, startup method will try to load the CA certificate and keypair."); } } finally { caLock.releaseRef(); diff --git a/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java b/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java index 469e58be5177..a72d6968f735 100644 --- a/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java +++ b/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java @@ -23,7 +23,8 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.mysql.cj.jdbc.ConnectionImpl; import com.mysql.cj.jdbc.JdbcConnection; @@ -32,7 +33,7 @@ import com.mysql.cj.jdbc.ha.LoadBalancedConnectionProxy; public class StaticStrategy implements BalanceStrategy { - private static final Logger s_logger = 
Logger.getLogger(StaticStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); public StaticStrategy() { } @@ -84,7 +85,7 @@ public JdbcConnection pickConnection(InvocationHandler proxy, List confi try { Thread.sleep(250); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while fail over in progres."); + logger.debug("[ignored] interrupted while fail over in progres."); } // start fresh diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java index f4e248855fd0..53d82fae604c 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; @@ -36,7 +35,6 @@ @APICommand(name = "quotaBalance", responseObject = QuotaStatementItemResponse.class, description = "Create a quota balance statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaBalanceCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QuotaBalanceCmd.class); @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = true, description = "Account Id for which statement needs to be generated") diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaCreditsCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaCreditsCmd.java index c47c0ad2d763..8ca29f275ddd 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaCreditsCmd.java +++ 
b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaCreditsCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.quota.QuotaService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -42,7 +41,6 @@ public class QuotaCreditsCmd extends BaseCmd { @Inject QuotaService _quotaService; - public static final Logger s_logger = Logger.getLogger(QuotaStatementCmd.class); @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = true, description = "Account Id for which quota credits need to be added") diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateListCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateListCmd.java index 3cca09c909ec..c7f39037934f 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateListCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateListCmd.java @@ -22,13 +22,11 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.QuotaEmailTemplateResponse; import org.apache.cloudstack.api.response.QuotaResponseBuilder; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = "quotaEmailTemplateList", responseObject = QuotaEmailTemplateResponse.class, description = "Lists all quota email templates", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaEmailTemplateListCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(QuotaEmailTemplateListCmd.class); @Inject QuotaResponseBuilder _quotaResponseBuilder; diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateUpdateCmd.java 
b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateUpdateCmd.java index 36d09864e548..17e7c220d2e6 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateUpdateCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEmailTemplateUpdateCmd.java @@ -25,14 +25,12 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.quota.constant.QuotaConfig; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.Arrays; @APICommand(name = "quotaEmailTemplateUpdate", responseObject = SuccessResponse.class, description = "Updates existing email templates for quota alerts", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaEmailTemplateUpdateCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QuotaEmailTemplateUpdateCmd.class); @Inject QuotaResponseBuilder _quotaResponseBuilder; diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java index ad6f12e9cc64..4035a5205e6c 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.response.QuotaEnabledResponse; import org.apache.cloudstack.quota.QuotaService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -30,7 +29,6 @@ @APICommand(name = "quotaIsEnabled", responseObject = QuotaEnabledResponse.class, description = "Return true if the plugin is enabled", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = 
false) public class QuotaEnabledCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QuotaEnabledCmd.class); @Inject diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java index 4d1c233c37a3..cc02ed31d2de 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java @@ -31,14 +31,12 @@ import org.apache.cloudstack.api.response.QuotaStatementItemResponse; import org.apache.cloudstack.api.response.QuotaStatementResponse; import org.apache.cloudstack.quota.vo.QuotaUsageVO; -import org.apache.log4j.Logger; import com.cloud.user.Account; @APICommand(name = "quotaStatement", responseObject = QuotaStatementItemResponse.class, description = "Create a quota statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaStatementCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QuotaStatementCmd.class); @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = true, description = "Optional, Account Id for which statement needs to be generated") diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java index 9236be1e0f8a..a1ef9b3746a5 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.QuotaSummaryResponse; import org.apache.cloudstack.context.CallContext; 
-import org.apache.log4j.Logger; import java.util.List; @@ -36,7 +35,6 @@ @APICommand(name = "quotaSummary", responseObject = QuotaSummaryResponse.class, description = "Lists balance and quota usage for all accounts", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaSummaryCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(QuotaSummaryCmd.class); @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = false, description = "Optional, Account Id for which statement needs to be generated") private String accountName; diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java index 2bbdb57fa7e7..8e5de2410642 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffCreateCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.QuotaTariffResponse; import org.apache.cloudstack.quota.vo.QuotaTariffVO; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -37,7 +36,6 @@ @APICommand(name = "quotaTariffCreate", responseObject = QuotaTariffResponse.class, description = "Creates a quota tariff for a resource.", since = "4.18.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class QuotaTariffCreateCmd extends BaseCmd { - protected Logger logger = Logger.getLogger(getClass()); @Inject QuotaResponseBuilder responseBuilder; diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffDeleteCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffDeleteCmd.java index 
6fd46dc487e1..699854ebac33 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffDeleteCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffDeleteCmd.java @@ -27,14 +27,12 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.QuotaTariffResponse; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = "quotaTariffDelete", description = "Marks a quota tariff as removed.", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.18.0.0", authorized = {RoleType.Admin}) public class QuotaTariffDeleteCmd extends BaseCmd { - protected Logger logger = Logger.getLogger(getClass()); @Inject QuotaResponseBuilder responseBuilder; diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java index b79fd3d9aa82..c47fdbfa1d87 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.QuotaTariffResponse; import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -38,7 +37,6 @@ @APICommand(name = "quotaTariffList", responseObject = QuotaTariffResponse.class, description = "Lists all quota tariff plans", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaTariffListCmd extends BaseListCmd { - public static final Logger s_logger = 
Logger.getLogger(QuotaTariffListCmd.class); @Inject QuotaResponseBuilder _responseBuilder; @@ -71,7 +69,7 @@ public void execute() { final List responses = new ArrayList<>(); - s_logger.trace(String.format("Adding quota tariffs [%s] to response of API quotaTariffList.", ReflectionToStringBuilderUtils.reflectCollection(responses))); + logger.trace(String.format("Adding quota tariffs [%s] to response of API quotaTariffList.", ReflectionToStringBuilderUtils.reflectCollection(responses))); for (final QuotaTariffVO resource : result.first()) { responses.add(_responseBuilder.createQuotaTariffResponse(resource)); diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java index e2aad3a86f31..d5c5ca198b8d 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffUpdateCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.QuotaResponseBuilder; import org.apache.cloudstack.api.response.QuotaTariffResponse; import org.apache.cloudstack.quota.vo.QuotaTariffVO; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -37,7 +36,6 @@ @APICommand(name = "quotaTariffUpdate", responseObject = QuotaTariffResponse.class, description = "Update the tariff plan for a resource", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class QuotaTariffUpdateCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QuotaTariffUpdateCmd.class); @Inject QuotaResponseBuilder _responseBuilder; diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaUpdateCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaUpdateCmd.java index 
6f0e70c66e8a..986b2d4ce981 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaUpdateCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaUpdateCmd.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.quota.QuotaAlertManager; import org.apache.cloudstack.quota.QuotaManager; import org.apache.cloudstack.quota.QuotaStatement; -import org.apache.log4j.Logger; import java.util.Calendar; @@ -33,7 +32,6 @@ @APICommand(name = "quotaUpdate", responseObject = QuotaUpdateResponse.class, description = "Update quota calculations, alerts and statements", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class QuotaUpdateCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(QuotaUpdateCmd.class); @Inject diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java index 32b49a72ae41..eb667b00c3bc 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java @@ -61,7 +61,8 @@ import org.apache.cloudstack.quota.vo.QuotaTariffVO; import org.apache.cloudstack.quota.vo.QuotaUsageVO; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.domain.DomainVO; @@ -78,7 +79,7 @@ @Component public class QuotaResponseBuilderImpl implements QuotaResponseBuilder { - private static final Logger s_logger = Logger.getLogger(QuotaResponseBuilderImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject 
private QuotaTariffDao _quotaTariffDao; @@ -230,8 +231,8 @@ public int compare(QuotaBalanceVO o1, QuotaBalanceVO o2) { // Iterate in reverse. while (li.hasPrevious()) { QuotaBalanceVO entry = li.previous(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("createQuotaBalanceResponse: Entry=" + entry); + if (logger.isDebugEnabled()) { + logger.debug("createQuotaBalanceResponse: Entry=" + entry); } if (entry.getCreditsId() > 0) { li.remove(); @@ -247,8 +248,8 @@ public int compare(QuotaBalanceVO o1, QuotaBalanceVO o2) { boolean consecutive = true; for (Iterator it = quotaBalance.iterator(); it.hasNext();) { QuotaBalanceVO entry = it.next(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("createQuotaBalanceResponse: All Credit Entry=" + entry); + if (logger.isDebugEnabled()) { + logger.debug("createQuotaBalanceResponse: All Credit Entry=" + entry); } if (entry.getCreditsId() > 0) { if (consecutive) { @@ -268,9 +269,9 @@ public int compare(QuotaBalanceVO o1, QuotaBalanceVO o2) { resp.setStartDate(startDate); resp.setStartQuota(startItem.getCreditBalance()); resp.setEndDate(endDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("createQuotaBalanceResponse: Start Entry=" + startItem); - s_logger.debug("createQuotaBalanceResponse: End Entry=" + endItem); + if (logger.isDebugEnabled()) { + logger.debug("createQuotaBalanceResponse: Start Entry=" + startItem); + logger.debug("createQuotaBalanceResponse: End Entry=" + endItem); } resp.setEndQuota(endItem.getCreditBalance().add(lastCredits)); } else if (quota_activity > 0) { @@ -310,8 +311,8 @@ public QuotaStatementResponse createQuotaStatementResponse(final List= 0) { if (account.getState() == Account.State.LOCKED) { - s_logger.info("UnLocking account " + account.getAccountName() + " , due to positive balance " + currentAccountBalance); + logger.info("UnLocking account " + account.getAccountName() + " , due to positive balance " + currentAccountBalance); _accountMgr.enableAccount(account.getAccountName(), 
domainId, accountId); } } else { // currentAccountBalance < 0 then lock the account if (_quotaManager.isLockable(account) && account.getState() == Account.State.ENABLED && enforce) { - s_logger.info("Locking account " + account.getAccountName() + " , due to negative balance " + currentAccountBalance); + logger.info("Locking account " + account.getAccountName() + " , due to negative balance " + currentAccountBalance); _accountMgr.lockAccount(account.getAccountName(), domainId, accountId); } } @@ -580,8 +581,8 @@ public QuotaBalanceResponse createQuotaLastBalanceResponse(List QuotaBalanceResponse resp = new QuotaBalanceResponse(); BigDecimal lastCredits = new BigDecimal(0); for (QuotaBalanceVO entry : quotaBalance) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("createQuotaLastBalanceResponse Date=" + entry.getUpdatedOn() + " balance=" + entry.getCreditBalance() + " credit=" + entry.getCreditsId()); + if (logger.isDebugEnabled()) { + logger.debug("createQuotaLastBalanceResponse Date=" + entry.getUpdatedOn() + " balance=" + entry.getCreditBalance() + " credit=" + entry.getCreditsId()); } lastCredits = lastCredits.add(entry.getCreditBalance()); } diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java index 9179691e9732..bd08f6afd7ef 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java @@ -52,7 +52,6 @@ import org.apache.cloudstack.quota.vo.QuotaBalanceVO; import org.apache.cloudstack.quota.vo.QuotaUsageVO; import org.apache.cloudstack.utils.usage.UsageUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -67,7 +66,6 @@ @Component public class QuotaServiceImpl extends ManagerBase implements QuotaService, Configurable, 
QuotaConfig { - private static final Logger s_logger = Logger.getLogger(QuotaServiceImpl.class); @Inject private AccountDao _accountDao; @@ -103,11 +101,11 @@ public boolean configure(String name, Map params) throws Configu _aggregationDuration = Integer.parseInt(aggregationRange); if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _aggregationDuration = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Usage timezone = " + _usageTimezone + " AggregationDuration=" + _aggregationDuration); + if (logger.isDebugEnabled()) { + logger.debug("Usage timezone = " + _usageTimezone + " AggregationDuration=" + _aggregationDuration); } return true; } @@ -176,15 +174,15 @@ public List findQuotaBalanceVO(Long accountId, String accountNam if (endDate == null) { // adjust start date to end of day as there is no end date Date adjustedStartDate = computeAdjustedTime(_respBldr.startOfNextDay(startDate)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("getQuotaBalance1: Getting quota balance records for account: " + accountId + ", domainId: " + domainId + ", on or before " + adjustedStartDate); + if (logger.isDebugEnabled()) { + logger.debug("getQuotaBalance1: Getting quota balance records for account: " + accountId + ", domainId: " + domainId + ", on or before " + adjustedStartDate); } List qbrecords = _quotaBalanceDao.lastQuotaBalanceVO(accountId, domainId, adjustedStartDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found records size=" + qbrecords.size()); + if (logger.isDebugEnabled()) { + logger.debug("Found records size=" + qbrecords.size()); } if (qbrecords.isEmpty()) { - s_logger.info("Incorrect Date there are no quota records 
before this date " + adjustedStartDate); + logger.info("Incorrect Date there are no quota records before this date " + adjustedStartDate); return qbrecords; } else { return qbrecords; @@ -195,16 +193,16 @@ public List findQuotaBalanceVO(Long accountId, String accountNam throw new InvalidParameterValueException("Incorrect Date Range. End date:" + endDate + " should not be in future. "); } else if (startDate.before(endDate)) { Date adjustedEndDate = computeAdjustedTime(endDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("getQuotaBalance2: Getting quota balance records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + if (logger.isDebugEnabled()) { + logger.debug("getQuotaBalance2: Getting quota balance records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate); } List qbrecords = _quotaBalanceDao.findQuotaBalance(accountId, domainId, adjustedStartDate, adjustedEndDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("getQuotaBalance3: Found records size=" + qbrecords.size()); + if (logger.isDebugEnabled()) { + logger.debug("getQuotaBalance3: Found records size=" + qbrecords.size()); } if (qbrecords.isEmpty()) { - s_logger.info("There are no quota records between these dates start date " + adjustedStartDate + " and end date:" + endDate); + logger.info("There are no quota records between these dates start date " + adjustedStartDate + " and end date:" + endDate); return qbrecords; } else { return qbrecords; @@ -245,8 +243,8 @@ public List getQuotaUsage(Long accountId, String accountName, Long } Date adjustedEndDate = computeAdjustedTime(endDate); Date adjustedStartDate = computeAdjustedTime(startDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Getting quota records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate); + if (logger.isDebugEnabled()) { + 
logger.debug("Getting quota records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate); } return _quotaUsageDao.findQuotaUsage(accountId, domainId, usageType, adjustedStartDate, adjustedEndDate); } @@ -302,16 +300,16 @@ public boolean saveQuotaAccount(final AccountVO account, final BigDecimal aggrUs quota_account = new QuotaAccountVO(account.getAccountId()); quota_account.setQuotaBalance(aggrUsage); quota_account.setQuotaBalanceDate(endDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug(quota_account); + if (logger.isDebugEnabled()) { + logger.debug(quota_account); } _quotaAcc.persistQuotaAccount(quota_account); return true; } else { quota_account.setQuotaBalance(aggrUsage); quota_account.setQuotaBalanceDate(endDate); - if (s_logger.isDebugEnabled()) { - s_logger.debug(quota_account); + if (logger.isDebugEnabled()) { + logger.debug(quota_account); } return _quotaAcc.updateQuotaAccount(account.getAccountId(), quota_account); } diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateClusterCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateClusterCmd.java index 06e0a42df853..5ab1ba73fced 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateClusterCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateClusterCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "dedicateCluster", description = "Dedicate an existing cluster", responseObject = DedicateClusterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DedicateClusterCmd extends BaseAsyncCmd { - public static final Logger s_logger = 
Logger.getLogger(DedicateClusterCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateHostCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateHostCmd.java index cf6c5872fdd4..6fb379f4a871 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateHostCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateHostCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "dedicateHost", description = "Dedicates a host.", responseObject = DedicateHostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DedicateHostCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DedicateHostCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicatePodCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicatePodCmd.java index 819c4106af5d..2b5e9afab88c 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicatePodCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicatePodCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "dedicatePod", description = "Dedicates a Pod.", responseObject = DedicatePodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DedicatePodCmd extends BaseAsyncCmd { - public 
static final Logger s_logger = Logger.getLogger(DedicatePodCmd.class.getName()); @Inject public DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateZoneCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateZoneCmd.java index c3ce1d3baece..ea91ea5a661a 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateZoneCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/DedicateZoneCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "dedicateZone", description = "Dedicates a zones.", responseObject = DedicateZoneResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DedicateZoneCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DedicateZoneCmd.class.getName()); @Inject public DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java index c91e44707638..efdee15ff19c 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedClustersCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @APICommand(name = "listDedicatedClusters", description = "Lists dedicated clusters.", responseObject = DedicateClusterResponse.class, 
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDedicatedClustersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDedicatedClustersCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java index fd27662ddedb..b60509f6b660 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedHostsCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @APICommand(name = "listDedicatedHosts", description = "Lists dedicated hosts.", responseObject = DedicateHostResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDedicatedHostsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDedicatedHostsCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java index 742c5cc96ea9..06eaefe32987 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedPodsCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupResponse; import 
org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @APICommand(name = "listDedicatedPods", description = "Lists dedicated pods.", responseObject = DedicatePodResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDedicatedPodsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDedicatedPodsCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java index b8874eab7cd3..c5bc5456f790 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ListDedicatedZonesCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.APICommand; @@ -43,7 +42,6 @@ @APICommand(name = "listDedicatedZones", description = "List dedicated zones.", responseObject = DedicateZoneResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListDedicatedZonesCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListDedicatedZonesCmd.class.getName()); @Inject DedicatedService _dedicatedservice; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java index 9945ba29ca1a..af153e499e90 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java +++ 
b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedClusterCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "releaseDedicatedCluster", description = "Release the dedication for cluster", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleaseDedicatedClusterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedClusterCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java index 22563503061e..81eff262fbc0 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedHostCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "releaseDedicatedHost", description = "Release the dedication for host", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleaseDedicatedHostCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedHostCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java 
b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java index ec47a4303408..5f7dadc993fa 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedPodCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "releaseDedicatedPod", description = "Release the dedication for the pod", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleaseDedicatedPodCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedPodCmd.class.getName()); @Inject DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java index 4b15ddf951c1..cc178d992f27 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/api/commands/ReleaseDedicatedZoneCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "releaseDedicatedZone", description = "Release dedication of zone", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReleaseDedicatedZoneCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ReleaseDedicatedZoneCmd.class.getName()); @Inject 
DedicatedService dedicatedService; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index cd6d8cf590d4..9060eccb64a2 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -44,7 +44,8 @@ import org.apache.cloudstack.api.response.DedicateZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -84,7 +85,7 @@ @Component public class DedicatedResourceManagerImpl implements DedicatedService { - private static final Logger s_logger = Logger.getLogger(DedicatedResourceManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AccountDao _accountDao; @@ -140,7 +141,7 @@ public List dedicateZone(final Long zoneId, final Long doma DedicatedResourceVO dedicatedZone = _dedicatedDao.findByZoneId(zoneId); //check if zone is dedicated if (dedicatedZone != null) { - s_logger.error("Zone " + dc.getName() + " is already dedicated"); + logger.error("Zone " + dc.getName() + " is already dedicated"); throw new CloudRuntimeException("Zone " + dc.getName() + " is already dedicated"); } @@ -159,7 +160,7 @@ public List dedicateZone(final Long zoneId, final Long doma if (dPod.getAccountId().equals(accountId)) { podsToRelease.add(dPod); } else { - s_logger.error("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error("Pod " + pod.getName() + " under this 
Zone " + dc.getName() + " is dedicated to different account/domain"); throw new CloudRuntimeException("Pod " + pod.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -185,7 +186,7 @@ public List dedicateZone(final Long zoneId, final Long doma if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - s_logger.error("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } @@ -212,7 +213,7 @@ public List dedicateZone(final Long zoneId, final Long doma if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - s_logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -237,7 +238,7 @@ public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); throw new CloudRuntimeException("Failed to dedicate zone. 
Please contact Cloud Support."); } @@ -256,7 +257,7 @@ public List doInTransaction(TransactionStatus status) { } } catch (Exception e) { - s_logger.error("Unable to dedicate zone due to " + e.getMessage(), e); + logger.error("Unable to dedicate zone due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); } @@ -290,7 +291,7 @@ public List dedicatePod(final Long podId, final Long domain DedicatedResourceVO dedicatedZoneOfPod = _dedicatedDao.findByZoneId(pod.getDataCenterId()); //check if pod is dedicated if (dedicatedPod != null) { - s_logger.error("Pod " + pod.getName() + " is already dedicated"); + logger.error("Pod " + pod.getName() + " is already dedicated"); throw new CloudRuntimeException("Pod " + pod.getName() + " is already dedicated"); } @@ -300,7 +301,7 @@ public List dedicatePod(final Long podId, final Long domain if (dedicatedZoneOfPod.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfPod.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(pod.getDataCenterId()); - s_logger.error("Cannot dedicate Pod. Its zone is already dedicated"); + logger.error("Cannot dedicate Pod. 
Its zone is already dedicated"); throw new CloudRuntimeException("Pod's Zone " + zone.getName() + " is already dedicated"); } } @@ -321,7 +322,7 @@ public List dedicatePod(final Long podId, final Long domain if (dCluster.getAccountId().equals(accountId)) { clustersToRelease.add(dCluster); } else { - s_logger.error("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); throw new CloudRuntimeException("Cluster " + cluster.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } @@ -348,7 +349,7 @@ public List dedicatePod(final Long podId, final Long domain if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - s_logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -373,7 +374,7 @@ public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); throw new CloudRuntimeException("Failed to dedicate zone. 
Please contact Cloud Support."); } DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, podId, null, null, null, null, group.getId()); @@ -384,7 +385,7 @@ public List doInTransaction(TransactionStatus status) { } dedicatedResource = _dedicatedDao.persist(dedicatedResource); } catch (Exception e) { - s_logger.error("Unable to dedicate pod due to " + e.getMessage(), e); + logger.error("Unable to dedicate pod due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to dedicate pod. Please contact Cloud Support."); } @@ -419,7 +420,7 @@ public List dedicateCluster(final Long clusterId, final Lon //check if cluster is dedicated if (dedicatedCluster != null) { - s_logger.error("Cluster " + cluster.getName() + " is already dedicated"); + logger.error("Cluster " + cluster.getName() + " is already dedicated"); throw new CloudRuntimeException("Cluster " + cluster.getName() + " is already dedicated"); } @@ -428,7 +429,7 @@ public List dedicateCluster(final Long clusterId, final Lon //can dedicate a cluster to an account/domain if pod is dedicated to parent-domain if (dedicatedPodOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - s_logger.error("Cannot dedicate Cluster. Its Pod is already dedicated"); + logger.error("Cannot dedicate Cluster. 
Its Pod is already dedicated"); HostPodVO pod = _podDao.findById(cluster.getPodId()); throw new CloudRuntimeException("Cluster's Pod " + pod.getName() + " is already dedicated"); } @@ -439,7 +440,7 @@ public List dedicateCluster(final Long clusterId, final Lon //can dedicate a cluster to an account/domain if zone is dedicated to parent-domain if (dedicatedZoneOfCluster.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfCluster.getDomainId().equals(domainId) || domainIdInChildreanList))) { - s_logger.error("Cannot dedicate Cluster. Its zone is already dedicated"); + logger.error("Cannot dedicate Cluster. Its zone is already dedicated"); DataCenterVO zone = _zoneDao.findById(cluster.getDataCenterId()); throw new CloudRuntimeException("Cluster's Zone " + zone.getName() + " is already dedicated"); } @@ -461,7 +462,7 @@ public List dedicateCluster(final Long clusterId, final Lon if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - s_logger.error("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); + logger.error("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); throw new CloudRuntimeException("Cannot dedicate Cluster " + cluster.getName() + " to account" + accountName); } } else { @@ -486,7 +487,7 @@ public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); throw new CloudRuntimeException("Failed to dedicate zone. 
Please contact Cloud Support."); } DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, clusterId, null, null, null, group.getId()); @@ -497,7 +498,7 @@ public List doInTransaction(TransactionStatus status) { } dedicatedResource = _dedicatedDao.persist(dedicatedResource); } catch (Exception e) { - s_logger.error("Unable to dedicate cluster due to " + e.getMessage(), e); + logger.error("Unable to dedicate cluster due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to dedicate cluster. Please contact Cloud Support.", e); } @@ -534,7 +535,7 @@ public List dedicateHost(final Long hostId, final Long doma DedicatedResourceVO dedicatedZoneOfHost = _dedicatedDao.findByZoneId(host.getDataCenterId()); if (dedicatedHost != null) { - s_logger.error("Host " + host.getName() + " is already dedicated"); + logger.error("Host " + host.getName() + " is already dedicated"); throw new CloudRuntimeException("Host " + host.getName() + " is already dedicated"); } @@ -544,7 +545,7 @@ public List dedicateHost(final Long hostId, final Long doma if (dedicatedClusterOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedClusterOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - s_logger.error("Host's Cluster " + cluster.getName() + " is already dedicated"); + logger.error("Host's Cluster " + cluster.getName() + " is already dedicated"); throw new CloudRuntimeException("Host's Cluster " + cluster.getName() + " is already dedicated"); } } @@ -555,7 +556,7 @@ public List dedicateHost(final Long hostId, final Long doma if (dedicatedPodOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedPodOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { HostPodVO pod = _podDao.findById(host.getPodId()); - s_logger.error("Host's Pod " + 
pod.getName() + " is already dedicated"); + logger.error("Host's Pod " + pod.getName() + " is already dedicated"); throw new CloudRuntimeException("Host's Pod " + pod.getName() + " is already dedicated"); } } @@ -566,7 +567,7 @@ public List dedicateHost(final Long hostId, final Long doma if (dedicatedZoneOfHost.getAccountId() != null || (accountId == null && !domainIdInChildreanList) || (accountId != null && !(dedicatedZoneOfHost.getDomainId().equals(domainId) || domainIdInChildreanList))) { DataCenterVO zone = _zoneDao.findById(host.getDataCenterId()); - s_logger.error("Host's Data Center " + zone.getName() + " is already dedicated"); + logger.error("Host's Data Center " + zone.getName() + " is already dedicated"); throw new CloudRuntimeException("Host's Data Center " + zone.getName() + " is already dedicated"); } } @@ -583,7 +584,7 @@ public List doInTransaction(TransactionStatus status) { // find or create the affinity group by name under this account/domain AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); } DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, null, hostId, null, null, group.getId()); @@ -594,7 +595,7 @@ public List doInTransaction(TransactionStatus status) { } dedicatedResource = _dedicatedDao.persist(dedicatedResource); } catch (Exception e) { - s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); + logger.error("Unable to dedicate host due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to dedicate host. 
Please contact Cloud Support.", e); } @@ -665,7 +666,7 @@ private boolean checkHostSuitabilityForExplicitDedication(Long accountId, List result = _dedicatedService.dedicateZone(10L, domainId, accountName); Assert.assertNotNull(result); } catch (Exception e) { - s_logger.info("exception in testing dedication of zone " + logger.info("exception in testing dedication of zone " + e.toString()); } } @@ -176,7 +174,7 @@ public void runDedicatePodTest() { List result = _dedicatedService.dedicatePod(10L, domainId, accountName); Assert.assertNotNull(result); } catch (Exception e) { - s_logger.info("exception in testing dedication of pod " + logger.info("exception in testing dedication of pod " + e.toString()); } } @@ -189,7 +187,7 @@ public void runDedicateClusterTest() { List result = _dedicatedService.dedicateCluster(10L, domainId, accountName); Assert.assertNotNull(result); } catch (Exception e) { - s_logger.info("exception in testing dedication of cluster " + logger.info("exception in testing dedication of cluster " + e.toString()); } } @@ -205,7 +203,7 @@ public void runDedicateHostTest() { List result = _dedicatedService.dedicateHost(10L, domainId, accountName); Assert.assertNotNull(result); } catch (Exception e) { - s_logger.info("exception in testing dedication of host " + logger.info("exception in testing dedication of host " + e.toString()); } } diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index 2c5a72498bc5..bd1bcf061013 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; 
import com.cloud.configuration.Config; import com.cloud.exception.InsufficientServerCapacityException; @@ -43,7 +42,6 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { - private static final Logger s_logger = Logger.getLogger(ImplicitDedicationPlanner.class); @Inject private ServiceOfferingDao serviceOfferingDao; @@ -158,12 +156,12 @@ private boolean checkHostSuitabilityForImplicitDedication(Long accountId, List applyUserConcentrationPodHeuristicToClusters(long zoneId, Lis private List reorderClustersByPods(List clusterIds, List podIds) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reordering cluster list as per pods ordered by user concentration"); + if (logger.isDebugEnabled()) { + logger.debug("Reordering cluster list as per pods ordered by user concentration"); } Map> podClusterMap = clusterDao.getPodClusterIdMap(clusterIds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Pod To cluster Map is: " + podClusterMap); + if (logger.isTraceEnabled()) { + logger.trace("Pod To cluster Map is: " + podClusterMap); } List reorderedClusters = new ArrayList(); @@ -88,22 +86,22 @@ private List reorderClustersByPods(List clusterIds, List podId } reorderedClusters.addAll(clusterIds); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reordered cluster list: " + reorderedClusters); + if (logger.isTraceEnabled()) { + logger.trace("Reordered cluster list: " + reorderedClusters); } return reorderedClusters; } protected List listPodsByUserConcentration(long zoneId, long accountId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying UserConcentratedPod heuristic for account: " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Applying UserConcentratedPod heuristic for account: " + accountId); } List prioritizedPods = vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId); - if (s_logger.isTraceEnabled()) { - s_logger.trace("List of pods to be considered, after applying UserConcentratedPod 
heuristic: " + prioritizedPods); + if (logger.isTraceEnabled()) { + logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: " + prioritizedPods); } return prioritizedPods; diff --git a/plugins/deployment-planners/user-dispersing/src/main/java/com/cloud/deploy/UserDispersingPlanner.java b/plugins/deployment-planners/user-dispersing/src/main/java/com/cloud/deploy/UserDispersingPlanner.java index 24bc061b2187..c5d81e9a51b8 100644 --- a/plugins/deployment-planners/user-dispersing/src/main/java/com/cloud/deploy/UserDispersingPlanner.java +++ b/plugins/deployment-planners/user-dispersing/src/main/java/com/cloud/deploy/UserDispersingPlanner.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.utils.NumbersUtil; @@ -34,7 +33,6 @@ public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { - private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class); /** * This method should reorder the given list of Cluster Ids by applying any necessary heuristic @@ -97,8 +95,8 @@ protected List reorderPods(Pair, Map> podCapacity } protected Pair, Map> listClustersByUserDispersion(long id, boolean isZone, long accountId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying Userdispersion heuristic to clusters for account: " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Applying Userdispersion heuristic to clusters for account: " + accountId); } Pair, Map> clusterIdsVmCountInfo; if (isZone) { @@ -106,19 +104,19 @@ protected Pair, Map> listClustersByUserDispersion(long } else { clusterIdsVmCountInfo = vmInstanceDao.listClusterIdsInPodByVmCount(id, accountId); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("List of clusters in ascending order of number of VMs: " + clusterIdsVmCountInfo.first()); + if (logger.isTraceEnabled()) { + logger.trace("List of clusters 
in ascending order of number of VMs: " + clusterIdsVmCountInfo.first()); } return clusterIdsVmCountInfo; } protected Pair, Map> listPodsByUserDispersion(long dataCenterId, long accountId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying Userdispersion heuristic to pods for account: " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Applying Userdispersion heuristic to pods for account: " + accountId); } Pair, Map> podIdsVmCountInfo = vmInstanceDao.listPodIdsInZoneByVmCount(dataCenterId, accountId); - if (s_logger.isTraceEnabled()) { - s_logger.trace("List of pods in ascending order of number of VMs: " + podIdsVmCountInfo.first()); + if (logger.isTraceEnabled()) { + logger.trace("List of pods in ascending order of number of VMs: " + podIdsVmCountInfo.first()); } return podIdsVmCountInfo; @@ -130,25 +128,25 @@ private List orderByApplyingWeights(Pair, Map> ca Map capacityMap = capacityInfo.second(); Map vmCountMap = vmCountInfo.second(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Capacity Id list: " + capacityOrderedIds + " , capacityMap:" + capacityMap); + if (logger.isTraceEnabled()) { + logger.trace("Capacity Id list: " + capacityOrderedIds + " , capacityMap:" + capacityMap); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Vm Count Id list: " + vmCountOrderedIds + " , vmCountMap:" + vmCountMap); + if (logger.isTraceEnabled()) { + logger.trace("Vm Count Id list: " + vmCountOrderedIds + " , vmCountMap:" + vmCountMap); } List idsReorderedByWeights = new ArrayList(); float capacityWeight = (1.0f - _userDispersionWeight); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying userDispersionWeight: " + _userDispersionWeight); + if (logger.isDebugEnabled()) { + logger.debug("Applying userDispersionWeight: " + _userDispersionWeight); } //normalize the vmCountMap LinkedHashMap normalisedVmCountIdMap = new LinkedHashMap(); Long totalVmsOfAccount = vmInstanceDao.countRunningAndStartingByAccount(accountId); - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Total VMs for account: " + totalVmsOfAccount); + if (logger.isDebugEnabled()) { + logger.debug("Total VMs for account: " + totalVmsOfAccount); } for (Long id : vmCountOrderedIds) { Double normalisedCount = vmCountMap.get(id) / totalVmsOfAccount; @@ -177,8 +175,8 @@ private List orderByApplyingWeights(Pair, Map> ca idsReorderedByWeights.addAll(idList); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reordered Id list: " + idsReorderedByWeights); + if (logger.isTraceEnabled()) { + logger.trace("Reordered Id list: " + idsReorderedByWeights); } return idsReorderedByWeights; diff --git a/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java b/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java index b7d74df980f7..d5d362781922 100644 --- a/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java +++ b/plugins/event-bus/inmemory/src/main/java/org/apache/cloudstack/mom/inmemory/InMemoryEventBus.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.events.Event; import org.apache.cloudstack.framework.events.EventBus; @@ -38,7 +37,6 @@ public class InMemoryEventBus extends ManagerBase implements EventBus { - private static final Logger s_logger = Logger.getLogger(InMemoryEventBus.class); private final static Map> subscribers; diff --git a/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java b/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java index 17a58a5d2326..01888779fc6a 100644 --- a/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java +++ b/plugins/event-bus/kafka/src/main/java/org/apache/cloudstack/mom/kafka/KafkaEventBus.java @@ -27,7 +27,6 @@ import javax.naming.ConfigurationException; -import 
org.apache.log4j.Logger; import org.apache.cloudstack.framework.events.Event; import org.apache.cloudstack.framework.events.EventBus; @@ -50,7 +49,6 @@ public class KafkaEventBus extends ManagerBase implements EventBus { private String _topic = null; private Producer _producer; - private static final Logger s_logger = Logger.getLogger(KafkaEventBus.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { diff --git a/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java b/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java index f54c769908d8..8cd2289f9f31 100644 --- a/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java +++ b/plugins/event-bus/rabbitmq/src/main/java/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java @@ -34,7 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.rabbitmq.client.BlockedListener; -import org.apache.log4j.Logger; import com.rabbitmq.client.AMQP; import com.rabbitmq.client.AlreadyClosedException; @@ -97,7 +96,6 @@ public static void setUseSsl(String useSsl) { private ExecutorService executorService; private static DisconnectHandler disconnectHandler; private static BlockedConnectionHandler blockedConnectionHandler; - private static final Logger s_logger = Logger.getLogger(RabbitMQEventBus.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -240,9 +238,9 @@ public void handleDelivery(String queueName, Envelope envelope, AMQP.BasicProper s_subscribers.put(queueName, queueDetails); } catch (AlreadyClosedException closedException) { - s_logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection", closedException); + logger.warn("Connection to AMQP service is lost. 
Subscription:" + queueName + " will be active after reconnection", closedException); } catch (ConnectException connectException) { - s_logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection", connectException); + logger.warn("Connection to AMQP service is lost. Subscription:" + queueName + " will be active after reconnection", connectException); } catch (Exception e) { throw new EventBusException("Failed to subscribe to event due to " + e.getMessage()); } @@ -362,7 +360,7 @@ private synchronized Connection getConnection() throws Exception { try { return createConnection(); } catch (KeyManagementException | NoSuchAlgorithmException | IOException | TimeoutException e) { - s_logger.error(String.format("Failed to create a connection to AMQP server [AMQP host:%s, port:%d] due to: %s", amqpHost, port, e)); + logger.error(String.format("Failed to create a connection to AMQP server [AMQP host:%s, port:%d] due to: %s", amqpHost, port, e)); throw e; } } else { @@ -399,7 +397,7 @@ private synchronized void closeConnection() { s_connection.close(); } } catch (Exception e) { - s_logger.warn("Failed to close connection to AMQP server due to " + e.getMessage()); + logger.warn("Failed to close connection to AMQP server due to " + e.getMessage()); } s_connection = null; } @@ -411,7 +409,7 @@ private synchronized void abortConnection() { try { s_connection.abort(); } catch (Exception e) { - s_logger.warn("Failed to abort connection due to " + e.getMessage()); + logger.warn("Failed to abort connection due to " + e.getMessage()); } s_connection = null; } @@ -428,7 +426,7 @@ private Channel createChannel(Connection connection) throws Exception { try { return connection.createChannel(); } catch (java.io.IOException exception) { - s_logger.warn("Failed to create a channel due to " + exception.getMessage()); + logger.warn("Failed to create a channel due to " + exception.getMessage()); throw exception; } } @@ -437,7 +435,7 @@ 
private void createExchange(Channel channel, String exchangeName) throws Excepti try { channel.exchangeDeclare(exchangeName, "topic", true); } catch (java.io.IOException exception) { - s_logger.error("Failed to create exchange" + exchangeName + " on RabbitMQ server"); + logger.error("Failed to create exchange" + exchangeName + " on RabbitMQ server"); throw exception; } } @@ -447,7 +445,7 @@ private void publishEventToExchange(Channel channel, String exchangeName, String byte[] messageBodyBytes = eventDescription.getBytes(); channel.basicPublish(exchangeName, routingKey, MessageProperties.PERSISTENT_TEXT_PLAIN, messageBodyBytes); } catch (Exception e) { - s_logger.error("Failed to publish event " + routingKey + " on exchange " + exchangeName + " of message broker due to " + e.getMessage()); + logger.error("Failed to publish event " + routingKey + " on exchange " + exchangeName + " of message broker due to " + e.getMessage()); throw e; } } @@ -500,7 +498,7 @@ public synchronized boolean stop() { channel.queueDelete(queueName); channel.abort(); } catch (IOException ioe) { - s_logger.warn("Failed to delete queue: " + queueName + " on AMQP server due to " + ioe.getMessage()); + logger.warn("Failed to delete queue: " + queueName + " on AMQP server due to " + ioe.getMessage()); } } } @@ -514,14 +512,14 @@ private class BlockedConnectionHandler implements BlockedListener { @Override public void handleBlocked(String reason) throws IOException { - s_logger.error("rabbitmq connection is blocked with reason: " + reason); + logger.error("rabbitmq connection is blocked with reason: " + reason); closeConnection(); throw new CloudRuntimeException("unblocking the parent thread as publishing to rabbitmq server is blocked with reason: " + reason); } @Override public void handleUnblocked() throws IOException { - s_logger.info("rabbitmq connection in unblocked"); + logger.info("rabbitmq connection in unblocked"); } } // logic to deal with loss of connection to AMQP server @@ -538,7 
+536,7 @@ public void shutdownCompleted(ShutdownSignalException shutdownSignalException) { } abortConnection(); // disconnected to AMQP server, so abort the connection and channels - s_logger.warn("Connection has been shutdown by AMQP server. Attempting to reconnect."); + logger.warn("Connection has been shutdown by AMQP server. Attempting to reconnect."); // initiate re-connect process ReconnectionTask reconnect = new ReconnectionTask(); @@ -616,7 +614,7 @@ public void handleDelivery(String queueName, Envelope envelope, AMQP.BasicProper s_subscribers.put(subscriberId, subscriberDetails); } } catch (Exception e) { - s_logger.warn("Failed to recreate queues and binding for the subscribers due to " + e.getMessage()); + logger.warn("Failed to recreate queues and binding for the subscribers due to " + e.getMessage()); } } return; diff --git a/plugins/ha-planners/skip-heurestics/src/main/java/com/cloud/deploy/SkipHeuresticsPlanner.java b/plugins/ha-planners/skip-heurestics/src/main/java/com/cloud/deploy/SkipHeuresticsPlanner.java index 6f9d696a1711..c6e2ea4d99f0 100644 --- a/plugins/ha-planners/skip-heurestics/src/main/java/com/cloud/deploy/SkipHeuresticsPlanner.java +++ b/plugins/ha-planners/skip-heurestics/src/main/java/com/cloud/deploy/SkipHeuresticsPlanner.java @@ -17,7 +17,6 @@ package com.cloud.deploy; import com.cloud.vm.VirtualMachineProfile; -import org.apache.log4j.Logger; import javax.naming.ConfigurationException; @@ -25,7 +24,6 @@ import java.util.Map; public class SkipHeuresticsPlanner extends FirstFitPlanner implements HAPlanner { - private static final Logger s_logger = Logger.getLogger(SkipHeuresticsPlanner.class); /** @@ -37,8 +35,8 @@ public class SkipHeuresticsPlanner extends FirstFitPlanner implements HAPlanner @Override protected void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile vmProfile, DeploymentPlan plan){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deploying vm during HA 
process, so skipping disable threshold check"); + if (logger.isDebugEnabled()) { + logger.debug("Deploying vm during HA process, so skipping disable threshold check"); } return; } diff --git a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java index 70920df5eb5c..ffdbfcbc5c36 100644 --- a/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java +++ b/plugins/host-allocators/random/src/main/java/com/cloud/agent/manager/allocator/impl/RandomAllocator.java @@ -25,7 +25,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.ListUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.manager.allocator.HostAllocator; @@ -47,7 +46,6 @@ @Component public class RandomAllocator extends AdapterBase implements HostAllocator { - private static final Logger s_logger = Logger.getLogger(RandomAllocator.class); @Inject private HostDao _hostDao; @Inject @@ -74,9 +72,9 @@ private List findSuitableHosts(VirtualMachineProfile vmProfile, Deployment } String hostTag = offering.getHostTag(); if (hostTag != null) { - s_logger.debug(String.format("Looking for hosts in dc [%s], pod [%s], cluster [%s] and complying with host tag [%s].", dcId, podId, clusterId, hostTag)); + logger.debug(String.format("Looking for hosts in dc [%s], pod [%s], cluster [%s] and complying with host tag [%s].", dcId, podId, clusterId, hostTag)); } else { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); } if (hosts != null) { // retain all computing hosts, regardless of whether they support routing...it's random after 
all @@ -98,11 +96,11 @@ private List findSuitableHosts(VirtualMachineProfile vmProfile, Deployment hostsCopy = ListUtils.union(hostsCopy, _hostDao.findHostsWithTagRuleThatMatchComputeOferringTags(hostTag)); if (hostsCopy.isEmpty()) { - s_logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTag)); + logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTag)); throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); } - s_logger.debug("Random Allocator found " + hostsCopy.size() + " hosts"); + logger.debug("Random Allocator found " + hostsCopy.size() + " hosts"); if (hostsCopy.size() == 0) { return suitableHosts; } @@ -112,25 +110,25 @@ private List findSuitableHosts(VirtualMachineProfile vmProfile, Deployment break; } if (avoid.shouldAvoid(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); } continue; } Pair cpuCapabilityAndCapacity = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); + if (logger.isDebugEnabled()) { + logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" 
+ cpuCapabilityAndCapacity.second()); } continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a suitable host, adding to list: " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found a suitable host, adding to list: " + host.getId()); } suitableHosts.add(host); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Random Host Allocator returning " + suitableHosts.size() + " suitable hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Random Host Allocator returning " + suitableHosts.size() + " suitable hosts"); } return suitableHosts; } @@ -145,8 +143,8 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla ExcludeList avoid, List hosts, int returnUpTo, boolean considerReservedCapacity) { if (CollectionUtils.isEmpty(hosts)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Random Allocator found 0 hosts as given host list is empty"); + if (logger.isDebugEnabled()) { + logger.debug("Random Allocator found 0 hosts as given host list is empty"); } return new ArrayList(); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 3bdd2e81fb51..321369b24b9e 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -33,7 +33,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.ApiConstants; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -61,7 +60,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { - protected static final Logger s_logger = Logger.getLogger(BareMetalDiscoverer.class); @Inject 
protected VMInstanceDao _vmDao = null; @@ -92,25 +90,25 @@ public Map> find(long dcId, Long p if (!url.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; - s_logger.debug(msg); + logger.debug(msg); return null; } if (clusterId == null) { String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || (cluster.getHypervisorType() != HypervisorType.BareMetal)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Bare Metal hosts"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for Bare Metal hosts"); return null; } @@ -132,14 +130,14 @@ public Map> find(long dcId, Long p + injectScript); } - final Script2 command = new Script2(scriptPath, s_logger); + final Script2 command = new Script2(scriptPath, logger); command.add("ping"); command.add("hostname="+ipmiIp); command.add("usrname="+username); command.add("password="+password, ParamType.PASSWORD); final String result = command.execute(); if (result != null) { - s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); + logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); return null; } @@ -205,11 +203,11 @@ public Map> find(long dcId, Long p zone.setDhcpProvider(Network.Provider.ExternalDhcpServer.getName()); _dcDao.update(zone.getId(), zone); - s_logger.debug(String.format("Discover Bare Metal host successfully(ip=%1$s, username=%2$s, password=%3$s," + + 
logger.debug(String.format("Discover Bare Metal host successfully(ip=%1$s, username=%2$s, password=%3$s," + "cpuNum=%4$s, cpuCapacity-%5$s, memCapacity=%6$s)", ipmiIp, username, "******", cpuNum, cpuCapacity, memCapacity)); return resources; } catch (Exception e) { - s_logger.warn("Can not set up bare metal agent", e); + logger.warn("Can not set up bare metal agent", e); } return null; diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalGuru.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalGuru.java index f82ad489b8da..a1b306b66c91 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalGuru.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalGuru.java @@ -27,7 +27,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -39,7 +38,6 @@ import com.cloud.vm.VirtualMachineProfile; public class BareMetalGuru extends HypervisorGuruBase implements HypervisorGuru { - private static final Logger s_logger = Logger.getLogger(BareMetalGuru.class); @Inject GuestOSDao _guestOsDao; diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java index c37b51df5e9b..318ac225c8c0 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -24,7 +24,6 @@ import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.capacity.CapacityManager; import com.cloud.dc.ClusterDetailsDao; @@ -51,7 +50,6 @@ import com.cloud.vm.VirtualMachineProfile; 
public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { - private static final Logger s_logger = Logger.getLogger(BareMetalPlanner.class); @Inject protected DataCenterDao _dcDao; @Inject @@ -82,7 +80,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); Cluster c = _clusterDao.findById(h.getClusterId()); - s_logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId()); + logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId()); return new DeployDestination(dc, pod, c, h); } @@ -114,7 +112,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl } if (target == null) { - s_logger.warn("Cannot find host with tag " + hostTag + " use capacity from service offering"); + logger.warn("Cannot find host with tag " + hostTag + " use capacity from service offering"); cpu_requested = offering.getCpu() * offering.getSpeed(); ram_requested = offering.getRamSize() * 1024L * 1024L; } else { @@ -126,7 +124,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl if (haVmTag == null) { hosts = _resourceMgr.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); } else { - s_logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + + logger.warn("Cannot find HA host with tag " + haVmTag + " in cluster id=" + cluster.getId() + ", pod id=" + cluster.getPodId() + ", data center id=" + cluster.getDataCenterId()); return null; } @@ -138,7 +136,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, 
ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - s_logger.debug("Find host " + h.getId() + " has enough capacity"); + logger.debug("Find host " + h.getId() + " has enough capacity"); DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); return new DeployDestination(dc, pod, cluster, h); @@ -146,7 +144,7 @@ public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan pl } } - s_logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, NumbersUtil.toHumanReadableSize(ram_requested))); + logger.warn(String.format("Cannot find enough capacity(requested cpu=%1$s memory=%2$s)", cpu_requested, NumbersUtil.toHumanReadableSize(ram_requested))); return null; } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java index 8265f951f8a8..940897de3c95 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BareMetalTemplateAdapter.java @@ -45,14 +45,12 @@ import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.Date; import java.util.List; public class BareMetalTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(BareMetalTemplateAdapter.class); @Inject HostDao _hostDao; @Inject @@ -141,7 +139,7 @@ public boolean delete(TemplateProfile profile) { zoneName = "all zones"; } - s_logger.debug("Attempting to mark template host refs for 
template: " + template.getName() + " as destroyed in zone: " + zoneName); + logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); String eventType = EventTypes.EVENT_TEMPLATE_DELETE; List templateHostVOs = this._tmpltStoreDao.listByTemplate(templateId); @@ -151,7 +149,7 @@ public boolean delete(TemplateProfile profile) { try { lock = _tmpltStoreDao.acquireInLockTable(vo.getId()); if (lock == null) { - s_logger.debug("Failed to acquire lock when deleting templateDataStoreVO with ID: " + vo.getId()); + logger.debug("Failed to acquire lock when deleting templateDataStoreVO with ID: " + vo.getId()); success = false; break; } @@ -184,7 +182,7 @@ public boolean delete(TemplateProfile profile) { } } - s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); + logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); // If there are no more non-destroyed template host entries for this template, delete it if (success && (_tmpltStoreDao.listByTemplate(templateId).size() == 0)) { @@ -194,7 +192,7 @@ public boolean delete(TemplateProfile profile) { try { if (lock == null) { - s_logger.debug("Failed to acquire lock when deleting template with ID: " + templateId); + logger.debug("Failed to acquire lock when deleting template with ID: " + templateId); success = false; } else if (_tmpltDao.remove(templateId)) { // Decrement the number of templates and total secondary storage space used by the account. 
@@ -207,7 +205,7 @@ public boolean delete(TemplateProfile profile) { _tmpltDao.releaseFromLockTable(lock.getId()); } } - s_logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); + logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); } return success; diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java index b1aafc692ef1..bf991b77e1cb 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -32,7 +32,6 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.BaremetalProvisionDoneNotificationCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.AddBaremetalHostCmd; @@ -46,7 +45,6 @@ import com.cloud.vm.VirtualMachine.State; public class BaremetalManagerImpl extends ManagerBase implements BaremetalManager, StateListener { - private static final Logger s_logger = Logger.getLogger(BaremetalManagerImpl.class); @Inject protected HostDao _hostDao; @@ -93,17 +91,17 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t HostVO host = _hostDao.findById(vo.getHostId()); if (host == null) { - s_logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion"); + logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion"); return true; } _hostDao.loadDetails(host); if (newState == State.Starting) { host.setDetail("vmName", vo.getInstanceName()); - s_logger.debug("Add vmName " + host.getDetail("vmName") + " to host " + host.getId() + " details"); + logger.debug("Add vmName " + 
host.getDetail("vmName") + " to host " + host.getId() + " details"); } else { if (host.getDetail("vmName") != null && host.getDetail("vmName").equalsIgnoreCase(vo.getInstanceName())) { - s_logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); + logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); host.getDetails().remove("vmName"); } } @@ -150,7 +148,7 @@ public void notifyProvisionDone(BaremetalProvisionDoneNotificationCmd cmd) { vm.setState(State.Running); vm.setLastHostId(vm.getHostId()); vmDao.update(vm.getId(), vm); - s_logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", + logger.debug(String.format("received baremetal provision done notification for vm[id:%s name:%s] running on host[mac:%s, ip:%s]", vm.getId(), vm.getInstanceName(), host.getPrivateMacAddress(), host.getPrivateIpAddress())); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java index fc7596a9dd66..509fd340dae4 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxePingServerCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; @@ -68,7 +67,6 @@ import com.cloud.vm.VirtualMachineProfile; public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements BaremetalPxeService { - private static 
final Logger s_logger = Logger.getLogger(BareMetalPingServiceImpl.class); @Inject ResourceManager _resourceMgr; @Inject @@ -107,19 +105,19 @@ public boolean prepare(VirtualMachineProfile profile, NicProfile pxeNic, Network new PreparePxeServerCommand(ip, mac, mask, gateway, dns, tpl, profile.getVirtualMachine().getInstanceName(), dest.getHost().getName()); PreparePxeServerAnswer ans = (PreparePxeServerAnswer)_agentMgr.send(pxeServerId, cmd); if (!ans.getResult()) { - s_logger.warn("Unable tot program PXE server: " + pxeVo.getId() + " because " + ans.getDetails()); + logger.warn("Unable tot program PXE server: " + pxeVo.getId() + " because " + ans.getDetails()); return false; } IpmISetBootDevCommand bootCmd = new IpmISetBootDevCommand(BootDev.pxe); Answer anw = _agentMgr.send(dest.getHost().getId(), bootCmd); if (!anw.getResult()) { - s_logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + anw.getDetails()); + logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + anw.getDetails()); } return anw.getResult(); } catch (Exception e) { - s_logger.warn("Cannot prepare PXE server", e); + logger.warn("Cannot prepare PXE server", e); return false; } } @@ -150,7 +148,7 @@ public boolean prepareCreateTemplate(Long pxeServerId, UserVm vm, String templat Answer ans = _agentMgr.send(pxeServerId, cmd); return ans.getResult(); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return false; } } @@ -219,7 +217,7 @@ public BaremetalPxeVO addPxeServer(AddBaremetalPxeCmd cmd) { try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new IllegalArgumentException(e.getMessage()); } String ipAddress = uri.getHost(); @@ -244,7 +242,7 @@ public BaremetalPxeVO addPxeServer(AddBaremetalPxeCmd cmd) { try { resource.configure("PING PXE resource", params); } catch 
(Exception e) { - s_logger.debug(e); + logger.debug(e); throw new CloudRuntimeException(e.getMessage()); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java index 0cdd0f128224..007640e8aad3 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BareMetalResourceBase.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -83,7 +82,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class BareMetalResourceBase extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BareMetalResourceBase.class); protected String _uuid; protected String _zone; protected String _pod; @@ -176,20 +174,20 @@ public boolean configure(String name, Map params) throws Configu try { ipmiIface = configDao.getValue(Config.BaremetalIpmiLanInterface.key()); } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); } try { ipmiRetryTimes = Integer.parseInt(configDao.getValue(Config.BaremetalIpmiRetryTimes.key())); } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); } try { provisionDoneNotificationOn = Boolean.valueOf(configDao.getValue(Config.BaremetalProvisionDoneNotificationEnabled.key())); isProvisionDoneNotificationTimeout = Integer.parseInt(configDao.getValue(Config.BaremetalProvisionDoneNotificationTimeout.key())); } catch (Exception e) { - s_logger.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); } String injectScript = "scripts/util/ipmi.py"; 
@@ -198,7 +196,7 @@ public boolean configure(String name, Map params) throws Configu throw new ConfigurationException("Cannot find ping script " + scriptPath); } String pythonPath = "/usr/bin/python"; - _pingCommand = new Script2(pythonPath, s_logger); + _pingCommand = new Script2(pythonPath, logger); _pingCommand.add(scriptPath); _pingCommand.add("ping"); _pingCommand.add("interface=" + ipmiIface); @@ -206,7 +204,7 @@ public boolean configure(String name, Map params) throws Configu _pingCommand.add("usrname=" + _username); _pingCommand.add("password=" + _password, ParamType.PASSWORD); - _setPxeBootCommand = new Script2(pythonPath, s_logger); + _setPxeBootCommand = new Script2(pythonPath, logger); _setPxeBootCommand.add(scriptPath); _setPxeBootCommand.add("boot_dev"); _setPxeBootCommand.add("interface=" + ipmiIface); @@ -215,7 +213,7 @@ public boolean configure(String name, Map params) throws Configu _setPxeBootCommand.add("password=" + _password, ParamType.PASSWORD); _setPxeBootCommand.add("dev=pxe"); - _setDiskBootCommand = new Script2(pythonPath, s_logger); + _setDiskBootCommand = new Script2(pythonPath, logger); _setDiskBootCommand.add(scriptPath); _setDiskBootCommand.add("boot_dev"); _setDiskBootCommand.add("interface=" + ipmiIface); @@ -224,7 +222,7 @@ public boolean configure(String name, Map params) throws Configu _setDiskBootCommand.add("password=" + _password, ParamType.PASSWORD); _setDiskBootCommand.add("dev=disk"); - _rebootCommand = new Script2(pythonPath, s_logger); + _rebootCommand = new Script2(pythonPath, logger); _rebootCommand.add(scriptPath); _rebootCommand.add("reboot"); _rebootCommand.add("interface=" + ipmiIface); @@ -232,7 +230,7 @@ public boolean configure(String name, Map params) throws Configu _rebootCommand.add("usrname=" + _username); _rebootCommand.add("password=" + _password, ParamType.PASSWORD); - _getStatusCommand = new Script2(pythonPath, s_logger); + _getStatusCommand = new Script2(pythonPath, logger); 
_getStatusCommand.add(scriptPath); _getStatusCommand.add("ping"); _getStatusCommand.add("interface=" + ipmiIface); @@ -240,7 +238,7 @@ public boolean configure(String name, Map params) throws Configu _getStatusCommand.add("usrname=" + _username); _getStatusCommand.add("password=" + _password, ParamType.PASSWORD); - _powerOnCommand = new Script2(pythonPath, s_logger); + _powerOnCommand = new Script2(pythonPath, logger); _powerOnCommand.add(scriptPath); _powerOnCommand.add("power"); _powerOnCommand.add("interface=" + ipmiIface); @@ -249,7 +247,7 @@ public boolean configure(String name, Map params) throws Configu _powerOnCommand.add("password=" + _password, ParamType.PASSWORD); _powerOnCommand.add("action=on"); - _powerOffCommand = new Script2(pythonPath, s_logger); + _powerOffCommand = new Script2(pythonPath, logger); _powerOffCommand.add(scriptPath); _powerOffCommand.add("power"); _powerOffCommand.add("interface=" + ipmiIface); @@ -258,7 +256,7 @@ public boolean configure(String name, Map params) throws Configu _powerOffCommand.add("password=" + _password, ParamType.PASSWORD); _powerOffCommand.add("action=soft"); - _forcePowerOffCommand = new Script2(pythonPath, s_logger); + _forcePowerOffCommand = new Script2(pythonPath, logger); _forcePowerOffCommand.add(scriptPath); _forcePowerOffCommand.add("power"); _forcePowerOffCommand.add("interface=" + ipmiIface); @@ -267,7 +265,7 @@ public boolean configure(String name, Map params) throws Configu _forcePowerOffCommand.add("password=" + _password, ParamType.PASSWORD); _forcePowerOffCommand.add("action=off"); - _bootOrRebootCommand = new Script2(pythonPath, s_logger); + _bootOrRebootCommand = new Script2(pythonPath, logger); _bootOrRebootCommand.add(scriptPath); _bootOrRebootCommand.add("boot_or_reboot"); _bootOrRebootCommand.add("interface=" + ipmiIface); @@ -299,11 +297,11 @@ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry) res = cmd.execute(interpreter); } if (res != null && 
res.startsWith("Error: Unable to establish LAN")) { - s_logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times"); + logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times"); try { TimeUnit.SECONDS.sleep(1); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while waiting to retry running script."); + logger.debug("[ignored] interrupted while waiting to retry running script."); } continue; } else if (res == null) { @@ -313,7 +311,7 @@ protected boolean doScript(Script cmd, OutputInterpreter interpreter, int retry) } } - s_logger.warn("IPMI Scirpt failed due to " + res + "(" + cmd.toString() + ")"); + logger.warn("IPMI Scirpt failed due to " + res + "(" + cmd.toString() + ")"); return false; } @@ -379,12 +377,12 @@ public PingCommand getCurrentStatus(long id) { if (!ipmiPing()) { Thread.sleep(1000); if (!ipmiPing()) { - s_logger.warn("Cannot ping ipmi nic " + _ip); + logger.warn("Cannot ping ipmi nic " + _ip); return null; } } } catch (Exception e) { - s_logger.debug("Cannot ping ipmi nic " + _ip, e); + logger.debug("Cannot ping ipmi nic " + _ip, e); return null; } @@ -419,11 +417,11 @@ protected Answer execute(IpmISetBootDevCommand cmd) { String bootDev = cmd.getBootDev().name(); if (!doScript(bootCmd)) { - s_logger.warn("Set " + _ip + " boot dev to " + bootDev + "failed"); + logger.warn("Set " + _ip + " boot dev to " + bootDev + "failed"); return new Answer(cmd, false, "Set " + _ip + " boot dev to " + bootDev + "failed"); } - s_logger.warn("Set " + _ip + " boot dev to " + bootDev + "Success"); + logger.warn("Set " + _ip + " boot dev to " + bootDev + "Success"); return new Answer(cmd, true, "Set " + _ip + " boot dev to " + bootDev + "Success"); } @@ -494,7 +492,7 @@ public Answer executeRequest(Command cmd) { return Answer.createUnsupportedCommandAnswer(cmd); } } catch (Throwable t) { - s_logger.debug(t.getMessage(), t); + logger.debug(t.getMessage(), t); return new 
Answer(cmd, false, t.getMessage()); } } @@ -545,7 +543,7 @@ protected StopAnswer execute(final StopCommand cmd) { OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); if (!doScript(_getStatusCommand, interpreter)) { success = true; - s_logger.warn("Cannot get power status of " + getName() + ", assume VM state changed successfully"); + logger.warn("Cannot get power status of " + getName() + ", assume VM state changed successfully"); break; } @@ -600,7 +598,7 @@ protected StartAnswer execute(StartCommand cmd) { try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException e) { - s_logger.warn(e.getMessage(), e); + logger.warn(e.getMessage(), e); } q = QueryBuilder.create(VMInstanceVO.class); @@ -614,21 +612,21 @@ protected StartAnswer execute(StartCommand cmd) { return new StartAnswer(cmd); } - s_logger.debug(String.format("still wait for baremetal provision done notification for vm[name:%s], current vm state is %s", vmvo.getInstanceName(), vmvo.getState())); + logger.debug(String.format("still wait for baremetal provision done notification for vm[name:%s], current vm state is %s", vmvo.getInstanceName(), vmvo.getState())); } return new StartAnswer(cmd, String.format("timeout after %s seconds, no baremetal provision done notification received. 
vm[name:%s] failed to start", isProvisionDoneNotificationTimeout, vm.getName())); } } - s_logger.debug("Start bare metal vm " + vm.getName() + "successfully"); + logger.debug("Start bare metal vm " + vm.getName() + "successfully"); _vmName = vm.getName(); return new StartAnswer(cmd); } protected ReadyAnswer execute(ReadyCommand cmd) { // derived resource should check if the PXE server is ready - s_logger.debug("Bare metal resource " + getName() + " is ready"); + logger.debug("Bare metal resource " + getName() + " is ready"); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java index bf6932f05d43..79590f08ffa4 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.Pod; @@ -59,7 +58,6 @@ import com.cloud.vm.VirtualMachineProfile; public class BaremetaNetworkGuru extends DirectPodBasedNetworkGuru { - private static final Logger s_logger = Logger.getLogger(BaremetaNetworkGuru.class); @Inject private HostDao _hostDao; @Inject @@ -151,14 +149,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { * nic.setBroadcastUri(null); nic.setIsolationUri(null); */ - s_logger.debug("Allocated a nic " + nic + " for " + vm); + logger.debug("Allocated a nic " + nic + " for " + vm); } private void getBaremetalIp(NicProfile nic, Pod pod, VirtualMachineProfile vm, Network network, String requiredIp) throws InsufficientAddressCapacityException, 
ConcurrentOperationException { DataCenter dc = _dcDao.findById(pod.getDataCenterId()); if (nic.getIPv4Address() == null) { - s_logger.debug(String.format("Requiring ip address: %s", nic.getIPv4Address())); + logger.debug(String.format("Requiring ip address: %s", nic.getIPv4Address())); PublicIp ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), pod.getId(), vm.getOwner(), VlanType.DirectAttached, network.getId(), requiredIp, false, false); nic.setIPv4Address(ip.getAddress().toString()); nic.setFormat(AddressFormat.Ip4); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java index 807babcb09f8..e39b40cfc68b 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.baremetal.database.BaremetalDhcpVO; import com.cloud.dc.DataCenter.NetworkType; @@ -56,7 +55,6 @@ import com.cloud.vm.dao.NicDao; public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProvider { - private static final Logger s_logger = Logger.getLogger(BaremetalDhcpElement.class); private static final Map> capabilities; @Inject @@ -98,7 +96,7 @@ private boolean canHandle(DeployDestination dest, TrafficType trafficType, Guest public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (offering.isSystemOnly() || !canHandle(dest, offering.getTrafficType(), network.getGuestType())) { - s_logger.debug("BaremetalDhcpElement can not handle networkoffering: " + offering.getName()); + 
logger.debug("BaremetalDhcpElement can not handle networkoffering: " + offering.getName()); return false; } return true; diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java index f50681abe7bd..99bedbff05e1 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.AddBaremetalDhcpCmd; import org.apache.cloudstack.api.ListBaremetalDhcpCmd; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -75,7 +74,6 @@ import com.cloud.vm.dao.UserVmDao; public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDhcpManager, ResourceStateAdapter { - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(BaremetalDhcpManagerImpl.class); protected String _name; @Inject DataCenterDao _dcDao; @@ -155,15 +153,15 @@ public boolean addVirtualMachineIntoNetwork(Network network, NicProfile nic, Vir try { Answer ans = _agentMgr.send(h.getId(), dhcpCommand); if (ans.getResult()) { - s_logger.debug(String.format("Set dhcp entry on external DHCP %1$s successfully(ip=%2$s, mac=%3$s, vmname=%4$s)", h.getPrivateIpAddress(), + logger.debug(String.format("Set dhcp entry on external DHCP %1$s successfully(ip=%2$s, mac=%3$s, vmname=%4$s)", h.getPrivateIpAddress(), nic.getIPv4Address(), nic.getMacAddress(), profile.getVirtualMachine().getHostName())); return true; } else { - s_logger.debug(errMsg + " " + ans.getDetails()); + logger.debug(errMsg + " " + ans.getDetails()); throw new ResourceUnavailableException(errMsg, DataCenter.class, zoneId); } } catch (Exception e) { - s_logger.debug(errMsg, e); + 
logger.debug(errMsg, e); throw new ResourceUnavailableException(errMsg + e.getMessage(), DataCenter.class, zoneId); } } @@ -226,7 +224,7 @@ public BaremetalDhcpVO addDchpServer(AddBaremetalDhcpCmd cmd) { try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new IllegalArgumentException(e.getMessage()); } @@ -260,7 +258,7 @@ public BaremetalDhcpVO addDchpServer(AddBaremetalDhcpCmd cmd) { throw new CloudRuntimeException("Unsupport DHCP server type: " + cmd.getDhcpType()); } } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new CloudRuntimeException(e.getMessage()); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java index 0d3cdce74e8a..9fe3f6ad5166 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpResourceBase.java @@ -27,7 +27,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -44,7 +43,6 @@ import com.cloud.utils.component.ManagerBase; public class BaremetalDhcpResourceBase extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BaremetalDhcpResourceBase.class); String _name; String _guid; String _username; @@ -129,7 +127,7 @@ public PingCommand getCurrentStatus(long id) { } protected ReadyAnswer execute(ReadyCommand cmd) { - s_logger.debug("External DHCP resource " + _name + " is ready"); + logger.debug("External DHCP resource " + _name + " is ready"); return new ReadyAnswer(cmd); } diff --git 
a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java index 8fd2c35ced99..e92cbf2c2045 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDhcpdResource.java @@ -27,7 +27,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -41,14 +40,13 @@ import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalDhcpdResource extends BaremetalDhcpResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalDhcpdResource.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { com.trilead.ssh2.Connection sshConnection = null; try { super.configure(name, params); - s_logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, "******")); + logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, "******")); sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); if (sshConnection == null) { throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); @@ -89,10 +87,10 @@ public boolean configure(String name, Map params) throws Configu throw new ConfigurationException("prepare Dhcpd at " + _ip + " failed, command:" + cmd); } - s_logger.debug("Dhcpd resource configure successfully"); + logger.debug("Dhcpd resource configure successfully"); return true; } catch (Exception e) { - s_logger.debug("Dhcpd resource configure failed", e); + logger.debug("Dhcpd resource configure failed", e); throw new 
ConfigurationException(e.getMessage()); } finally { SSHCmdHelper.releaseSshConnection(sshConnection); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java index 79f23cc46db2..51acfe93d39e 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalDnsmasqResource.java @@ -27,7 +27,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -41,14 +40,13 @@ import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalDnsmasqResource.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { com.trilead.ssh2.Connection sshConnection = null; try { super.configure(name, params); - s_logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password)); + logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password)); sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); if (sshConnection == null) { throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); @@ -81,10 +79,10 @@ public boolean configure(String name, Map params) throws Configu } */ - s_logger.debug("Dnsmasq resource configure successfully"); + logger.debug("Dnsmasq resource configure successfully"); return true; } catch (Exception e) { - s_logger.debug("Dnsmasq resorce configure failed", e); + logger.debug("Dnsmasq 
resorce configure failed", e); throw new ConfigurationException(e.getMessage()); } finally { SSHCmdHelper.releaseSshConnection(sshConnection); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java index dbee3c4a91d4..3775f4effc17 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -40,7 +39,6 @@ import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalKickStartPxeResource.class); private static final String Name = "BaremetalKickStartPxeResource"; String _tftpDir; @@ -54,11 +52,11 @@ public boolean configure(String name, Map params) throws Configu com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - s_logger.debug(String.format("Trying to connect to kickstart PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); + logger.debug(String.format("Trying to connect to kickstart PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to kickstart PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); } @@ -132,7 +130,7 
@@ private Answer execute(VmDataCommand cmd) { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -143,7 +141,7 @@ private Answer execute(VmDataCommand cmd) { return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { @@ -168,7 +166,7 @@ private Answer execute(PrepareKickstartPxeServerCommand cmd) { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -188,10 +186,10 @@ private Answer execute(PrepareKickstartPxeServerCommand cmd) { return new Answer(cmd, false, "prepare kickstart at pxe server " + _ip + " failed, command:" + script); } - s_logger.debug("Prepare kickstart PXE server successfully"); + logger.debug("Prepare kickstart PXE server successfully"); return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for kickstart server failed", e); + logger.debug("Prepare for kickstart server failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java 
b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java index 8fe3d827ed40..169743881ede 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; @@ -80,7 +79,6 @@ import com.cloud.vm.dao.NicDao; public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase implements BaremetalPxeService { - private static final Logger s_logger = Logger.getLogger(BaremetalKickStartServiceImpl.class); @Inject ResourceManager _resourceMgr; @Inject @@ -170,7 +168,7 @@ public File getSystemVMKeyFile() { throw new CloudRuntimeException(String.format("cannot find id_rsa.cloud")); } if (!keyFile.exists()) { - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); + logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } @@ -199,7 +197,7 @@ private boolean preparePxeInBasicZone(VirtualMachineProfile profile, NicProfile cmd.setTemplateUuid(template.getUuid()); Answer aws = _agentMgr.send(pxeVo.getHostId(), cmd); if (!aws.getResult()) { - s_logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); + logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); return false; } @@ -234,7 +232,7 @@ private boolean preparePxeInAdvancedZone(VirtualMachineProfile profile, NicProfi List tuple = parseKickstartUrl(profile); String cmd = 
String.format("/opt/cloud/bin/prepare_pxe.sh %s %s %s %s %s %s", tuple.get(1), tuple.get(2), profile.getTemplate().getUuid(), String.format("01-%s", nic.getMacAddress().replaceAll(":", "-")).toLowerCase(), tuple.get(0), nic.getMacAddress().toLowerCase()); - s_logger.debug(String.format("prepare pxe on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); + logger.debug(String.format("prepare pxe on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); ret = SshHelper.sshExecute(mgmtNic.getIPv4Address(), 3922, "root", getSystemVMKeyFile(), null, cmd); if (!ret.first()) { throw new CloudRuntimeException(String.format("failed preparing PXE in virtual router[id:%s], because %s", vr.getId(), ret.second())); @@ -242,7 +240,7 @@ private boolean preparePxeInAdvancedZone(VirtualMachineProfile profile, NicProfi //String internalServerIp = "10.223.110.231"; cmd = String.format("/opt/cloud/bin/baremetal_snat.sh %s %s %s", mgmtNic.getIPv4Address(), internalServerIp, mgmtNic.getIPv4Gateway()); - s_logger.debug(String.format("prepare SNAT on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); + logger.debug(String.format("prepare SNAT on virtual router[ip:%s], cmd: %s", mgmtNic.getIPv4Address(), cmd)); ret = SshHelper.sshExecute(mgmtNic.getIPv4Address(), 3922, "root", getSystemVMKeyFile(), null, cmd); if (!ret.first()) { throw new CloudRuntimeException(String.format("failed preparing PXE in virtual router[id:%s], because %s", vr.getId(), ret.second())); @@ -267,12 +265,12 @@ public boolean prepare(VirtualMachineProfile profile, NicProfile nic, Network ne IpmISetBootDevCommand bootCmd = new IpmISetBootDevCommand(BootDev.pxe); Answer aws = _agentMgr.send(dest.getHost().getId(), bootCmd); if (!aws.getResult()) { - s_logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); + logger.warn("Unable to set host: " + dest.getHost().getId() + " to PXE boot because " + aws.getDetails()); } return 
aws.getResult(); } catch (Exception e) { - s_logger.warn("Cannot prepare PXE server", e); + logger.warn("Cannot prepare PXE server", e); return false; } } @@ -324,7 +322,7 @@ public BaremetalPxeVO addPxeServer(AddBaremetalPxeCmd cmd) { try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new IllegalArgumentException(e.getMessage()); } String ipAddress = uri.getHost(); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java index 416b3d0e3b3b..96b2dbfeb935 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPingPxeResource.java @@ -29,7 +29,6 @@ import javax.naming.ConfigurationException; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.trilead.ssh2.SCPClient; @@ -46,7 +45,6 @@ import com.cloud.utils.ssh.SSHCmdHelper; public class BaremetalPingPxeResource extends BaremetalPxeResourceBase { - private static final Logger s_logger = Logger.getLogger(BaremetalPingPxeResource.class); private static final String Name = "BaremetalPingPxeResource"; String _storageServer; String _pingDir; @@ -98,11 +96,11 @@ public boolean configure(String name, Map params) throws Configu com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - s_logger.debug(String.format("Trying to connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); + logger.debug(String.format("Trying to connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - 
s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); } @@ -152,7 +150,7 @@ protected PreparePxeServerAnswer execute(PreparePxeServerCommand cmd) { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -162,11 +160,11 @@ protected PreparePxeServerAnswer execute(PreparePxeServerCommand cmd) { if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { return new PreparePxeServerAnswer(cmd, "prepare PING at " + _ip + " failed, command:" + script); } - s_logger.debug("Prepare Ping PXE server successfully"); + logger.debug("Prepare Ping PXE server successfully"); return new PreparePxeServerAnswer(cmd); } catch (Exception e) { - s_logger.debug("Prepare PING pxe server failed", e); + logger.debug("Prepare PING pxe server failed", e); return new PreparePxeServerAnswer(cmd, e.getMessage()); } finally { if (sshConnection != null) { @@ -180,7 +178,7 @@ protected Answer execute(PrepareCreateTemplateCommand cmd) { try { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -190,11 +188,11 @@ protected Answer execute(PrepareCreateTemplateCommand cmd) { if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { return new Answer(cmd, false, "prepare for creating template failed, 
command:" + script); } - s_logger.debug("Prepare for creating template successfully"); + logger.debug("Prepare for creating template successfully"); return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { @@ -238,7 +236,7 @@ private Answer execute(VmDataCommand cmd) { sshConnection.connect(null, 60000, 60000); if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); + logger.debug("SSH Failed to authenticate"); throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); } @@ -249,7 +247,7 @@ private Answer execute(VmDataCommand cmd) { return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); + logger.debug("Prepare for creating baremetal template failed", e); return new Answer(cmd, false, e.getMessage()); } finally { if (sshConnection != null) { diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java index 17ec90210163..fa708e7be4cc 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeElement.java @@ -51,7 +51,6 @@ import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.HashMap; @@ -59,7 +58,6 @@ import java.util.Set; public class BaremetalPxeElement 
extends AdapterBase implements NetworkElement { - private static final Logger s_logger = Logger.getLogger(BaremetalPxeElement.class); private static final Map> capabilities; @Inject @@ -110,7 +108,7 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin } if (offering.isSystemOnly() || !canHandle(dest, offering.getTrafficType(), network.getGuestType())) { - s_logger.debug("BaremetalPxeElement can not handle network offering: " + offering.getName()); + logger.debug("BaremetalPxeElement can not handle network offering: " + offering.getName()); return false; } return true; diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java index 22f939597338..636ce360d03a 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.AddBaremetalPxePingServerCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -71,7 +70,6 @@ import com.cloud.vm.dao.UserVmDao; public class BaremetalPxeManagerImpl extends ManagerBase implements BaremetalPxeManager, ResourceStateAdapter { - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(BaremetalPxeManagerImpl.class); @Inject DataCenterDao _dcDao; @Inject @@ -233,13 +231,13 @@ public boolean addUserData(NicProfile nic, VirtualMachineProfile profile) { try { Answer ans = _agentMgr.send(pxeVo.getHostId(), cmd); if (!ans.getResult()) { - s_logger.debug(String.format("Add userdata to vm:%s failed because %s", 
vm.getInstanceName(), ans.getDetails())); + logger.debug(String.format("Add userdata to vm:%s failed because %s", vm.getInstanceName(), ans.getDetails())); return false; } else { return true; } } catch (Exception e) { - s_logger.debug(String.format("Add userdata to vm:%s failed", vm.getInstanceName()), e); + logger.debug(String.format("Add userdata to vm:%s failed", vm.getInstanceName()), e); return false; } } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java index 5b5a959d597f..01d1bf62717d 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/BaremetalPxeResourceBase.java @@ -26,7 +26,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -41,7 +40,6 @@ import com.cloud.utils.component.ManagerBase; public class BaremetalPxeResourceBase extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BaremetalPxeResourceBase.class); String _name; String _guid; String _username; @@ -84,7 +82,7 @@ public boolean configure(String name, Map params) throws Configu } protected ReadyAnswer execute(ReadyCommand cmd) { - s_logger.debug("Pxe resource " + _name + " is ready"); + logger.debug("Pxe resource " + _name + " is ready"); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/Force10BaremetalSwitchBackend.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/Force10BaremetalSwitchBackend.java index e4dd5b1a5d8a..3a013da7858e 100644 --- 
a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/Force10BaremetalSwitchBackend.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/Force10BaremetalSwitchBackend.java @@ -27,7 +27,8 @@ import com.cloud.utils.xmlobject.XmlObjectParser; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.http.HttpEntity; import org.springframework.http.HttpHeaders; import org.springframework.http.HttpMethod; @@ -48,7 +49,7 @@ * Created by frank on 9/2/14. */ public class Force10BaremetalSwitchBackend implements BaremetalSwitchBackend { - private Logger logger = Logger.getLogger(Force10BaremetalSwitchBackend.class); + private Logger logger = LogManager.getLogger(Force10BaremetalSwitchBackend.class); public static final String TYPE = "Force10"; private static List successHttpStatusCode = new ArrayList<>(); diff --git a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/SecurityGroupHttpClient.java b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/SecurityGroupHttpClient.java index b100929da961..b00535004640 100644 --- a/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/SecurityGroupHttpClient.java +++ b/plugins/hypervisors/baremetal/src/main/java/com/cloud/baremetal/networkservice/SecurityGroupHttpClient.java @@ -34,7 +34,8 @@ import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager; import org.apache.commons.httpclient.methods.PostMethod; import org.apache.commons.httpclient.methods.StringRequestEntity; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.xml.bind.JAXBContext; import javax.xml.bind.Marshaller; @@ -46,7 +47,7 @@ import 
java.util.concurrent.TimeUnit; public class SecurityGroupHttpClient { - private static final Logger logger = Logger.getLogger(SecurityGroupHttpClient.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String ARG_NAME = "args"; private static final String COMMAND = "command"; private JAXBContext context; diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalDhcpCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalDhcpCmd.java index 22a1c4eba905..379dee875f4d 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalDhcpCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalDhcpCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; import org.apache.cloudstack.context.CallContext; @@ -38,7 +37,6 @@ @APICommand(name = "addBaremetalDhcp", description = "adds a baremetal dhcp server", responseObject = BaremetalDhcpResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddBaremetalDhcpCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddBaremetalDhcpCmd.class); @Inject BaremetalDhcpManager mgr; @@ -84,7 +82,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setResponseName(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - s_logger.warn("Unable to add external dhcp server with url: " + getUrl(), e); + logger.warn("Unable to add external dhcp server with url: " + getUrl(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalPxeCmd.java 
b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalPxeCmd.java index 19854a981b5b..a11ae0bae369 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalPxeCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalPxeCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; import org.apache.cloudstack.api.response.PodResponse; @@ -38,7 +37,6 @@ public class AddBaremetalPxeCmd extends BaseAsyncCmd { private static final String s_name = "addbaremetalpxeresponse"; - public static final Logger s_logger = Logger.getLogger(AddBaremetalPxeCmd.class); @Inject BaremetalPxeManager pxeMgr; @@ -86,7 +84,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE rsp.setResponseName(getCommandName()); this.setResponseObject(rsp); } catch (Exception e) { - s_logger.warn("Unable to add external pxe server with url: " + getUrl(), e); + logger.warn("Unable to add external pxe server with url: " + getUrl(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalRctCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalRctCmd.java index 3227cbd24ff1..e7c77c3cd1c4 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalRctCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/AddBaremetalRctCmd.java @@ -27,7 +27,6 @@ import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -37,7 +36,6 @@ @APICommand(name = "addBaremetalRct", description = "adds baremetal rack configuration 
text", responseObject = BaremetalRctResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class AddBaremetalRctCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddBaremetalRctCmd.class); @Inject private BaremetalVlanManager vlanMgr; @@ -68,7 +66,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE BaremetalRctResponse rsp = vlanMgr.addRct(this); this.setResponseObject(rsp); } catch (Exception e) { - s_logger.warn(String.format("unable to add baremetal RCT[%s]", getRctUrl()), e); + logger.warn(String.format("unable to add baremetal RCT[%s]", getRctUrl()), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/BaremetalProvisionDoneNotificationCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/BaremetalProvisionDoneNotificationCmd.java index c712849a27ac..75df9556ba4a 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/BaremetalProvisionDoneNotificationCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/BaremetalProvisionDoneNotificationCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.context.CallContext; import javax.inject.Inject; -import org.apache.log4j.Logger; /** * Created by frank on 9/17/14. @@ -36,7 +35,6 @@ @APICommand(name = "notifyBaremetalProvisionDone", description = "Notify provision has been done on a host. 
This api is for baremetal virtual router service, not for end user", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class BaremetalProvisionDoneNotificationCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(BaremetalProvisionDoneNotificationCmd.class); private static final String s_name = "baremetalprovisiondone"; @Inject @@ -61,7 +59,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE bmMgr.notifyProvisionDone(this); this.setResponseObject(new SuccessResponse(getCommandName())); } catch (Exception e) { - s_logger.warn(String.format("unable to notify baremetal provision done[mac:%s]", mac), e); + logger.warn(String.format("unable to notify baremetal provision done[mac:%s]", mac), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/DeleteBaremetalRctCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/DeleteBaremetalRctCmd.java index c2691d67ae07..8bb31403d9f9 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/DeleteBaremetalRctCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/DeleteBaremetalRctCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -38,7 +37,6 @@ @APICommand(name = "deleteBaremetalRct", description = "deletes baremetal rack configuration text", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class DeleteBaremetalRctCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteBaremetalRctCmd.class); 
@Parameter(name = ApiConstants.ID, type = BaseCmd.CommandType.UUID, description = "RCT id", required = true, entityType = BaremetalRctResponse.class) private Long id; @@ -63,7 +61,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } catch (Exception e) { - s_logger.warn(String.format("unable to delete baremetal RCT[%s]", getId()), e); + logger.warn(String.format("unable to delete baremetal RCT[%s]", getId()), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage(), e); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalDhcpCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalDhcpCmd.java index fdc64de25d71..8f4e2338be9b 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalDhcpCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalDhcpCmd.java @@ -27,7 +27,6 @@ import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; @@ -35,7 +34,6 @@ @APICommand(name = "listBaremetalDhcp", description = "list baremetal dhcp servers", responseObject = BaremetalDhcpResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListBaremetalDhcpCmd extends BaseListCmd { - private static final Logger s_logger = Logger.getLogger(ListBaremetalDhcpCmd.class); @Inject BaremetalDhcpManager _dhcpMgr; @@ -90,7 +88,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setObjectName("baremetaldhcps"); this.setResponseObject(response); } catch (Exception e) { - s_logger.debug("Exception happend 
while executing ListBaremetalDhcpCmd"); + logger.debug("Exception happend while executing ListBaremetalDhcpCmd"); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalPxeServersCmd.java b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalPxeServersCmd.java index 5f856a5d638f..bcf3f6f44dbe 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalPxeServersCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalPxeServersCmd.java @@ -27,7 +27,6 @@ import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; @@ -35,7 +34,6 @@ @APICommand(name = "listBaremetalPxeServers", description = "list baremetal pxe server", responseObject = BaremetalPxeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListBaremetalPxeServersCmd extends BaseListCmd { - private static final Logger s_logger = Logger.getLogger(ListBaremetalPxeServersCmd.class); @Inject BaremetalPxeManager _pxeMgr; @@ -76,7 +74,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setObjectName("baremetalpxeservers"); this.setResponseObject(response); } catch (Exception e) { - s_logger.debug("Exception happened while executing ListPingPxeServersCmd", e); + logger.debug("Exception happened while executing ListPingPxeServersCmd", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalRctCmd.java 
b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalRctCmd.java index 379c8758bf17..d654fece0c76 100644 --- a/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalRctCmd.java +++ b/plugins/hypervisors/baremetal/src/main/java/org/apache/cloudstack/api/ListBaremetalRctCmd.java @@ -27,7 +27,6 @@ import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -36,7 +35,6 @@ @APICommand(name = "listBaremetalRct", description = "list baremetal rack configuration", responseObject = BaremetalRctResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class ListBaremetalRctCmd extends BaseListCmd { - private static final Logger s_logger = Logger.getLogger(ListBaremetalRctCmd.class); @Inject BaremetalVlanManager vlanMgr; @@ -55,7 +53,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setObjectName("baremetalrcts"); this.setResponseObject(response); } catch (Exception e) { - s_logger.debug("Exception happened while executing ListBaremetalRctCmd", e); + logger.debug("Exception happened while executing ListBaremetalRctCmd", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in b/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in index fdbba19e9036..0292dffb3a91 100644 --- a/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in +++ b/plugins/hypervisors/hyperv/conf/log4j-cloud.xml.in @@ -17,77 +17,52 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
--> - + + - + + + - - - + + + + + + + + - - - - - - - - - - - - - - - - + + + - - - + + + + - - - - + - - - + + - - - - - - - - - - - + - - - - + - - - - - - - + - - - + - - - - - + + + - + + + + + + + diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java index 774efc84b3ae..d820fd5b6d30 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/ha/HypervInvestigator.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -36,7 +35,6 @@ import com.cloud.utils.component.AdapterBase; public class HypervInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(HypervInvestigator.class); @Inject HostDao _hostDao; @Inject AgentManager _agentMgr; @Inject ResourceManager _resourceMgr; @@ -68,7 +66,7 @@ public Status isAgentAlive(Host agent) { return answer.getResult() ? 
Status.Down : Status.Up; } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + neighbor.getId(), e); + logger.debug("Failed to send command to host: " + neighbor.getId(), e); } } diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java index 51b423052ea9..283f4dc0c96c 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/discoverer/HypervServerDiscoverer.java @@ -29,7 +29,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -72,7 +71,6 @@ * hypervisor and manages its lifecycle. */ public class HypervServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(HypervServerDiscoverer.class); Random _rand = new Random(System.currentTimeMillis()); Map _storageMounts = new HashMap(); @@ -121,7 +119,7 @@ public final void processConnect(final Host agent, final StartupCommand cmd, fin // assert if (startup.getHypervisorType() != HypervisorType.Hyperv) { - s_logger.debug("Not Hyper-V hypervisor, so moving on."); + logger.debug("Not Hyper-V hypervisor, so moving on."); return; } @@ -137,8 +135,8 @@ public final void processConnect(final Host agent, final StartupCommand cmd, fin _clusterDao.update(cluster.getId(), cluster); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Setting up host " + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Setting up host " + agentId); } HostEnvironment env = new HostEnvironment(); @@ -163,14 +161,14 @@ public final void processConnect(final Host agent, final 
StartupCommand cmd, fin if (reason == null) { reason = " details were null"; } - s_logger.warn("Unable to setup agent " + agentId + " due to " + reason); + logger.warn("Unable to setup agent " + agentId + " due to " + reason); } // Error handling borrowed from XcpServerDiscoverer, may need to be // updated. } catch (AgentUnavailableException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); } catch (OperationTimedoutException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn("Unable to setup agent " + agentId + " because it timed out", e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -213,14 +211,14 @@ public final boolean processTimeout(final long agentId, final long seq) { public final Map> find(final long dcId, final Long podId, final Long clusterId, final URI uri, final String username, final String password, final List hostTags) throws DiscoveryException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Discover host. dc(zone): " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + uri.getHost()); + if (logger.isInfoEnabled()) { + logger.info("Discover host. 
dc(zone): " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + uri.getHost()); } // Assertions if (podId == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No pod is assigned, skipping the discovery in" + " Hyperv discoverer"); + if (logger.isInfoEnabled()) { + logger.info("No pod is assigned, skipping the discovery in" + " Hyperv discoverer"); } return null; } @@ -228,20 +226,20 @@ public final Map> find(final long // in the // database if (cluster == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No cluster in database for cluster id " + clusterId); + if (logger.isInfoEnabled()) { + logger.info("No cluster in database for cluster id " + clusterId); } return null; } if (cluster.getHypervisorType() != HypervisorType.Hyperv) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Cluster " + clusterId + "is not for Hyperv hypervisors"); + if (logger.isInfoEnabled()) { + logger.info("Cluster " + clusterId + "is not for Hyperv hypervisors"); } return null; } if (!uri.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of" + " the discovery for this: " + uri; - s_logger.debug(msg); + logger.debug(msg); return null; } @@ -253,11 +251,11 @@ public final Map> find(final long String guidWithTail = calcServerResourceGuid(uuidSeed) + "-HypervResource"; if (_resourceMgr.findHostByGuid(guidWithTail) != null) { - s_logger.debug("Skipping " + agentIp + " because " + guidWithTail + " is already in the database."); + logger.debug("Skipping " + agentIp + " because " + guidWithTail + " is already in the database."); return null; } - s_logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDirectConnectResource for zone/pod/cluster " + dcId + "/" + podId + "/" + + logger.info("Creating" + HypervDirectConnectResource.class.getName() + " HypervDirectConnectResource for zone/pod/cluster " + dcId + "/" + podId + "/" + clusterId); // Some Hypervisors organise themselves in pools. 
@@ -298,7 +296,7 @@ public final Map> find(final long Answer pingAns = resource.executeRequest(ping); if (pingAns == null || !pingAns.getResult()) { String errMsg = "Agent not running, or no route to agent on at " + uri; - s_logger.debug(errMsg); + logger.debug(errMsg); throw new DiscoveryException(errMsg); } @@ -309,14 +307,14 @@ public final Map> find(final long return resources; } catch (ConfigurationException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + uri.getHost(), "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + uri.getHost(), e); + logger.warn("Unable to instantiate " + uri.getHost(), e); } catch (UnknownHostException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + uri.getHost(), "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + uri.getHost(), e); + logger.warn("Unable to instantiate " + uri.getHost(), e); } catch (Exception e) { String msg = " can't setup agent, due to " + e.toString() + " - " + e.getMessage(); - s_logger.warn(msg); + logger.warn(msg); } return null; } @@ -393,7 +391,7 @@ public final HostVO createHostVOForDirectConnectAgent(final HostVO host, final S return null; } - s_logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". Checking CIDR..."); + logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.Hyperv + ". 
Checking CIDR..."); HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java index 9490ae07b307..a31637b60deb 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/manager/HypervManagerImpl.java @@ -33,7 +33,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.configuration.Config; import com.cloud.storage.JavaStorageLayer; @@ -47,7 +48,7 @@ import com.cloud.vm.dao.VMInstanceDao; public class HypervManagerImpl implements HypervManager { - public static final Logger s_logger = Logger.getLogger(HypervManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private String name; private int runLevel; @@ -127,7 +128,7 @@ public void setRunLevel(int level) { public String prepareSecondaryStorageStore(long zoneId) { String secondaryStorageUri = getSecondaryStorageStoreUrl(zoneId); if (secondaryStorageUri == null) { - s_logger.debug("Secondary storage uri for dc " + zoneId + " couldn't be obtained"); + logger.debug("Secondary storage uri for dc " + zoneId + " couldn't be obtained"); } else { prepareSecondaryStorageStore(secondaryStorageUri); } @@ -143,7 +144,7 @@ private String getSecondaryStorageStoreUrl(long zoneId) { } if (secUrl == null) { - s_logger.warn("Secondary storage uri couldn't be retrieved"); + logger.warn("Secondary storage uri couldn't be 
retrieved"); } return secUrl; @@ -160,7 +161,7 @@ private void prepareSecondaryStorageStore(String storageUrl) { if (!patchFolder.exists()) { if (!patchFolder.mkdirs()) { String msg = "Unable to create systemvm folder on secondary storage. location: " + patchFolder.toString(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -168,20 +169,20 @@ private void prepareSecondaryStorageStore(String storageUrl) { File srcIso = getSystemVMPatchIsoFile(); File destIso = new File(mountPoint + "/systemvm/" + getSystemVMIsoFileNameOnDatastore()); if (!destIso.exists()) { - s_logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + + logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + destIso.getAbsolutePath()); try { FileUtil.copyfile(srcIso, destIso); } catch (IOException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); String msg = "Unable to copy systemvm ISO on secondary storage. 
src location: " + srcIso.toString() + ", dest location: " + destIso; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists"); + if (logger.isTraceEnabled()) { + logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists"); } } } finally { @@ -205,14 +206,14 @@ private String getMountPoint(String storageUrl) { try { uri = new URI(storageUrl); } catch (URISyntaxException e) { - s_logger.error("Invalid storage URL format ", e); + logger.error("Invalid storage URL format ", e); throw new CloudRuntimeException("Unable to create mount point due to invalid storage URL format " + storageUrl); } mountPoint = mount(File.separator + File.separator + uri.getHost() + uri.getPath(), getMountParent(), uri.getScheme(), uri.getQuery()); if (mountPoint == null) { - s_logger.error("Unable to create mount point for " + storageUrl); + logger.error("Unable to create mount point for " + storageUrl); return "/mnt/sec"; } @@ -224,7 +225,7 @@ private String getMountPoint(String storageUrl) { protected String mount(String path, String parent, String scheme, String query) { String mountPoint = setupMountPoint(parent); if (mountPoint == null) { - s_logger.warn("Unable to create a mount point"); + logger.warn("Unable to create a mount point"); return null; } @@ -232,7 +233,7 @@ protected String mount(String path, String parent, String scheme, String query) String result = null; if (scheme.equals("cifs")) { String user = System.getProperty("user.name"); - Script command = new Script(true, "mount", _timeout, s_logger); + Script command = new Script(true, "mount", _timeout, logger); command.add("-t", "cifs"); command.add(path); command.add(mountPoint); @@ -250,7 +251,7 @@ protected String mount(String path, String parent, String scheme, String query) } if (result != null) { - s_logger.warn("Unable to mount " + path + " due to " + 
result); + logger.warn("Unable to mount " + path + " due to " + result); File file = new File(mountPoint); if (file.exists()) { file.delete(); @@ -259,11 +260,11 @@ protected String mount(String path, String parent, String scheme, String query) } // Change permissions for the mountpoint - script = new Script(true, "chmod", _timeout, s_logger); + script = new Script(true, "chmod", _timeout, logger); script.add("-R", "777", mountPoint); result = script.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); + logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); } return mountPoint; } @@ -280,7 +281,7 @@ private String setupMountPoint(String parent) { break; } } - s_logger.error("Unable to create mount: " + mntPt); + logger.error("Unable to create mount: " + mntPt); } return mountPoint; @@ -306,7 +307,7 @@ private File getSystemVMPatchIsoFile() { assert (isoFile != null); if (!isoFile.exists()) { - s_logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); + logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); } return isoFile; } @@ -330,7 +331,7 @@ private String getMountParent() { } private void startupCleanup(String parent) { - s_logger.info("Cleanup mounted mount points used in previous session"); + logger.info("Cleanup mounted mount points used in previous session"); long mshostId = ManagementServerNode.getManagementServerId(); @@ -338,14 +339,14 @@ private void startupCleanup(String parent) { String[] mounts = _storage.listFiles(parent + File.separator + String.valueOf(mshostId) + ".*"); if (mounts != null && mounts.length > 0) { for (String mountPoint : mounts) { - s_logger.info("umount NFS mount from previous session: " + mountPoint); + logger.info("umount NFS mount from previous session: " + mountPoint); String result = null; - Script command = new Script(true, "umount", _timeout, s_logger); + Script command 
= new Script(true, "umount", _timeout, logger); command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to umount " + mountPoint + " due to " + result); + logger.warn("Unable to umount " + mountPoint + " due to " + result); } File file = new File(mountPoint); if (file.exists()) { @@ -356,17 +357,17 @@ private void startupCleanup(String parent) { } private void shutdownCleanup() { - s_logger.info("Cleanup mounted mount points used in current session"); + logger.info("Cleanup mounted mount points used in current session"); synchronized (_storageMounts) { for (String mountPoint : _storageMounts.values()) { - s_logger.info("umount NFS mount: " + mountPoint); + logger.info("umount NFS mount: " + mountPoint); String result = null; - Script command = new Script(true, "umount", _timeout, s_logger); + Script command = new Script(true, "umount", _timeout, logger); command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to umount " + mountPoint + " due to " + result); + logger.warn("Unable to umount " + mountPoint + " due to " + result); } File file = new File(mountPoint); if (file.exists()) { diff --git a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/resource/HypervDirectConnectResource.java b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/resource/HypervDirectConnectResource.java index 6bc1b98a688c..37df91b2c89c 100644 --- a/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/resource/HypervDirectConnectResource.java +++ b/plugins/hypervisors/hyperv/src/main/java/com/cloud/hypervisor/hyperv/resource/HypervDirectConnectResource.java @@ -60,7 +60,6 @@ import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.impl.conn.BasicClientConnectionManager; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; import org.joda.time.Duration; import com.cloud.agent.api.Answer; @@ -163,7 +162,6 @@ public 
class HypervDirectConnectResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer { public static final int DEFAULT_AGENT_PORT = 8250; public static final String HOST_VM_STATE_REPORT_COMMAND = "org.apache.cloudstack.HostVmStateReportCommand"; - private static final Logger s_logger = Logger.getLogger(HypervDirectConnectResource.class.getName()); private static final Gson s_gson = GsonHelper.getGson(); private String zoneId; @@ -206,7 +204,7 @@ public final StartupCommand[] initialize() { // assert if (!configureCalled) { final String errMsg = this.getClass().getName() + " requires configure() be called before" + " initialize()"; - s_logger.error(errMsg); + logger.error(errMsg); } // Create default StartupRoutingCommand, then customise @@ -224,7 +222,7 @@ public final StartupCommand[] initialize() { defaultStartRoutCmd.setStorageIpAddress(agentIp); defaultStartRoutCmd.setPool(clusterGuid); - s_logger.debug("Generated StartupRoutingCommand for agentIp \"" + agentIp + "\""); + logger.debug("Generated StartupRoutingCommand for agentIp \"" + agentIp + "\""); defaultStartRoutCmd.setVersion(this.getClass().getPackage().getImplementationVersion()); @@ -240,7 +238,7 @@ public final StartupCommand[] initialize() { // Assert that host identity is consistent with existing values. if (startCmd == null) { final String errMsg = String.format("Host %s (IP %s)" + "did not return a StartupRoutingCommand", name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? return null; } @@ -248,26 +246,26 @@ public final StartupCommand[] initialize() { final String errMsg = String.format("Host %s (IP %s) changed zone/data center. Was " + defaultStartRoutCmd.getDataCenter() + " NOW its " + startCmd.getDataCenter(), name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? 
return null; } if (!startCmd.getPod().equals(defaultStartRoutCmd.getPod())) { final String errMsg = String.format("Host %s (IP %s) changed pod. Was " + defaultStartRoutCmd.getPod() + " NOW its " + startCmd.getPod(), name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? return null; } if (!startCmd.getCluster().equals(defaultStartRoutCmd.getCluster())) { final String errMsg = String.format("Host %s (IP %s) changed cluster. Was " + defaultStartRoutCmd.getCluster() + " NOW its " + startCmd.getCluster(), name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? return null; } if (!startCmd.getGuid().equals(defaultStartRoutCmd.getGuid())) { final String errMsg = String.format("Host %s (IP %s) changed guid. Was " + defaultStartRoutCmd.getGuid() + " NOW its " + startCmd.getGuid(), name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? return null; } @@ -275,13 +273,13 @@ public final StartupCommand[] initialize() { final String errMsg = String.format("Host %s (IP %s) IP address. Was " + defaultStartRoutCmd.getPrivateIpAddress() + " NOW its " + startCmd.getPrivateIpAddress(), name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? return null; } if (!startCmd.getName().equals(defaultStartRoutCmd.getName())) { final String errMsg = String.format("Host %s (IP %s) name. Was " + startCmd.getName() + " NOW its " + defaultStartRoutCmd.getName(), name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? 
return null; } @@ -301,13 +299,13 @@ public final StartupCommand[] initialize() { if (storePoolCmd == null) { final String frmtStr = "Host %s (IP %s) sent incorrect Command, " + "second parameter should be a " + "StartupStorageCommand"; final String errMsg = String.format(frmtStr, name, agentIp); - s_logger.error(errMsg); + logger.error(errMsg); // TODO: valid to return null, or should we throw? return null; } - s_logger.info("Host " + name + " (IP " + agentIp + ") already configured with a storeage pool, details " + s_gson.toJson(startCmds[1])); + logger.info("Host " + name + " (IP " + agentIp + ") already configured with a storeage pool, details " + s_gson.toJson(startCmds[1])); } else { - s_logger.info("Host " + name + " (IP " + agentIp + ") already configured with a storeage pool, details "); + logger.info("Host " + name + " (IP " + agentIp + ") already configured with a storeage pool, details "); } return new StartupCommand[] {startCmd, storePoolCmd}; } @@ -316,14 +314,14 @@ public final StartupCommand[] initialize() { public final PingCommand getCurrentStatus(final long id) { final PingCommand pingCmd = new PingRoutingCommand(getType(), id, getHostVmStateReport()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping host " + name + " (IP " + agentIp + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Ping host " + name + " (IP " + agentIp + ")"); } final Answer pingAns = executeRequest(pingCmd); if (pingAns == null || !pingAns.getResult()) { - s_logger.info("Cannot ping host " + name + " (IP " + agentIp + "), pingAns (blank means null) is:" + pingAns); + logger.info("Cannot ping host " + name + " (IP " + agentIp + "), pingAns (blank means null) is:" + pingAns); return null; } return pingCmd; @@ -335,7 +333,7 @@ public final ArrayList> requestHostVmStateReport() { agentUri = new URI("https", null, agentIp, port, "/api/HypervResource/" + HOST_VM_STATE_REPORT_COMMAND, null, null); } catch (final URISyntaxException e) { final String errMsg = "Could not 
generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return null; } final String incomingCmd = postHttpRequest("{}", agentUri); @@ -349,9 +347,9 @@ public final ArrayList> requestHostVmStateReport() { }.getType()); } catch (final Exception ex) { final String errMsg = "Failed to deserialize Command[] " + incomingCmd; - s_logger.error(errMsg, ex); + logger.error(errMsg, ex); } - s_logger.debug("HostVmStateReportCommand received response " + logger.debug("HostVmStateReportCommand received response " + s_gson.toJson(result)); if (result != null) { if (!result.isEmpty()) { @@ -393,7 +391,7 @@ public final Command[] requestStartupCommand(final Command[] cmd) { } catch (final URISyntaxException e) { // TODO add proper logging final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return null; } final String incomingCmd = postHttpRequest(s_gson.toJson(cmd), agentUri); @@ -406,9 +404,9 @@ public final Command[] requestStartupCommand(final Command[] cmd) { result = s_gson.fromJson(incomingCmd, Command[].class); } catch (final Exception ex) { final String errMsg = "Failed to deserialize Command[] " + incomingCmd; - s_logger.error(errMsg, ex); + logger.error(errMsg, ex); } - s_logger.debug("requestStartupCommand received response " + s_gson.toJson(result)); + logger.debug("requestStartupCommand received response " + s_gson.toJson(result)); if (result.length > 0) { return result; } @@ -432,7 +430,7 @@ public final Answer executeRequest(final Command cmd) { } catch (final URISyntaxException e) { // TODO add proper logging final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return null; } if (cmd instanceof NetworkElementCommand) { @@ -460,7 +458,7 @@ public final Answer executeRequest(final Command cmd) { ((StartCommand)cmd).setSecondaryStorage(secondary); } } else { - s_logger.error("Hyperv manager isn't 
available. Couldn't check and copy the systemvm iso."); + logger.error("Hyperv manager isn't available. Couldn't check and copy the systemvm iso."); } } } @@ -474,7 +472,7 @@ public final Answer executeRequest(final Command cmd) { // E.g. see Response.getAnswers() final Answer[] result = s_gson.fromJson(ansStr, Answer[].class); final String logResult = cleanPassword(s_gson.toJson(result)); - s_logger.debug("executeRequest received response " + logResult); + logger.debug("executeRequest received response " + logResult); if (result.length > 0) { return result[0]; } @@ -491,7 +489,7 @@ private Answer execute(final CopyCommand cmd) { "/api/HypervResource/" + cmdName, null, null); } catch (final URISyntaxException e) { final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return null; } cleanPassword(cmd.getSrcTO().getDataStore()); @@ -505,7 +503,7 @@ private Answer execute(final CopyCommand cmd) { final Answer[] result = s_gson.fromJson(ansStr, Answer[].class); final String logResult = cleanPassword(s_gson.toJson(result)); - s_logger.debug("executeRequest received response " + logResult); + logger.debug("executeRequest received response " + logResult); if (result.length > 0) { return result[0]; } @@ -524,8 +522,8 @@ private void cleanPassword(final DataStoreTO dataStoreTO) { } private PlugNicAnswer execute(final PlugNicCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource PlugNicCommand " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource PlugNicCommand " + s_gson.toJson(cmd)); } try { @@ -544,19 +542,19 @@ private PlugNicAnswer execute(final PlugNicCommand cmd) { return new PlugNicAnswer(cmd, true, "success"); } final String msg = " Plug Nic failed for the vm as it has reached max limit of NICs to be added"; - s_logger.warn(msg); + logger.warn(msg); return new PlugNicAnswer(cmd, false, msg); } catch (final Exception e) { - 
s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + e.toString()); } } private UnPlugNicAnswer execute(final UnPlugNicCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource UnPlugNicCommand " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource UnPlugNicCommand " + s_gson.toJson(cmd)); } try { @@ -574,7 +572,7 @@ private UnPlugNicAnswer execute(final UnPlugNicCommand cmd) { } return new UnPlugNicAnswer(cmd, true, "success"); } catch (final Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + e.toString()); } } @@ -589,8 +587,8 @@ public ExecutionResult executeInVR(final String routerIP, final String script, f Pair result; //TODO: Password should be masked, cannot output to log directly - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on VR: " + routerIP + ", script: " + script + " with args: " + args); + if (logger.isDebugEnabled()) { + logger.debug("Run command on VR: " + routerIP + ", script: " + script + " with args: " + args); } try { @@ -598,11 +596,11 @@ public ExecutionResult executeInVR(final String routerIP, final String script, f VRScripts.CONNECTION_TIMEOUT, timeout); } catch (final Exception e) { final String msg = "Command failed due to " + e ; - s_logger.error(msg); + logger.error(msg); result = new Pair(false, msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(script + " execution result: " + result.first().toString()); + if (logger.isDebugEnabled()) { + logger.debug(script + " execution result: " + result.first().toString()); } return new ExecutionResult(result.first(), result.second()); } @@ -613,7 +611,7 @@ public ExecutionResult createFileInVR(final String routerIp, final String filePa try { 
SshHelper.scpTo(routerIp, 3922, "root", keyFile, null, filePath, content.getBytes(Charset.forName("UTF-8")), fileName, null); } catch (final Exception e) { - s_logger.warn("Fail to create file " + filePath + fileName + " in VR " + routerIp, e); + logger.warn("Fail to create file " + filePath + fileName + " in VR " + routerIp, e); return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); @@ -660,8 +658,8 @@ private ExecutionResult prepareNetworkElementCommand(final IpAssocCommand cmd) { boolean addVif = false; if (ip.isAdd() && publicNicInfo == -1) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Plug new NIC to associate" + controlIp + " to " + ip.getPublicIp()); + if (logger.isDebugEnabled()) { + logger.debug("Plug new NIC to associate" + controlIp + " to " + ip.getPublicIp()); } addVif = true; } @@ -679,7 +677,7 @@ private ExecutionResult prepareNetworkElementCommand(final IpAssocCommand cmd) { else { // we didn't find any eth device available in VR to configure the ip range with new VLAN final String msg = "No Nic is available on DomR VIF to associate/disassociate IP with."; - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } ip.setNicDevId(publicNicInfo); @@ -689,7 +687,7 @@ private ExecutionResult prepareNetworkElementCommand(final IpAssocCommand cmd) { } } } catch (final Throwable e) { - s_logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); + logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@ -711,7 +709,7 @@ protected ExecutionResult prepareNetworkElementCommand(final SetupGuestNetworkCo } } catch (final Exception e) { final String msg = "Prepare SetupGuestNetwork failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new ExecutionResult(false, msg); } return new 
ExecutionResult(true, null); @@ -735,7 +733,7 @@ private ExecutionResult prepareNetworkElementCommand(final IpAssocVpcCommand cmd if (ip.isAdd()) { throw new InternalErrorException("Failed to find DomR VIF to associate/disassociate IP with."); } else { - s_logger.debug("VIF to deassociate IP with does not exist, return success"); + logger.debug("VIF to deassociate IP with does not exist, return success"); continue; } } @@ -743,7 +741,7 @@ private ExecutionResult prepareNetworkElementCommand(final IpAssocVpcCommand cmd ip.setNicDevId(publicNicInfo); } } catch (final Exception e) { - s_logger.error("Prepare Ip Assoc failure on applying one ip due to exception: ", e); + logger.error("Prepare Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.toString()); } @@ -765,7 +763,7 @@ protected ExecutionResult prepareNetworkElementCommand(final SetSourceNatCommand } } catch (final Exception e) { final String msg = "Prepare Ip SNAT failure due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@ -787,7 +785,7 @@ private ExecutionResult prepareNetworkElementCommand(final SetNetworkACLCommand } } catch (final Exception e) { final String msg = "Prepare SetNetworkACL failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); @@ -814,29 +812,29 @@ protected Answer execute(final RemoteAccessVpnCfgCommand cmd) { try { final String command = String.format("%s%s %s", "/opt/cloud/bin/", VRScripts.VPN_L2TP, argsBuf.toString()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing " + command); + if (logger.isDebugEnabled()) { + logger.debug("Executing " + command); } final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - 
s_logger.error("RemoteAccessVpnCfg command on domR failed, message: " + result.second()); + logger.error("RemoteAccessVpnCfg command on domR failed, message: " + result.second()); return new Answer(cmd, false, "RemoteAccessVpnCfg command failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("RemoteAccessVpnCfg command on domain router " + argsBuf.toString() + " completed"); + if (logger.isInfoEnabled()) { + logger.info("RemoteAccessVpnCfg command on domain router " + argsBuf.toString() + " completed"); } } catch (final Throwable e) { if (e instanceof RemoteException) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } final String msg = "RemoteAccessVpnCfg command failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -856,24 +854,24 @@ protected Answer execute(final VpnUsersCfgCommand cmd) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing /opt/cloud/bin/vpn_lt2p.sh "); + if (logger.isDebugEnabled()) { + logger.debug("Executing /opt/cloud/bin/vpn_lt2p.sh "); } final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "/opt/cloud/bin/vpn_l2tp.sh " + argsBuf.toString()); if (!result.first()) { - s_logger.error("VpnUserCfg command on domR failed, message: " + result.second()); + logger.error("VpnUserCfg command on domR failed, message: " + result.second()); return new Answer(cmd, false, "VpnUserCfg command failed due to " + result.second()); } } catch (final Throwable e) { if (e instanceof RemoteException) { - s_logger.warn(e.getMessage()); + logger.warn(e.getMessage()); } final String msg = "VpnUserCfg command failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } } @@ -881,8 +879,8 @@ protected Answer execute(final VpnUsersCfgCommand cmd) { return new Answer(cmd); } private SetStaticRouteAnswer execute(final 
SetStaticRouteCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SetStaticRouteCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource SetStaticRouteCommand: " + s_gson.toJson(cmd)); } boolean endResult = true; @@ -908,19 +906,19 @@ private SetStaticRouteAnswer execute(final SetStaticRouteCommand cmd) { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing script on domain router " + controlIp + ": /opt/cloud/bin/vpc_staticroute.sh " + args); + if (logger.isDebugEnabled()) { + logger.debug("Executing script on domain router " + controlIp + ": /opt/cloud/bin/vpc_staticroute.sh " + args); } if (!result.first()) { - s_logger.error("SetStaticRouteCommand failure on setting one rule. args: " + args); + logger.error("SetStaticRouteCommand failure on setting one rule. args: " + args); results[i++] = "Failed"; endResult = false; } else { results[i++] = null; } } catch (final Throwable e) { - s_logger.error("SetStaticRouteCommand(args: " + args + ") failed on setting one rule due to " + e); + logger.error("SetStaticRouteCommand(args: " + args + ") failed on setting one rule due to " + e); results[i++] = "Failed"; endResult = false; } @@ -933,9 +931,9 @@ protected CheckS2SVpnConnectionsAnswer execute(final CheckS2SVpnConnectionsComma cmdline.append("/opt/cloud/bin/"); cmdline.append(VRScripts.S2SVPN_CHECK); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing resource CheckS2SVpnConnectionsCommand: " + s_gson.toJson(cmd)); - s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + cmdline.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Executing resource CheckS2SVpnConnectionsCommand: " + s_gson.toJson(cmd)); + logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + 
cmdline.toString()); } Pair result; @@ -949,26 +947,26 @@ protected CheckS2SVpnConnectionsAnswer execute(final CheckS2SVpnConnectionsComma result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, cmdline.toString()); if (!result.first()) { - s_logger.error("check site-to-site vpn connections command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + + logger.error("check site-to-site vpn connections command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); return new CheckS2SVpnConnectionsAnswer(cmd, false, result.second()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("check site-to-site vpn connections command on domain router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); + if (logger.isDebugEnabled()) { + logger.debug("check site-to-site vpn connections command on domain router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); } } catch (final Throwable e) { final String msg = "CheckS2SVpnConnectionsCommand failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); return new CheckS2SVpnConnectionsAnswer(cmd, false, "CheckS2SVpnConneciontsCommand failed"); } return new CheckS2SVpnConnectionsAnswer(cmd, true, result.second()); } protected Answer execute(final Site2SiteVpnCfgCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource Site2SiteVpnCfgCommand " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource Site2SiteVpnCfgCommand " + s_gson.toJson(cmd)); } final String routerIp = getRouterSshControlIp(cmd); @@ -1018,25 +1016,25 @@ protected Answer execute(final Site2SiteVpnCfgCommand cmd) { result = SshHelper.sshExecute(routerIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("Setup site2site VPN " + 
cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); + logger.error("Setup site2site VPN " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); return new Answer(cmd, false, "Setup site2site VPN falied due to " + result.second()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("setup site 2 site vpn on router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); + if (logger.isDebugEnabled()) { + logger.debug("setup site 2 site vpn on router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); } } catch (final Throwable e) { final String msg = "Setup site2site VPN falied due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, "Setup site2site VPN failed due to " + e.getMessage()); } return new Answer(cmd, true, result.second()); } protected SetSourceNatAnswer execute(final SetSourceNatCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SetSourceNatCommand " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource SetSourceNatCommand " + s_gson.toJson(cmd)); } final String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); @@ -1058,7 +1056,7 @@ protected SetSourceNatAnswer execute(final SetSourceNatCommand cmd) { if (!result.first()) { final String msg = "SetupGuestNetworkCommand on domain router " + routerIp + " failed. 
message: " + result.second(); - s_logger.error(msg); + logger.error(msg); return new SetSourceNatAnswer(cmd, false, msg); } @@ -1066,14 +1064,14 @@ protected SetSourceNatAnswer execute(final SetSourceNatCommand cmd) { return new SetSourceNatAnswer(cmd, true, "success"); } catch (final Exception e) { final String msg = "Ip SNAT failure due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new SetSourceNatAnswer(cmd, false, msg); } } protected Answer execute(final SetPortForwardingRulesCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SetPortForwardingRulesCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource SetPortForwardingRulesCommand: " + s_gson.toJson(cmd)); } final String controlIp = getRouterSshControlIp(cmd); @@ -1093,19 +1091,19 @@ protected Answer execute(final SetPortForwardingRulesCommand cmd) { try { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "/root/firewall.sh " + args); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing script on domain router " + controlIp + ": /root/firewall.sh " + args); + if (logger.isDebugEnabled()) { + logger.debug("Executing script on domain router " + controlIp + ": /root/firewall.sh " + args); } if (!result.first()) { - s_logger.error("SetPortForwardingRulesCommand failure on setting one rule. args: " + args); + logger.error("SetPortForwardingRulesCommand failure on setting one rule. 
args: " + args); results[i++] = "Failed"; endResult = false; } else { results[i++] = null; } } catch (final Throwable e) { - s_logger.error("SetPortForwardingRulesCommand(args: " + args + ") failed on setting one rule due to " + e.getMessage()); + logger.error("SetPortForwardingRulesCommand(args: " + args + ") failed on setting one rule due to " + e.getMessage()); results[i++] = "Failed"; endResult = false; } @@ -1117,9 +1115,9 @@ protected Answer execute(final SetPortForwardingRulesCommand cmd) { protected Answer execute(final CheckRouterCommand cmd) { final String command = String.format("%s%s", "/opt/cloud/bin/", VRScripts.RVR_CHECK); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing resource CheckRouterCommand: " + s_gson.toJson(cmd)); - s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + command); + if (logger.isDebugEnabled()) { + logger.debug("Executing resource CheckRouterCommand: " + s_gson.toJson(cmd)); + logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + command); } Pair result; @@ -1129,17 +1127,17 @@ protected Answer execute(final CheckRouterCommand cmd) { result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("check router command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); + logger.error("check router command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); return new CheckRouterAnswer(cmd, "CheckRouter failed due to " + result.second()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("check router command on domain router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); + if (logger.isDebugEnabled()) { + logger.debug("check router command on domain router " + 
cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); } } catch (final Throwable e) { final String msg = "CheckRouterCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new CheckRouterAnswer(cmd, msg); } return new CheckRouterAnswer(cmd, result.second(), true); @@ -1151,8 +1149,8 @@ protected Answer execute(final SetStaticNatRulesCommand cmd) { //return SetVPCStaticNatRules(cmd); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SetFirewallRuleCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource SetFirewallRuleCommand: " + s_gson.toJson(cmd)); } String args = null; @@ -1177,19 +1175,19 @@ protected Answer execute(final SetStaticNatRulesCommand cmd) { final String controlIp = getRouterSshControlIp(cmd); final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "/root/firewall.sh " + args); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing script on domain router " + controlIp + ": /root/firewall.sh " + args); + if (logger.isDebugEnabled()) { + logger.debug("Executing script on domain router " + controlIp + ": /root/firewall.sh " + args); } if (!result.first()) { - s_logger.error("SetStaticNatRulesCommand failure on setting one rule. args: " + args); + logger.error("SetStaticNatRulesCommand failure on setting one rule. 
args: " + args); results[i++] = "Failed"; endResult = false; } else { results[i++] = null; } } catch (final Throwable e) { - s_logger.error("SetStaticNatRulesCommand (args: " + args + ") failed on setting one rule due to " + e.getMessage()); + logger.error("SetStaticNatRulesCommand (args: " + args + ") failed on setting one rule due to " + e.getMessage()); results[i++] = "Failed"; endResult = false; } @@ -1198,8 +1196,8 @@ protected Answer execute(final SetStaticNatRulesCommand cmd) { } protected Answer execute(final PingTestCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource PingTestCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource PingTestCommand: " + s_gson.toJson(cmd)); } final String controlIp = cmd.getRouterIp(); final String args = " -c 1 -n -q " + cmd.getPrivateIp(); @@ -1209,7 +1207,7 @@ protected Answer execute(final PingTestCommand cmd) { return new Answer(cmd); } } catch (final Exception e) { - s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + e.getMessage()); + logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. 
failure due to " + e.getMessage()); } return new Answer(cmd, false, "PingTestCommand failed"); } @@ -1218,8 +1216,8 @@ protected Answer execute(final DeleteIpAliasCommand cmd) { cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); final List revokedIpAliasTOs = cmd.getDeleteIpAliasTos(); final List activeIpAliasTOs = cmd.getCreateIpAliasTos(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing deleteIpAlias command: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing deleteIpAlias command: " + s_gson.toJson(cmd)); } final StringBuilder args = new StringBuilder(); for (final IpAliasTO ipAliasTO : revokedIpAliasTOs) { @@ -1239,8 +1237,8 @@ protected Answer execute(final DeleteIpAliasCommand cmd) { args.append(ipAliasTO.getNetmask()); args.append("-"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + ", /root/deleteIpAlias " + args); + if (logger.isDebugEnabled()) { + logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + ", /root/deleteIpAlias " + args); } try { @@ -1248,18 +1246,18 @@ protected Answer execute(final DeleteIpAliasCommand cmd) { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "/root/deleteIpAlias.sh " + args); if (!result.first()) { - s_logger.error("deleteIpAlias command on domr " + controlIp + " failed, message: " + result.second()); + logger.error("deleteIpAlias command on domr " + controlIp + " failed, message: " + result.second()); return new Answer(cmd, false, "deleteIpAlias failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("deleteIpAlias command on domain router " + controlIp + " completed"); + if (logger.isInfoEnabled()) { + logger.info("deleteIpAlias command on domain router " + controlIp + " completed"); } } catch (final Throwable e) { final String msg = "deleteIpAlias failed due to 
" + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -1334,27 +1332,27 @@ protected Answer execute(final LoadBalancerConfigCommand cmd) { SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "scp " + tmpCfgFilePath + " /etc/haproxy/haproxy.cfg.new"); if (!result.first()) { - s_logger.error("Unable to copy haproxy configuration file"); + logger.error("Unable to copy haproxy configuration file"); return new Answer(cmd, false, "LoadBalancerConfigCommand failed due to unable to copy haproxy configuration file"); } final String command = String.format("%s%s %s", "/root/", VRScripts.LB, args); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on domain router " + routerIp + command); + if (logger.isDebugEnabled()) { + logger.debug("Run command on domain router " + routerIp + command); } result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { final String msg = "LoadBalancerConfigCommand on domain router " + routerIp + " failed. 
message: " + result.second(); - s_logger.error(msg); + logger.error(msg); return new Answer(cmd, false, msg); } - if (s_logger.isInfoEnabled()) { - s_logger.info("LoadBalancerConfigCommand on domain router " + routerIp + " completed"); + if (logger.isInfoEnabled()) { + logger.info("LoadBalancerConfigCommand on domain router " + routerIp + " completed"); } } finally { SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "rm " + tmpCfgFilePath); @@ -1362,15 +1360,15 @@ protected Answer execute(final LoadBalancerConfigCommand cmd) { return new Answer(cmd); } catch (final Throwable e) { - s_logger.error("Unexpected exception: " + e.toString(), e); + logger.error("Unexpected exception: " + e.toString(), e); return new Answer(cmd, false, "LoadBalancerConfigCommand failed due to " + e.getMessage()); } } protected Answer execute(final SavePasswordCommand cmd) { - if (s_logger.isInfoEnabled()) { + if (logger.isInfoEnabled()) { - s_logger.info("Executing resource SavePasswordCommand. vmName: " + cmd.getVmName() + ", vmIp: " + cmd.getVmIpAddress() + ", password: " + + logger.info("Executing resource SavePasswordCommand. 
vmName: " + cmd.getVmName() + ", vmIp: " + cmd.getVmIpAddress() + ", password: " + StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); } @@ -1381,9 +1379,9 @@ protected Answer execute(final SavePasswordCommand cmd) { // Run save_password_to_domr.sh final String command = String.format("%s%s %s %s %s %s", "/opt/cloud/bin/", VRScripts.PASSWORD, "-v", vmIpAddress, "-p", password); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final String debugCommand = String.format("%s%s %s %s %s %s", "/opt/cloud/bin/", VRScripts.PASSWORD, "-v", vmIpAddress, "-p", StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); - s_logger.debug("Run command on domain router " + controlIp + debugCommand); + logger.debug("Run command on domain router " + controlIp + debugCommand); } try { @@ -1391,18 +1389,18 @@ protected Answer execute(final SavePasswordCommand cmd) { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("savepassword command on domain router " + controlIp + " failed, message: " + result.second()); + logger.error("savepassword command on domain router " + controlIp + " failed, message: " + result.second()); return new Answer(cmd, false, "SavePassword failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("savepassword command on domain router " + controlIp + " completed"); + if (logger.isInfoEnabled()) { + logger.info("savepassword command on domain router " + controlIp + " completed"); } } catch (final Throwable e) { final String msg = "SavePasswordCommand failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } return new Answer(cmd); @@ -1447,16 +1445,16 @@ protected SetFirewallRulesAnswer execute(final SetFirewallRulesCommand cmd) { result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, 
"/root/firewall_rule.sh " + args); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (trafficType == FirewallRule.TrafficType.Egress) { - s_logger.debug("Executing script on domain router " + controlIp + ": /root/firewallRule_egress.sh " + args); + logger.debug("Executing script on domain router " + controlIp + ": /root/firewallRule_egress.sh " + args); } else { - s_logger.debug("Executing script on domain router " + controlIp + ": /root/firewall_rule.sh " + args); + logger.debug("Executing script on domain router " + controlIp + ": /root/firewall_rule.sh " + args); } } if (!result.first()) { - s_logger.error("SetFirewallRulesCommand failure on setting one rule. args: " + args); + logger.error("SetFirewallRulesCommand failure on setting one rule. args: " + args); //FIXME - in the future we have to process each rule separately; now we temporarily set every rule to be false if single rule fails for (int i = 0; i < results.length; i++) { results[i] = "Failed"; @@ -1465,7 +1463,7 @@ protected SetFirewallRulesAnswer execute(final SetFirewallRulesCommand cmd) { return new SetFirewallRulesAnswer(cmd, false, results); } } catch (final Throwable e) { - s_logger.error("SetFirewallRulesCommand(args: " + args + ") failed on setting one rule due to ", e); + logger.error("SetFirewallRulesCommand(args: " + args + ") failed on setting one rule due to ", e); //FIXME - in the future we have to process each rule separately; now we temporarily set every rule to be false if single rule fails for (int i = 0; i < results.length; i++) { results[i] = "Failed"; @@ -1477,15 +1475,15 @@ protected SetFirewallRulesAnswer execute(final SetFirewallRulesCommand cmd) { } protected Answer execute(final VmDataCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource VmDataCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource VmDataCommand: " + s_gson.toJson(cmd)); } final String controlIp = 
getRouterSshControlIp(cmd); final Map> data = new HashMap>(); data.put(cmd.getVmIpAddress(), cmd.getVmData()); String json = new Gson().toJson(data); - s_logger.debug("VM data JSON IS:" + json); + logger.debug("VM data JSON IS:" + json); json = Base64.encodeBase64String(json.getBytes(Charset.forName("UTF-8"))); final String command = String.format("%s%s %s %s", "/opt/cloud/bin/", VRScripts.VMDATA, "-d", json); @@ -1493,24 +1491,24 @@ protected Answer execute(final VmDataCommand cmd) { try { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("vm_data command on domain router " + controlIp + " failed. messge: " + result.second()); + logger.error("vm_data command on domain router " + controlIp + " failed. messge: " + result.second()); return new Answer(cmd, false, "VmDataCommand failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("vm_data command on domain router " + controlIp + " completed"); + if (logger.isInfoEnabled()) { + logger.info("vm_data command on domain router " + controlIp + " completed"); } } catch (final Throwable e) { final String msg = "VmDataCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } return new Answer(cmd); } protected Answer execute(final DhcpEntryCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource DhcpEntryCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource DhcpEntryCommand: " + s_gson.toJson(cmd)); } // ssh -p 3922 -o StrictHostKeyChecking=no -i $cert root@$domr "/root/edithosts.sh $mac $ip $vm $dfltrt $ns $staticrt" >/dev/null @@ -1544,8 +1542,8 @@ protected Answer execute(final DhcpEntryCommand cmd) { final String command = String.format("%s%s %s", "/root/", VRScripts.DHCP, args); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run 
command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + command); + if (logger.isDebugEnabled()) { + logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + command); } try { @@ -1553,18 +1551,18 @@ protected Answer execute(final DhcpEntryCommand cmd) { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("dhcp_entry command on domR " + controlIp + " failed, message: " + result.second()); + logger.error("dhcp_entry command on domR " + controlIp + " failed, message: " + result.second()); return new Answer(cmd, false, "DhcpEntry failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("dhcp_entry command on domain router " + controlIp + " completed"); + if (logger.isInfoEnabled()) { + logger.info("dhcp_entry command on domain router " + controlIp + " completed"); } } catch (final Throwable e) { final String msg = "DhcpEntryCommand failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -1572,8 +1570,8 @@ protected Answer execute(final DhcpEntryCommand cmd) { } protected Answer execute(final CreateIpAliasCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing createIpAlias command: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing createIpAlias command: " + s_gson.toJson(cmd)); } cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); final List ipAliasTOs = cmd.getIpAliasList(); @@ -1586,8 +1584,8 @@ protected Answer execute(final CreateIpAliasCommand cmd) { args.append(ipaliasto.getNetmask()); args.append("-"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + ", /root/createIpAlias " + args); + if (logger.isDebugEnabled()) { + logger.debug("Run command on domR " + 
cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + ", /root/createIpAlias " + args); } try { @@ -1595,18 +1593,18 @@ protected Answer execute(final CreateIpAliasCommand cmd) { final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, "/root/createIpAlias.sh " + args); if (!result.first()) { - s_logger.error("CreateIpAlias command on domr " + controlIp + " failed, message: " + result.second()); + logger.error("CreateIpAlias command on domr " + controlIp + " failed, message: " + result.second()); return new Answer(cmd, false, "createipAlias failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("createIpAlias command on domain router " + controlIp + " completed"); + if (logger.isInfoEnabled()) { + logger.info("createIpAlias command on domain router " + controlIp + " completed"); } } catch (final Throwable e) { final String msg = "createIpAlias failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -1614,8 +1612,8 @@ protected Answer execute(final CreateIpAliasCommand cmd) { } protected Answer execute(final DnsMasqConfigCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing dnsmasqConfig command: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing dnsmasqConfig command: " + s_gson.toJson(cmd)); } final String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); final String controlIp = getRouterSshControlIp(cmd); @@ -1639,21 +1637,21 @@ protected Answer execute(final DnsMasqConfigCommand cmd) { final String command = String.format("%s%s %s", "/root/", VRScripts.DHCP, args); final Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on domain router " + routerIp + ", /root/dnsmasq.sh"); + if (logger.isDebugEnabled()) { + logger.debug("Run 
command on domain router " + routerIp + ", /root/dnsmasq.sh"); } if (!result.first()) { - s_logger.error("Unable update dnsmasq config file"); + logger.error("Unable update dnsmasq config file"); return new Answer(cmd, false, "dnsmasq config update failed due to: " + result.second()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("dnsmasq config command on domain router " + routerIp + " completed"); + if (logger.isDebugEnabled()) { + logger.debug("dnsmasq config command on domain router " + routerIp + " completed"); } } catch (final Throwable e) { final String msg = "Dnsmasqconfig command failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -1678,7 +1676,7 @@ protected Answer execute(final DnsMasqConfigCommand cmd) { private int findRouterEthDeviceIndex(final String domrName, final String routerIp, final String mac) throws Exception { - s_logger.info("findRouterEthDeviceIndex. mac: " + mac); + logger.info("findRouterEthDeviceIndex. 
mac: " + mac); // TODO : this is a temporary very inefficient solution, will refactor it later final Pair result = SshHelper.sshExecute(routerIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, @@ -1694,14 +1692,14 @@ private int findRouterEthDeviceIndex(final String domrName, final String routerI if (!("all".equalsIgnoreCase(token) || "default".equalsIgnoreCase(token) || "lo".equalsIgnoreCase(token))) { final String cmd = String.format("ip address show %s | grep link/ether | sed -e 's/^[ \t]*//' | cut -d' ' -f2", token); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run domr script " + cmd); + if (logger.isDebugEnabled()) { + logger.debug("Run domr script " + cmd); } final Pair result2 = SshHelper.sshExecute(routerIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, // TODO need to find the dev index inside router based on IP address cmd); - if (s_logger.isDebugEnabled()) { - s_logger.debug("result: " + result2.first() + ", output: " + result2.second()); + if (logger.isDebugEnabled()) { + logger.debug("result: " + result2.first() + ", output: " + result2.second()); } if (result2.first() && result2.second().trim().equalsIgnoreCase(mac.trim())) { @@ -1711,7 +1709,7 @@ private int findRouterEthDeviceIndex(final String domrName, final String routerI } } - s_logger.warn("can not find intereface associated with mac: " + mac + ", guest OS may still at loading state, retry..."); + logger.warn("can not find intereface associated with mac: " + mac + ", guest OS may still at loading state, retry..."); } @@ -1720,7 +1718,7 @@ private int findRouterEthDeviceIndex(final String domrName, final String routerI private Pair findRouterFreeEthDeviceIndex(final String routerIp) throws Exception { - s_logger.info("findRouterFreeEthDeviceIndex. mac: "); + logger.info("findRouterFreeEthDeviceIndex. 
mac: "); // TODO : this is a temporary very inefficient solution, will refactor it later final Pair result = SshHelper.sshExecute(routerIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, @@ -1738,14 +1736,14 @@ private Pair findRouterFreeEthDeviceIndex(final String routerIp //TODO: don't check for eth0,1,2, as they will be empty by default. //String cmd = String.format("ip address show %s ", token); final String cmd = String.format("ip address show %s | grep link/ether | sed -e 's/^[ \t]*//' | cut -d' ' -f2", token); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run domr script " + cmd); + if (logger.isDebugEnabled()) { + logger.debug("Run domr script " + cmd); } final Pair result2 = SshHelper.sshExecute(routerIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, // TODO need to find the dev index inside router based on IP address cmd); - if (s_logger.isDebugEnabled()) { - s_logger.debug("result: " + result2.first() + ", output: " + result2.second()); + if (logger.isDebugEnabled()) { + logger.debug("result: " + result2.first() + ", output: " + result2.second()); } if (result2.first() && result2.second().trim().length() > 0) { @@ -1755,7 +1753,7 @@ private Pair findRouterFreeEthDeviceIndex(final String routerIp } } - //s_logger.warn("can not find intereface associated with mac: , guest OS may still at loading state, retry..."); + //logger.warn("can not find intereface associated with mac: , guest OS may still at loading state, retry..."); } @@ -1763,8 +1761,8 @@ private Pair findRouterFreeEthDeviceIndex(final String routerIp } protected Answer execute(final IpAssocCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource IPAssocCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource IPAssocCommand: " + s_gson.toJson(cmd)); } int i = 0; @@ -1785,7 +1783,7 @@ protected Answer execute(final IpAssocCommand cmd) { results[i++] = IpAssocAnswer.errorResult; } } catch (final 
Throwable e) { - s_logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); + logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); for (; i < cmd.getIpAddresses().length; i++) { results[i++] = IpAssocAnswer.errorResult; @@ -1807,11 +1805,11 @@ protected int getVmFreeNicIndex(final String vmName) { "/api/HypervResource/" + cmdName, null, null); } catch (final URISyntaxException e) { final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } final String ansStr = postHttpRequest(s_gson.toJson(vmConfig), agentUri); final Answer[] result = s_gson.fromJson(ansStr, Answer[].class); - s_logger.debug("GetVmConfigCommand response received " + logger.debug("GetVmConfigCommand response received " + s_gson.toJson(result)); if (result.length > 0) { final GetVmConfigAnswer ans = (GetVmConfigAnswer)result[0]; @@ -1840,11 +1838,11 @@ protected int getVmNics(final String vmName, String vlanid) { "/api/HypervResource/" + cmdName, null, null); } catch (final URISyntaxException e) { final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } final String ansStr = postHttpRequest(s_gson.toJson(vmConfig), agentUri); final Answer[] result = s_gson.fromJson(ansStr, Answer[].class); - s_logger.debug("executeRequest received response " + logger.debug("executeRequest received response " + s_gson.toJson(result)); if (result.length > 0) { final GetVmConfigAnswer ans = (GetVmConfigAnswer)result[0]; @@ -1869,11 +1867,11 @@ protected void modifyNicVlan(final String vmName, final String vlanId, final Str "/api/HypervResource/" + cmdName, null, null); } catch (final URISyntaxException e) { final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } final String ansStr = postHttpRequest(s_gson.toJson(modifynic), 
agentUri); final Answer[] result = s_gson.fromJson(ansStr, Answer[].class); - s_logger.debug("executeRequest received response " + logger.debug("executeRequest received response " + s_gson.toJson(result)); if (result.length > 0) { } @@ -1890,11 +1888,11 @@ protected void modifyNicVlan(final String vmName, final String vlanId, final int "/api/HypervResource/" + cmdName, null, null); } catch (final URISyntaxException e) { final String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } final String ansStr = postHttpRequest(s_gson.toJson(modifyNic), agentUri); final Answer[] result = s_gson.fromJson(ansStr, Answer[].class); - s_logger.debug("executeRequest received response " + logger.debug("executeRequest received response " + s_gson.toJson(result)); if (result.length > 0) { } @@ -1914,13 +1912,13 @@ protected void assignPublicIpAddress(final String vmName, final String privateIp boolean addVif = false; if (add && publicNicInfo == -1) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Plug new NIC to associate" + privateIpAddress + " to " + publicIpAddress); + if (logger.isDebugEnabled()) { + logger.debug("Plug new NIC to associate" + privateIpAddress + " to " + publicIpAddress); } addVif = true; } else if (!add && firstIP) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unplug NIC " + publicNicInfo); + if (logger.isDebugEnabled()) { + logger.debug("Unplug NIC " + publicNicInfo); } } @@ -1937,7 +1935,7 @@ protected void assignPublicIpAddress(final String vmName, final String privateIp else { // we didn't find any eth device available in VR to configure the ip range with new VLAN final String msg = "No Nic is available on DomR VIF to associate/disassociate IP with."; - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } } @@ -1972,29 +1970,29 @@ protected void assignPublicIpAddress(final String vmName, final String privateIp final String command = String.format("%s%s 
%s","/opt/cloud/bin/", VRScripts.IPASSOC ,args); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on domain router " + privateIpAddress + command); + if (logger.isDebugEnabled()) { + logger.debug("Run command on domain router " + privateIpAddress + command); } final Pair result = SshHelper.sshExecute(privateIpAddress, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("ipassoc command on domain router " + privateIpAddress + " failed. message: " + result.second()); + logger.error("ipassoc command on domain router " + privateIpAddress + " failed. message: " + result.second()); throw new Exception("ipassoc failed due to " + result.second()); } - if (s_logger.isInfoEnabled()) { - s_logger.info("ipassoc command on domain router " + privateIpAddress + " completed"); + if (logger.isInfoEnabled()) { + logger.info("ipassoc command on domain router " + privateIpAddress + " completed"); } } protected Answer execute(final GetDomRVersionCmd cmd) { final String command = String.format("%s%s", "/opt/cloud/bin/", VRScripts.VERSION); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Executing resource GetDomRVersionCmd: " + s_gson.toJson(cmd)); - s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + command); + if (logger.isDebugEnabled()) { + logger.debug("Executing resource GetDomRVersionCmd: " + s_gson.toJson(cmd)); + logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + command); } Pair result; @@ -2003,17 +2001,17 @@ protected Answer execute(final GetDomRVersionCmd cmd) { result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), null, command); if (!result.first()) { - s_logger.error("GetDomRVersionCmd on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); + logger.error("GetDomRVersionCmd on domR " + 
cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " failed, message: " + result.second()); return new GetDomRVersionAnswer(cmd, "GetDomRVersionCmd failed due to " + result.second()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("GetDomRVersionCmd on domain router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); + if (logger.isDebugEnabled()) { + logger.debug("GetDomRVersionCmd on domain router " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + " completed"); } } catch (final Throwable e) { final String msg = "GetDomRVersionCmd failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); return new GetDomRVersionAnswer(cmd, msg); } final String[] lines = result.second().split("&"); @@ -2023,21 +2021,21 @@ protected Answer execute(final GetDomRVersionCmd cmd) { return new GetDomRVersionAnswer(cmd, result.second(), lines[0], lines[1]); } - private static String getRouterSshControlIp(final NetworkElementCommand cmd) { + private String getRouterSshControlIp(final NetworkElementCommand cmd) { final String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); final String routerGuestIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP); final String zoneNetworkType = cmd.getAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE); if (routerGuestIp != null && zoneNetworkType != null && NetworkType.valueOf(zoneNetworkType) == NetworkType.Basic) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("In Basic zone mode, use router's guest IP for SSH control. guest IP : " + routerGuestIp); + if (logger.isDebugEnabled()) { + logger.debug("In Basic zone mode, use router's guest IP for SSH control. guest IP : " + routerGuestIp); } return routerGuestIp; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Use router's private IP for SSH control. IP : " + routerIp); + if (logger.isDebugEnabled()) { + logger.debug("Use router's private IP for SSH control. 
IP : " + routerIp); } return routerIp; } @@ -2046,8 +2044,8 @@ protected Answer execute(final NetworkUsageCommand cmd) { if (cmd.isForVpc()) { //return VPCNetworkUsage(cmd); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource NetworkUsageCommand " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource NetworkUsageCommand " + s_gson.toJson(cmd)); } if (cmd.getOption() != null && cmd.getOption().equals("create")) { networkUsage(cmd.getPrivateIP(), "create", null); @@ -2072,21 +2070,21 @@ private long[] getNetworkStats(final String privateIP) { stats[1] += Long.parseLong(splitResult[i++]); } } catch (final Throwable e) { - s_logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e); + logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e); } } return stats; } protected Answer execute(final SetMonitorServiceCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SetMonitorServiceCommand: " + s_gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource SetMonitorServiceCommand: " + s_gson.toJson(cmd)); } final String controlIp = getRouterSshControlIp(cmd); final String config = cmd.getConfiguration(); if (org.apache.commons.lang3.StringUtils.isBlank(config)) { - s_logger.error("SetMonitorServiceCommand should have config for this case"); + logger.error("SetMonitorServiceCommand should have config for this case"); return new Answer(cmd, false, "SetMonitorServiceCommand failed due to missing config"); } @@ -2099,14 +2097,14 @@ protected Answer execute(final SetMonitorServiceCommand cmd) { if (!result.first()) { final String msg= "monitor_service.sh failed on domain router " + controlIp + " failed " + result.second(); - s_logger.error(msg); + logger.error(msg); return new Answer(cmd, false, msg); } return new Answer(cmd); } catch (final Throwable e) { - 
s_logger.error("Unexpected exception: " + e.toString(), e); + logger.error("Unexpected exception: " + e.toString(), e); return new Answer(cmd, false, "SetMonitorServiceCommand failed due to " + e); } } @@ -2116,28 +2114,28 @@ protected CheckSshAnswer execute(final CheckSshCommand cmd) { final String privateIp = cmd.getIp(); final int cmdPort = cmd.getPort(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port, " + privateIp + ":" + cmdPort); } try { final String result = connect(cmd.getName(), privateIp, cmdPort); if (result != null) { - s_logger.error("Can not ping System vm " + vmName + "due to:" + result); + logger.error("Can not ping System vm " + vmName + "due to:" + result); return new CheckSshAnswer(cmd, "Can not ping System vm " + vmName + "due to:" + result); } } catch (final Exception e) { - s_logger.error("Can not ping System vm " + vmName + "due to exception"); + logger.error("Can not ping System vm " + vmName + "due to exception"); return new CheckSshAnswer(cmd, e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port succeeded for vm " + vmName); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port succeeded for vm " + vmName); } if (VirtualMachineName.isValidRouterName(vmName)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Execute network usage setup command on " + vmName); + if (logger.isDebugEnabled()) { + logger.debug("Execute network usage setup command on " + vmName); } networkUsage(privateIp, "create", null); } @@ -2162,8 +2160,8 @@ protected String networkUsage(final String privateIpAddress, final String option } try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Executing /opt/cloud/bin/netusage.sh " + args + " on DomR " + privateIpAddress); + if (logger.isTraceEnabled()) { + logger.trace("Executing /opt/cloud/bin/netusage.sh " + args + " on DomR " + privateIpAddress); } 
final Pair result = @@ -2175,7 +2173,7 @@ protected String networkUsage(final String privateIpAddress, final String option return result.second(); } catch (final Throwable e) { - s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIpAddress + "), domR may not be ready yet. failure due to " + e); + logger.error("Unable to execute NetworkUsage command on DomR (" + privateIpAddress + "), domR may not be ready yet. failure due to " + e); } return null; @@ -2192,12 +2190,12 @@ public File getSystemVMKeyFile() { } assert keyFile != null; if (!keyFile.exists()) { - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); + logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } - public static String postHttpRequest(final String jsonCmd, final URI agentUri) { + public String postHttpRequest(final String jsonCmd, final URI agentUri) { // Using Apache's HttpClient for HTTP POST // Java-only approach discussed at on StackOverflow concludes with // comment to use Apache HttpClient @@ -2205,7 +2203,7 @@ public static String postHttpRequest(final String jsonCmd, final URI agentUri) { // use Apache. 
String logMessage = StringEscapeUtils.unescapeJava(jsonCmd); logMessage = cleanPassword(logMessage); - s_logger.debug("POST request to " + agentUri.toString() + logger.debug("POST request to " + agentUri.toString() + " with contents " + logMessage); // Create request @@ -2225,13 +2223,13 @@ public boolean isTrusted(final X509Certificate[] chain, final String authType) final ClientConnectionManager ccm = new BasicClientConnectionManager(registry); httpClient = new DefaultHttpClient(ccm); } catch (final KeyManagementException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (final UnrecoverableKeyException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (final NoSuchAlgorithmException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (final KeyStoreException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } String result = null; @@ -2246,33 +2244,33 @@ public boolean isTrusted(final X509Certificate[] chain, final String authType) final StringEntity cmdJson = new StringEntity(jsonCmd); request.addHeader("content-type", "application/json"); request.setEntity(cmdJson); - s_logger.debug("Sending cmd to " + agentUri.toString() + logger.debug("Sending cmd to " + agentUri.toString() + " cmd data:" + logMessage); final HttpResponse response = httpClient.execute(request); // Unsupported commands will not route. 
if (response.getStatusLine().getStatusCode() == HttpStatus.SC_NOT_FOUND) { final String errMsg = "Failed to send : HTTP error code : " + response.getStatusLine().getStatusCode(); - s_logger.error(errMsg); + logger.error(errMsg); final String unsupportMsg = "Unsupported command " + agentUri.getPath() + ". Are you sure you got the right type of" + " server?"; final Answer ans = new UnsupportedAnswer(null, unsupportMsg); - s_logger.error(ans); + logger.error(ans); result = s_gson.toJson(new Answer[] {ans}); } else if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { final String errMsg = "Failed send to " + agentUri.toString() + " : HTTP error code : " + response.getStatusLine().getStatusCode(); - s_logger.error(errMsg); + logger.error(errMsg); return null; } else { result = EntityUtils.toString(response.getEntity()); final String logResult = cleanPassword(StringEscapeUtils.unescapeJava(result)); - s_logger.debug("POST response is " + logResult); + logger.debug("POST response is " + logResult); } } catch (final ClientProtocolException protocolEx) { // Problem with HTTP message exchange - s_logger.error(protocolEx); + logger.error(protocolEx); } catch (final IOException connEx) { // Problem with underlying communications - s_logger.error(connEx); + logger.error(connEx); } finally { httpClient.getConnectionManager().shutdown(); } @@ -2354,7 +2352,7 @@ protected String connect(final String vmName, final String ipAddress, final int // VM patching/rebooting time that may need int retry = this.retry; while (System.currentTimeMillis() - startTick <= opsTimeout || --retry > 0) { - s_logger.info("Trying to connect to " + ipAddress); + logger.info("Trying to connect to " + ipAddress); try (SocketChannel sch = SocketChannel.open();) { sch.configureBlocking(true); sch.socket().setSoTimeout(5000); @@ -2363,7 +2361,7 @@ protected String connect(final String vmName, final String ipAddress, final int sch.connect(addr); return null; } catch (final IOException e) { - 
s_logger.info("Could] not connect to " + ipAddress + " due to " + e.toString()); + logger.info("Could] not connect to " + ipAddress + " due to " + e.toString()); if (e instanceof ConnectException) { // if connection is refused because of VM is being started, // we give it more sleep time @@ -2371,7 +2369,7 @@ protected String connect(final String vmName, final String ipAddress, final int try { Thread.sleep(5000); } catch (final InterruptedException ex) { - s_logger.debug("[ignored] interrupted while waiting to retry connecting to vm after exception: "+e.getLocalizedMessage()); + logger.debug("[ignored] interrupted while waiting to retry connecting to vm after exception: "+e.getLocalizedMessage()); } } } @@ -2379,11 +2377,11 @@ protected String connect(final String vmName, final String ipAddress, final int try { Thread.sleep(1000); } catch (final InterruptedException ex) { - s_logger.debug("[ignored] interrupted while connecting to vm."); + logger.debug("[ignored] interrupted while connecting to vm."); } } - s_logger.info("Unable to logon to " + ipAddress); + logger.info("Unable to logon to " + ipAddress); return "Unable to connect"; } diff --git a/plugins/hypervisors/hyperv/src/main/java/org/apache/cloudstack/storage/motion/HypervStorageMotionStrategy.java b/plugins/hypervisors/hyperv/src/main/java/org/apache/cloudstack/storage/motion/HypervStorageMotionStrategy.java index 19c655b8f21f..0e189d050006 100644 --- a/plugins/hypervisors/hyperv/src/main/java/org/apache/cloudstack/storage/motion/HypervStorageMotionStrategy.java +++ b/plugins/hypervisors/hyperv/src/main/java/org/apache/cloudstack/storage/motion/HypervStorageMotionStrategy.java @@ -34,7 +34,8 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; 
import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -59,7 +60,7 @@ @Component public class HypervStorageMotionStrategy implements DataMotionStrategy { - private static final Logger s_logger = Logger.getLogger(HypervStorageMotionStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AgentManager agentMgr; @Inject VolumeDao volDao; @Inject VolumeDataFactory volFactory; @@ -99,7 +100,7 @@ public void copyAsync(Map volumeMap, VirtualMachineTO vmT throw new CloudRuntimeException("Unsupported operation requested for moving data."); } } catch (Exception e) { - s_logger.error("copy failed", e); + logger.error("copy failed", e); errMsg = e.toString(); } @@ -124,10 +125,10 @@ private Answer migrateVmWithVolumes(VMInstanceVO vm, VirtualMachineTO to, Host s MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getPrivateIpAddress()); MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command); if (answer == null) { - s_logger.error("Migration with storage of vm " + vm + " failed."); + logger.error("Migration with storage of vm " + vm + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + answer.getDetails()); + logger.error("Migration with storage of vm " + vm+ " failed. Details: " + answer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". 
" + answer.getDetails()); } else { @@ -137,7 +138,7 @@ private Answer migrateVmWithVolumes(VMInstanceVO vm, VirtualMachineTO to, Host s return answer; } catch (OperationTimedoutException e) { - s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + logger.error("Error while migrating vm " + vm + " to host " + destHost, e); throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); } } @@ -170,7 +171,7 @@ private void updateVolumePathsAfterMigration(Map volumeTo } if (!updated) { - s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated."); + logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated."); } } } diff --git a/plugins/hypervisors/hyperv/src/test/java/com/cloud/hypervisor/hyperv/test/HypervDirectConnectResourceTest.java b/plugins/hypervisors/hyperv/src/test/java/com/cloud/hypervisor/hyperv/test/HypervDirectConnectResourceTest.java index bf069183cd92..d2e92bd5a2b6 100644 --- a/plugins/hypervisors/hyperv/src/test/java/com/cloud/hypervisor/hyperv/test/HypervDirectConnectResourceTest.java +++ b/plugins/hypervisors/hyperv/src/test/java/com/cloud/hypervisor/hyperv/test/HypervDirectConnectResourceTest.java @@ -35,7 +35,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -85,7 +86,7 @@ **/ public class HypervDirectConnectResourceTest { - private static final Logger s_logger = Logger.getLogger(HypervDirectConnectResourceTest.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); // TODO: make this a config parameter private static final String sampleLegitDiskImageURL = "http://s3-eu-west-1.amazonaws.com/cshv3eu/SmallDisk.vhdx"; @@ -188,30 +189,30 @@ public boolean accept(final File directory, final String fileName) 
{ continue; } Assert.assertTrue("Should have deleted file " + file.getPath(), file.delete()); - s_logger.info("Cleaned up by delete file " + file.getPath()); + logger.info("Cleaned up by delete file " + file.getPath()); } s_testSampleVolumeTempURIJSON = createTestDiskImageFromExistingImage(testVolWorks, s_testLocalStorePath, s_testSampleVolumeTempUUID); - s_logger.info("Created " + s_testSampleVolumeTempURIJSON); + logger.info("Created " + s_testSampleVolumeTempURIJSON); s_testSampleVolumeCorruptURIJSON = createTestDiskImageFromExistingImage(testVolWorks, s_testLocalStorePath, s_testSampleVolumeCorruptUUID); - s_logger.info("Created " + s_testSampleVolumeCorruptURIJSON); + logger.info("Created " + s_testSampleVolumeCorruptURIJSON); createTestDiskImageFromExistingImage(testVolWorks, s_testLocalStorePath, s_testSampleTemplateUUID); s_testSampleTemplateURLJSON = s_testSampleTemplateUUID; - s_logger.info("Created " + s_testSampleTemplateURLJSON + " in local storage."); + logger.info("Created " + s_testSampleTemplateURLJSON + " in local storage."); // Create secondary storage template: createTestDiskImageFromExistingImage(testVolWorks, testSecondarStoreDir.getAbsolutePath(), "af39aa7f-2b12-37e1-86d3-e23f2f005101.vhdx"); - s_logger.info("Created " + "af39aa7f-2b12-37e1-86d3-e23f2f005101.vhdx" + " in secondary (NFS) storage."); + logger.info("Created " + "af39aa7f-2b12-37e1-86d3-e23f2f005101.vhdx" + " in secondary (NFS) storage."); s_testLocalStorePathJSON = s_gson.toJson(s_testLocalStorePath); String agentIp = (String)params.get("ipaddress"); - s_logger.info("Test using agent IP address " + agentIp); + logger.info("Test using agent IP address " + agentIp); params.put("agentIp", agentIp); setTestJsonResult(params); s_hypervresource.configure("hypervresource", params); // Verify sample template is in place storage pool - s_logger.info("setUp complete, sample StoragePool at " + s_testLocalStorePathJSON + " sample template at " + s_testSampleTemplateURLJSON); + 
logger.info("setUp complete, sample StoragePool at " + s_testLocalStorePathJSON + " sample template at " + s_testSampleTemplateURLJSON); s_agentExecutable = (String)params.get("agent.executable"); s_testPrimaryDataStoreHost = (String)params.get("ipaddress"); @@ -269,11 +270,11 @@ public final void testStartupCommand() { Command[] cmds = {scmd}; String cmdsStr = s_gson.toJson(cmds); - s_logger.debug("Commands[] toJson is " + cmdsStr); + logger.debug("Commands[] toJson is " + cmdsStr); Command[] result = s_gson.fromJson(cmdsStr, Command[].class); - s_logger.debug("Commands[] fromJson is " + s_gson.toJson(result)); - s_logger.debug("Commands[] first element has type" + result[0].toString()); + logger.debug("Commands[] fromJson is " + s_gson.toJson(result)); + logger.debug("Commands[] first element has type" + result[0].toString()); } // @Test @@ -286,7 +287,7 @@ public final void testJson() { sscmd.setGuid(pi.getUuid()); sscmd.setDataCenter("foo"); sscmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); - s_logger.debug("StartupStorageCommand fromJson is " + s_gson.toJson(sscmd)); + logger.debug("StartupStorageCommand fromJson is " + s_gson.toJson(sscmd)); } @Test @@ -305,7 +306,7 @@ public final void testCreateStoragePoolCommand() { StoragePoolVO pool = createTestStoragePoolVO(folderName); CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - s_logger.debug("TestCreateStoragePoolCommand sending " + s_gson.toJson(cmd)); + logger.debug("TestCreateStoragePoolCommand sending " + s_gson.toJson(cmd)); Answer ans = s_hypervresource.executeRequest(cmd); Assert.assertTrue(ans.getResult()); @@ -340,7 +341,7 @@ public final void testModifyStoragePoolCommand2() { } // Use same spec for pool - s_logger.info("Createing pool at : " + folderName); + logger.info("Createing pool at : " + folderName); StoragePoolVO pool = new StoragePoolVO(StoragePoolType.Filesystem, "127.0.0.1", -1, folderName); pool.setUuid(s_testLocalStoreUUID); @@ -363,7 +364,7 @@ 
public final StoragePoolVO createTestStoragePoolVO(final String folderName) { } // Use same spec for pool - s_logger.info("Createing pool at : " + folderName); + logger.info("Createing pool at : " + folderName); StoragePoolVO pool = new StoragePoolVO(StoragePoolType.Filesystem, "127.0.0.1", -1, folderName); return pool; @@ -377,8 +378,8 @@ public final void testInitialize() { if (result == null) { result = "NULL"; } - s_logger.debug("TestInitialize returned " + result); - s_logger.debug("TestInitialize expected " + _setTestJsonResultStr); + logger.debug("TestInitialize returned " + result); + logger.debug("TestInitialize expected " + _setTestJsonResultStr); Assert.assertTrue("StartupCommand[] not what we expected", _setTestJsonResultStr.equals(result)); return; } @@ -393,9 +394,9 @@ public final void testPrimaryStorageDownloadCommandHTTP() { private void corePrimaryStorageDownloadCommandTestCycle(final PrimaryStorageDownloadCommand cmd) { PrimaryStorageDownloadAnswer ans = (PrimaryStorageDownloadAnswer)s_hypervresource.executeRequest(cmd); if (!ans.getResult()) { - s_logger.error(ans.getDetails()); + logger.error(ans.getDetails()); } else { - s_logger.debug(ans.getDetails()); + logger.debug(ans.getDetails()); } Assert.assertTrue(ans.getDetails(), ans.getResult()); @@ -444,7 +445,7 @@ public final void testCreateCommand() { testSampleTemplateURLFile.exists()); int fileCount = destDir.listFiles().length; - s_logger.debug(" test local store has " + fileCount + "files"); + logger.debug(" test local store has " + fileCount + "files"); // Test requires there to be a template at the tempalteUrl, which is its // location in the local file system. 
CreateCommand cmd = s_gson.fromJson(sample, CreateCommand.class); @@ -527,7 +528,7 @@ private StopAnswer simpleVmStop() { private StartAnswer simpleVmStart(final String sample) { StartCommand cmd = s_gson.fromJson(sample, StartCommand.class); - s_logger.info("StartCommand sample " + s_gson.toJson(cmd)); + logger.info("StartCommand sample " + s_gson.toJson(cmd)); StartAnswer ans = (StartAnswer)s_hypervresource.executeRequest(cmd); return ans; } @@ -553,7 +554,7 @@ public final void testGetStorageStatsCommand() { String sample = "{\"id\":\"" + s_testLocalStoreUUID + "\",\"localPath\":" + s_testLocalStorePathJSON + "," + "\"pooltype\":\"Filesystem\"," + "\"contextMap\":{},\"wait\":0}"; - s_logger.info("Sample JSON: " + sample); + logger.info("Sample JSON: " + sample); GetStorageStatsCommand cmd = s_gson.fromJson(sample, GetStorageStatsCommand.class); s_hypervresource.executeRequest(cmd); @@ -573,7 +574,7 @@ public final void agentTerminate() { writer.flush(); writer.close(); } catch (IOException ex) { - s_logger.debug("Error closing agent at " + s_agentExecutable + " message " + ex.getMessage()); + logger.debug("Error closing agent at " + s_agentExecutable + " message " + ex.getMessage()); } } @@ -592,7 +593,7 @@ private void agentCreation() { s_agentProc = builder.start(); Thread.sleep(4000); } catch (Exception ex) { - s_logger.debug("Error calling starting aget at " + s_agentExecutable + " message " + ex.getMessage()); + logger.debug("Error calling starting aget at " + s_agentExecutable + " message " + ex.getMessage()); } } @@ -617,14 +618,14 @@ public final void testGetHostStatsCommand() { Assert.assertTrue(ans.getDetails() == null); } - public static Properties loadProperties() throws ConfigurationException { + public Properties loadProperties() throws ConfigurationException { Properties properties = new Properties(); final File file = PropertiesUtil.findConfigFile("agent.properties"); if (file == null) { throw new ConfigurationException("Unable to find 
agent.properties."); } - s_logger.info("agent.properties found at " + file.getAbsolutePath()); + logger.info("agent.properties found at " + file.getAbsolutePath()); try { properties.load(new FileInputStream(file)); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java index 022501524f7f..8fc748262424 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/ha/KVMInvestigator.java @@ -36,13 +36,11 @@ import org.apache.cloudstack.ha.HAManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; public class KVMInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(KVMInvestigator.class); @Inject private HostDao _hostDao; @Inject @@ -62,7 +60,7 @@ public boolean isVmAlive(com.cloud.vm.VirtualMachine vm, Host host) throws Unkno return haManager.isVMAliveOnHost(host); } Status status = isAgentAlive(host); - s_logger.debug("HA: HOST is ineligible legacy state " + status + " for host " + host.getId()); + logger.debug("HA: HOST is ineligible legacy state " + status + " for host " + host.getId()); if (status == null) { throw new UnknownVM(); } @@ -90,7 +88,7 @@ public Status isAgentAlive(Host agent) { storageSupportHA = storageSupportHa(zonePools); } if (!storageSupportHA) { - s_logger.warn( + logger.warn( "Agent investigation was requested on host " + agent + ", but host does not support investigation because it has no NFS storage. Skipping investigation."); return Status.Disconnected; } @@ -106,7 +104,7 @@ public Status isAgentAlive(Host agent) { hostStatus = answer.getResult() ? 
Status.Down : Status.Up; } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + agent.getId()); + logger.debug("Failed to send command to host: " + agent.getId()); } if (hostStatus == null) { hostStatus = Status.Disconnected; @@ -118,18 +116,18 @@ public Status isAgentAlive(Host agent) { || (neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM && neighbor.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; } - s_logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); + logger.debug("Investigating host:" + agent.getId() + " via neighbouring host:" + neighbor.getId()); try { Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); if (answer != null) { neighbourStatus = answer.getResult() ? Status.Down : Status.Up; - s_logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); + logger.debug("Neighbouring host:" + neighbor.getId() + " returned status:" + neighbourStatus + " for the investigated host:" + agent.getId()); if (neighbourStatus == Status.Up) { break; } } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: " + neighbor.getId()); } } if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { @@ -138,7 +136,7 @@ public Status isAgentAlive(Host agent) { if (neighbourStatus == Status.Down && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { hostStatus = Status.Down; } - s_logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId()); + logger.debug("HA: HOST is ineligible legacy state " + hostStatus + " for host " + agent.getId()); return hostStatus; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkDriverImpl.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkDriverImpl.java index 022eafaca8a0..2a8d87af643e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkDriverImpl.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkDriverImpl.java @@ -21,7 +21,6 @@ import com.cloud.utils.component.AdapterBase; import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.util.Map; @@ -31,7 +30,6 @@ public class DpdkDriverImpl extends AdapterBase implements DpdkDriver { private final String dpdkPortVhostUserType = "dpdkvhostuser"; private final String dpdkPortVhostUserClientType = "dpdkvhostuserclient"; - private static final Logger s_logger = Logger.getLogger(DpdkDriver.class); public DpdkDriverImpl() { } @@ -48,7 +46,7 @@ public String getNextDpdkPort() { * Get the latest DPDK port number created on a DPDK enabled host */ public int getDpdkLatestPortNumberUsed() { - s_logger.debug("Checking the last DPDK port created"); + logger.debug("Checking the last DPDK port created"); String cmd = "ovs-vsctl show | grep Port | grep " + DPDK_PORT_PREFIX + " | " + "awk '{ print $2 }' | sort -rV | head -1"; String port = Script.runSimpleBashScript(cmd); @@ -82,7 +80,7 @@ public void addDpdkPort(String bridgeName, String port, String vlan, DpdkHelper. 
} String cmd = stringBuilder.toString(); - s_logger.debug("DPDK property enabled, executing: " + cmd); + logger.debug("DPDK property enabled, executing: " + cmd); Script.runSimpleBashScript(cmd); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java index 39ecc9182f00..26b8de530837 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java @@ -31,7 +31,6 @@ import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.OutputInterpreter; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import com.cloud.agent.api.to.NicTO; @@ -44,7 +43,6 @@ public class BridgeVifDriver extends VifDriverBase { - private static final Logger s_logger = Logger.getLogger(BridgeVifDriver.class); private int _timeout; private final Object _vnetBridgeMonitor = new Object(); @@ -92,9 +90,9 @@ public void getPifs() { for (File netdev : netdevs) { final File isbridge = new File(netdev.getAbsolutePath() + "/bridge"); final String netdevName = netdev.getName(); - s_logger.debug("looking in file " + netdev.getAbsolutePath() + "/bridge"); + logger.debug("looking in file " + netdev.getAbsolutePath() + "/bridge"); if (isbridge.exists()) { - s_logger.debug("Found bridge " + netdevName); + logger.debug("Found bridge " + netdevName); bridges.add(netdevName); } } @@ -103,7 +101,7 @@ public void getPifs() { String publicBridgeName = _libvirtComputingResource.getPublicBridgeName(); for (final String bridge : bridges) { - s_logger.debug("looking for pif for bridge " + bridge); + logger.debug("looking for pif for bridge " + bridge); final String pif = getPif(bridge); if (_libvirtComputingResource.isPublicBridge(bridge)) { _pifs.put("public", pif); 
@@ -117,10 +115,10 @@ public void getPifs() { // guest(private) creates bridges on a pif, if private bridge not found try pif direct // This addresses the unnecessary requirement of someone to create an unused bridge just for traffic label if (_pifs.get("private") == null) { - s_logger.debug("guest(private) traffic label '" + guestBridgeName + "' not found as bridge, looking for physical interface"); + logger.debug("guest(private) traffic label '" + guestBridgeName + "' not found as bridge, looking for physical interface"); final File dev = new File("/sys/class/net/" + guestBridgeName); if (dev.exists()) { - s_logger.debug("guest(private) traffic label '" + guestBridgeName + "' found as a physical device"); + logger.debug("guest(private) traffic label '" + guestBridgeName + "' found as a physical device"); _pifs.put("private", guestBridgeName); } } @@ -128,15 +126,15 @@ public void getPifs() { // public creates bridges on a pif, if private bridge not found try pif direct // This addresses the unnecessary requirement of someone to create an unused bridge just for traffic label if (_pifs.get("public") == null) { - s_logger.debug("public traffic label '" + publicBridgeName+ "' not found as bridge, looking for physical interface"); + logger.debug("public traffic label '" + publicBridgeName+ "' not found as bridge, looking for physical interface"); final File dev = new File("/sys/class/net/" + publicBridgeName); if (dev.exists()) { - s_logger.debug("public traffic label '" + publicBridgeName + "' found as a physical device"); + logger.debug("public traffic label '" + publicBridgeName + "' found as a physical device"); _pifs.put("public", publicBridgeName); } } - s_logger.debug("done looking for pifs, no more bridges"); + logger.debug("done looking for pifs, no more bridges"); } private String getPif(final String bridge) { @@ -159,7 +157,7 @@ private String matchPifFileInDirectory(final String bridgeName) { // if bridgeName already refers to a pif, return it as-is return 
bridgeName; } - s_logger.debug("failing to get physical interface from bridge " + bridgeName + ", does " + brif.getAbsolutePath() + "exist?"); + logger.debug("failing to get physical interface from bridge " + bridgeName + ", does " + brif.getAbsolutePath() + "exist?"); return ""; } @@ -167,13 +165,13 @@ private String matchPifFileInDirectory(final String bridgeName) { for (File anInterface : interfaces) { final String fname = anInterface.getName(); - s_logger.debug("matchPifFileInDirectory: file name '" + fname + "'"); + logger.debug("matchPifFileInDirectory: file name '" + fname + "'"); if (LibvirtComputingResource.isInterface(fname)) { return fname; } } - s_logger.debug("failing to get physical interface from bridge " + bridgeName + ", did not find an eth*, bond*, team*, vlan*, em*, p*p*, ens*, eno*, enp*, or enx* in " + brif.getAbsolutePath()); + logger.debug("failing to get physical interface from bridge " + bridgeName + ", did not find an eth*, bond*, team*, vlan*, em*, p*p*, ens*, eno*, enp*, or enx* in " + brif.getAbsolutePath()); return ""; } @@ -189,10 +187,10 @@ protected boolean isValidProtocolAndVnetId(final String vNetId, final String pro @Override public LibvirtVMDef.InterfaceDef plug(NicTO nic, String guestOsType, String nicAdapter, Map extraConfig) throws InternalErrorException, LibvirtException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("nic=" + nic); + if (logger.isDebugEnabled()) { + logger.debug("nic=" + nic); if (nicAdapter != null && !nicAdapter.isEmpty()) { - s_logger.debug("custom nic adapter=" + nicAdapter); + logger.debug("custom nic adapter=" + nicAdapter); } } @@ -215,7 +213,7 @@ public LibvirtVMDef.InterfaceDef plug(NicTO nic, String guestOsType, String nicA if (nic.getType() == Networks.TrafficType.Guest) { if (isBroadcastTypeVlanOrVxlan(nic) && isValidProtocolAndVnetId(vNetId, protocol)) { if (trafficLabel != null && !trafficLabel.isEmpty()) { - s_logger.debug("creating a vNet dev and bridge for guest traffic per traffic 
label " + trafficLabel); + logger.debug("creating a vNet dev and bridge for guest traffic per traffic label " + trafficLabel); String brName = createVnetBr(vNetId, trafficLabel, protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps); } else { @@ -238,7 +236,7 @@ public LibvirtVMDef.InterfaceDef plug(NicTO nic, String guestOsType, String nicA } else if (nic.getType() == Networks.TrafficType.Public) { if (isBroadcastTypeVlanOrVxlan(nic) && isValidProtocolAndVnetId(vNetId, protocol)) { if (trafficLabel != null && !trafficLabel.isEmpty()) { - s_logger.debug("creating a vNet dev and bridge for public traffic per traffic label " + trafficLabel); + logger.debug("creating a vNet dev and bridge for public traffic per traffic label " + trafficLabel); String brName = createVnetBr(vNetId, trafficLabel, protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps); } else { @@ -309,7 +307,7 @@ private void createVnet(String vnetId, String pif, String brName, String protoco if (protocol.equals(Networks.BroadcastDomainType.Vxlan.scheme())) { script = _modifyVxlanPath; } - final Script command = new Script(script, _timeout, s_logger); + final Script command = new Script(script, _timeout, logger); command.add("-v", vnetId); command.add("-p", pif); command.add("-b", brName); @@ -355,7 +353,7 @@ private void deleteVnetBr(String brName, boolean deleteBr) { } if (vNetId == null || vNetId.isEmpty()) { - s_logger.debug("unable to get a vNet ID from name " + brName); + logger.debug("unable to get a vNet ID from name " + brName); return; } @@ -366,7 +364,7 @@ private void deleteVnetBr(String brName, boolean deleteBr) { scriptPath = _modifyVlanPath; } - final Script command = new Script(scriptPath, _timeout, s_logger); + final Script command = new Script(scriptPath, _timeout, logger); command.add("-o", "delete"); command.add("-v", vNetId); command.add("-p", pName); @@ -377,7 
+375,7 @@ private void deleteVnetBr(String brName, boolean deleteBr) { final String result = command.execute(); if (result != null) { - s_logger.debug("Delete bridge " + brName + " failed: " + result); + logger.debug("Delete bridge " + brName + " failed: " + result); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/DirectVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/DirectVifDriver.java index 5037ad1aec71..71afc9409328 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/DirectVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/DirectVifDriver.java @@ -20,7 +20,6 @@ package com.cloud.hypervisor.kvm.resource; import org.apache.commons.compress.utils.Sets; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import com.cloud.agent.api.to.NicTO; @@ -31,7 +30,6 @@ public class DirectVifDriver extends VifDriverBase { - private static final Logger s_logger = Logger.getLogger(DirectVifDriver.class); /** * Experimental driver to configure direct networking in libvirt. 
This should only diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/IvsVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/IvsVifDriver.java index 178728b7f96d..2386e7d2d590 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/IvsVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/IvsVifDriver.java @@ -26,7 +26,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import com.cloud.agent.api.to.NicTO; @@ -41,7 +40,6 @@ import com.cloud.utils.script.Script; public class IvsVifDriver extends VifDriverBase { - private static final Logger s_logger = Logger.getLogger(IvsVifDriver.class); private int _timeout; private final Object _vnetBridgeMonitor = new Object(); @@ -100,7 +98,7 @@ public InterfaceDef plug(NicTO nic, String guestOsType, String nicAdapter, Map s_mapper = new HashMap(); static { s_mapper.put("CentOS 4.5 (32-bit)", "CentOS 4.5"); @@ -133,10 +134,10 @@ public class KVMGuestOsMapper { } - public static String getGuestOsName(String guestOsName) { + public String getGuestOsName(String guestOsName) { String guestOS = s_mapper.get(guestOsName); if (guestOS == null) { - s_logger.debug("Can't find the mapping of guest os: " + guestOsName); + logger.debug("Can't find the mapping of guest os: " + guestOsName); return "Other"; } else { return guestOS; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHABase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHABase.java index b9abea4f0bce..896426addca1 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHABase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHABase.java @@ -18,7 +18,8 @@ import java.io.File; -import org.apache.log4j.Logger; +import 
org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.LibvirtException; import org.libvirt.StoragePool; import org.libvirt.StoragePoolInfo; @@ -32,7 +33,7 @@ import com.cloud.agent.properties.AgentPropertiesFileHandler; public class KVMHABase { - private static final Logger s_logger = Logger.getLogger(KVMHABase.class); + protected Logger logger = LogManager.getLogger(getClass()); private long _timeout = 60000; /* 1 minutes */ protected static String s_heartBeatPath; protected long _heartBeatUpdateTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HEARTBEAT_UPDATE_TIMEOUT); @@ -173,14 +174,14 @@ protected String getMountPoint(HAStoragePool storagePool) { } } catch (LibvirtException e) { - s_logger.debug("Ignoring libvirt error.", e); + logger.debug("Ignoring libvirt error.", e); } finally { try { if (pool != null) { pool.free(); } } catch (LibvirtException e) { - s_logger.debug("Ignoring libvirt error.", e); + logger.debug("Ignoring libvirt error.", e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java index 2df70375107b..db6190fa8f28 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAChecker.java @@ -20,12 +20,10 @@ import java.util.concurrent.Callable; import java.util.stream.Collectors; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.HostTO; public class KVMHAChecker extends KVMHABase implements Callable { - private static final Logger s_logger = Logger.getLogger(KVMHAChecker.class); private List storagePools; private HostTO host; private boolean reportFailureIfOneStorageIsDown; @@ -46,7 +44,7 @@ public Boolean checkingHeartBeat() { String hostAndPools = String.format("host IP [%s] in pools [%s]", 
host.getPrivateNetwork().getIp(), storagePools.stream().map(pool -> pool.getPoolUUID()).collect(Collectors.joining(", "))); - s_logger.debug(String.format("Checking heart beat with KVMHAChecker for %s", hostAndPools)); + logger.debug(String.format("Checking heart beat with KVMHAChecker for %s", hostAndPools)); for (HAStoragePool pool : storagePools) { validResult = pool.getPool().checkingHeartBeat(pool, host); @@ -56,7 +54,7 @@ public Boolean checkingHeartBeat() { } if (!validResult) { - s_logger.warn(String.format("All checks with KVMHAChecker for %s considered it as dead. It may cause a shutdown of the host.", hostAndPools)); + logger.warn(String.format("All checks with KVMHAChecker for %s considered it as dead. It may cause a shutdown of the host.", hostAndPools)); } return validResult; @@ -64,7 +62,7 @@ public Boolean checkingHeartBeat() { @Override public Boolean call() throws Exception { - // s_logger.addAppender(new org.apache.log4j.ConsoleAppender(new + // logger.addAppender(new org.apache.log4j.ConsoleAppender(new // org.apache.log4j.PatternLayout(), "System.out")); return checkingHeartBeat(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java index eb09408c14ed..cf407bfc08a8 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java @@ -20,7 +20,6 @@ import com.cloud.agent.properties.AgentPropertiesFileHandler; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.script.Script; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; import org.libvirt.StoragePool; @@ -35,7 +34,6 @@ public class KVMHAMonitor extends KVMHABase implements Runnable { - private static final Logger s_logger = 
Logger.getLogger(KVMHAMonitor.class); private final Map storagePool = new ConcurrentHashMap<>(); private final boolean rebootHostAndAlertManagementOnHeartbeatTimeout; @@ -98,7 +96,7 @@ protected void runHeartBeat() { result = executePoolHeartBeatCommand(uuid, primaryStoragePool, result); if (result != null && rebootHostAndAlertManagementOnHeartbeatTimeout) { - s_logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; stopping cloudstack-agent.", uuid, result)); + logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; stopping cloudstack-agent.", uuid, result)); primaryStoragePool.getPool().createHeartBeatCommand(primaryStoragePool, null, false);; } } @@ -115,11 +113,11 @@ private String executePoolHeartBeatCommand(String uuid, HAStoragePool primarySto result = primaryStoragePool.getPool().createHeartBeatCommand(primaryStoragePool, hostPrivateIp, true); if (result != null) { - s_logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; try: %s of %s.", uuid, result, i, _heartBeatUpdateMaxTries)); + logger.warn(String.format("Write heartbeat for pool [%s] failed: %s; try: %s of %s.", uuid, result, i, _heartBeatUpdateMaxTries)); try { Thread.sleep(_heartBeatUpdateRetrySleep); } catch (InterruptedException e) { - s_logger.debug("[IGNORED] Interrupted between heartbeat retries.", e); + logger.debug("[IGNORED] Interrupted between heartbeat retries.", e); } } else { break; @@ -135,21 +133,21 @@ private void checkForNotExistingPools(Set removedPools, String uuid) { StoragePool storage = conn.storagePoolLookupByUUIDString(uuid); if (storage == null || storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) { if (storage == null) { - s_logger.debug(String.format("Libvirt storage pool [%s] not found, removing from HA list.", uuid)); + logger.debug(String.format("Libvirt storage pool [%s] not found, removing from HA list.", uuid)); } else { - s_logger.debug(String.format("Libvirt storage pool [%s] found, but not running, 
removing from HA list.", uuid)); + logger.debug(String.format("Libvirt storage pool [%s] found, but not running, removing from HA list.", uuid)); } removedPools.add(uuid); } - s_logger.debug(String.format("Found NFS storage pool [%s] in libvirt, continuing.", uuid)); + logger.debug(String.format("Found NFS storage pool [%s] in libvirt, continuing.", uuid)); } catch (LibvirtException e) { - s_logger.debug(String.format("Failed to lookup libvirt storage pool [%s].", uuid), e); + logger.debug(String.format("Failed to lookup libvirt storage pool [%s].", uuid), e); if (e.toString().contains("pool not found")) { - s_logger.debug(String.format("Removing pool [%s] from HA monitor since it was deleted.", uuid)); + logger.debug(String.format("Removing pool [%s] from HA monitor since it was deleted.", uuid)); removedPools.add(uuid); } } @@ -164,7 +162,7 @@ public void run() { try { Thread.sleep(_heartBeatUpdateFreq); } catch (InterruptedException e) { - s_logger.debug("[IGNORED] Interrupted between heartbeats.", e); + logger.debug("[IGNORED] Interrupted between heartbeats.", e); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java index 358fafae4b5c..a2b422b8bfb6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtCapXMLParser.java @@ -20,7 +20,6 @@ import java.io.StringReader; import java.util.ArrayList; -import org.apache.log4j.Logger; import org.xml.sax.Attributes; import org.xml.sax.InputSource; import org.xml.sax.SAXException; @@ -34,7 +33,6 @@ public class LibvirtCapXMLParser extends LibvirtXMLParser { private boolean _archTypex8664 = false; private final StringBuffer _emulator = new StringBuffer(); private final StringBuffer _capXML = new StringBuffer(); - private 
static final Logger s_logger = Logger.getLogger(LibvirtCapXMLParser.class); private final ArrayList guestOsTypes = new ArrayList(); @Override @@ -63,7 +61,7 @@ public void characters(char[] ch, int start, int length) throws SAXException { } else if (_osType) { guestOsTypes.add(new String(ch, start, length)); } else if (_emulatorFlag) { - s_logger.debug("Found " + new String(ch, start, length) + " as a suiteable emulator"); + logger.debug("Found " + new String(ch, start, length) + " as a suitable emulator"); _emulator.append(ch, start, length); } } @@ -112,9 +110,9 @@ public String parseCapabilitiesXML(String capXML) { _sp.parse(new InputSource(new StringReader(capXML)), this); return _capXML.toString(); } catch (SAXException se) { - s_logger.warn(se.getMessage()); + logger.warn(se.getMessage()); } catch (IOException ie) { - s_logger.error(ie.getMessage()); + logger.error(ie.getMessage()); } return null; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index fef5d4aa6de1..0a253878dafa 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -75,7 +75,8 @@ import org.apache.commons.lang.math.NumberUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xerces.impl.xpath.regex.Match; import org.joda.time.Duration; import org.libvirt.Connect; @@ -233,8 +234,8 @@ * pool | the parent of the storage pool hierarchy * } **/ public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer, ResourceStatusUpdater { - 
protected static Logger s_logger = Logger.getLogger(LibvirtComputingResource.class); + protected static Logger LOGGER = LogManager.getLogger(LibvirtComputingResource.class); private static final String CONFIG_VALUES_SEPARATOR = ","; @@ -513,7 +514,7 @@ public ExecutionResult executeInVR(final String routerIp, final String script, f @Override public ExecutionResult executeInVR(final String routerIp, final String script, final String args, final Duration timeout) { - final Script command = new Script(routerProxyPath, timeout, s_logger); + final Script command = new Script(routerProxyPath, timeout, LOGGER); final AllLinesParser parser = new AllLinesParser(); command.add(script); command.add(routerIp); @@ -525,7 +526,7 @@ public ExecutionResult executeInVR(final String routerIp, final String script, f details = parser.getLines(); } - s_logger.debug("Executing script in VR: " + script); + LOGGER.debug("Executing script in VR: " + script); return new ExecutionResult(command.getExitValue() == 0, details); } @@ -535,12 +536,12 @@ public ExecutionResult createFileInVR(final String routerIp, final String path, final File permKey = new File("/root/.ssh/id_rsa.cloud"); boolean success = true; String details = "Creating file in VR, with ip: " + routerIp + ", file: " + filename; - s_logger.debug(details); + LOGGER.debug(details); try { SshHelper.scpTo(routerIp, 3922, "root", permKey, null, path, content.getBytes(), filename, null); } catch (final Exception e) { - s_logger.warn("Failed to create file " + path + filename + " in VR " + routerIp, e); + LOGGER.warn("Failed to create file " + path + filename + " in VR " + routerIp, e); details = e.getMessage(); success = false; } @@ -727,14 +728,14 @@ public String interpret(final BufferedReader reader) throws IOException { while ((line = reader.readLine()) != null) { final String[] toks = line.trim().split("="); if (toks.length < 2) { - s_logger.warn("Failed to parse Script output: " + line); + LOGGER.warn("Failed to parse Script 
output: " + line); } else { map.put(toks[0].trim(), toks[1].trim()); } numLines++; } if (numLines == 0) { - s_logger.warn("KeyValueInterpreter: no output lines?"); + LOGGER.warn("KeyValueInterpreter: no output lines?"); } return null; } @@ -788,7 +789,7 @@ private Map getDeveloperProperties() throws ConfigurationExcepti throw new ConfigurationException("Unable to find developer.properties."); } - s_logger.info("developer.properties found at " + file.getAbsolutePath()); + LOGGER.info("developer.properties found at " + file.getAbsolutePath()); try { final Properties properties = PropertiesUtil.loadFromFile(file); @@ -861,7 +862,7 @@ public boolean configure(final String name, final Map params) th try { loadUefiProperties(); } catch (FileNotFoundException e) { - s_logger.error("uefi properties file not found due to: " + e.getLocalizedMessage()); + LOGGER.error("uefi properties file not found due to: " + e.getLocalizedMessage()); } storageLayer = new JavaStorageLayer(); @@ -992,7 +993,7 @@ public boolean configure(final String name, final Map params) th hostHealthCheckScriptPath = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HEALTH_CHECK_SCRIPT_PATH); if (StringUtils.isNotBlank(hostHealthCheckScriptPath) && !new File(hostHealthCheckScriptPath).exists()) { - s_logger.info(String.format("Unable to find the host health check script at: %s, " + + LOGGER.info(String.format("Unable to find the host health check script at: %s, " + "discarding it", hostHealthCheckScriptPath)); } @@ -1133,7 +1134,7 @@ public boolean configure(final String name, final Map params) th // destroy default network, see https://libvirt.org/sources/java/javadoc/org/libvirt/Network.html try { Network network = conn.networkLookupByName("default"); - s_logger.debug("Found libvirt default network, destroying it and setting autostart to false"); + LOGGER.debug("Found libvirt default network, destroying it and setting autostart to false"); if (network.isActive() == 1) { network.destroy(); } @@ 
-1141,7 +1142,7 @@ public boolean configure(final String name, final Map params) th network.setAutostart(false); } } catch (final LibvirtException e) { - s_logger.warn("Ignoring libvirt error.", e); + LOGGER.warn("Ignoring libvirt error.", e); } if (HypervisorType.KVM == hypervisorType) { @@ -1159,17 +1160,17 @@ public boolean configure(final String name, final Map params) th hypervisorLibvirtVersion = conn.getLibVirVersion(); hypervisorQemuVersion = conn.getVersion(); } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } // Enable/disable IO driver for Qemu (in case it is not set CloudStack can also detect if its supported by qemu) enableIoUring = isIoUringEnabled(); - s_logger.info("IO uring driver for Qemu: " + (enableIoUring ? "enabled" : "disabled")); + LOGGER.info("IO uring driver for Qemu: " + (enableIoUring ? "enabled" : "disabled")); final String cpuArchOverride = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.GUEST_CPU_ARCH); if (StringUtils.isNotEmpty(cpuArchOverride)) { guestCpuArch = cpuArchOverride; - s_logger.info("Using guest CPU architecture: " + guestCpuArch); + LOGGER.info("Using guest CPU architecture: " + guestCpuArch); } guestCpuMode = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.GUEST_CPU_MODE); @@ -1177,7 +1178,7 @@ public boolean configure(final String name, final Map params) th guestCpuModel = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.GUEST_CPU_MODEL); if (hypervisorLibvirtVersion < 9 * 1000 + 10) { - s_logger.warn("Libvirt version 0.9.10 required for guest cpu mode, but version " + prettyVersion(hypervisorLibvirtVersion) + + LOGGER.warn("Libvirt version 0.9.10 required for guest cpu mode, but version " + prettyVersion(hypervisorLibvirtVersion) + " detected, so it will be disabled"); guestCpuMode = ""; guestCpuModel = ""; @@ -1228,21 +1229,21 @@ public boolean configure(final String name, final Map params) th */ if 
(pifs.get("private") == null) { - s_logger.error("Failed to get private nic name"); + LOGGER.error("Failed to get private nic name"); throw new ConfigurationException("Failed to get private nic name"); } if (pifs.get("public") == null) { - s_logger.error("Failed to get public nic name"); + LOGGER.error("Failed to get public nic name"); throw new ConfigurationException("Failed to get public nic name"); } - s_logger.debug("Found pif: " + pifs.get("private") + " on " + privBridgeName + ", pif: " + pifs.get("public") + " on " + publicBridgeName); + LOGGER.debug("Found pif: " + pifs.get("private") + " on " + privBridgeName + ", pif: " + pifs.get("public") + " on " + publicBridgeName); canBridgeFirewall = canBridgeFirewall(pifs.get("public")); localGateway = Script.runSimpleBashScript("ip route show default 0.0.0.0/0|head -1|awk '{print $3}'"); if (localGateway == null) { - s_logger.warn("No default IPv4 gateway found"); + LOGGER.warn("No default IPv4 gateway found"); } migrateDowntime = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VM_MIGRATE_DOWNTIME); @@ -1264,9 +1265,9 @@ public boolean configure(final String name, final Map params) th try { migrateSpeed = Integer.parseInt(tokens[0]); } catch (final NumberFormatException e) { - s_logger.trace("Ignoring migrateSpeed extraction error.", e); + LOGGER.trace("Ignoring migrateSpeed extraction error.", e); } - s_logger.debug("device " + pifs.get("public") + " has speed: " + String.valueOf(migrateSpeed)); + LOGGER.debug("device " + pifs.get("public") + " has speed: " + String.valueOf(migrateSpeed)); } } params.put("vm.migrate.speed", String.valueOf(migrateSpeed)); @@ -1292,7 +1293,7 @@ public boolean configure(final String name, final Map params) th final Thread cleanupMonitor = new Thread(isciCleanupMonitor); cleanupMonitor.start(); } else { - s_logger.info("iscsi session clean up is disabled"); + LOGGER.info("iscsi session clean up is disabled"); } setupMemoryBalloonStatsPeriod(conn); @@ -1311,14 +1312,14 @@ 
protected List getVmsToSetMemoryBalloonStatsPeriod(Connect conn) { try { vmIds = ArrayUtils.toObject(conn.listDomains()); } catch (final LibvirtException e) { - s_logger.error("Unable to get the list of Libvirt domains on this host.", e); + LOGGER.error("Unable to get the list of Libvirt domains on this host.", e); return vmIdList; } vmIdList.addAll(Arrays.asList(vmIds)); - s_logger.debug(String.format("We have found a total of [%s] VMs (Libvirt domains) on this host: [%s].", vmIdList.size(), vmIdList.toString())); + LOGGER.debug(String.format("We have found a total of [%s] VMs (Libvirt domains) on this host: [%s].", vmIdList.size(), vmIdList.toString())); if (vmIdList.isEmpty()) { - s_logger.info("Skipping the memory balloon stats period setting, since there are no VMs (active Libvirt domains) on this host."); + LOGGER.info("Skipping the memory balloon stats period setting, since there are no VMs (active Libvirt domains) on this host."); } return vmIdList; } @@ -1329,13 +1330,13 @@ protected List getVmsToSetMemoryBalloonStatsPeriod(Connect conn) { */ protected Integer getCurrentVmBalloonStatsPeriod() { if (Boolean.TRUE.equals(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VM_MEMBALLOON_DISABLE))) { - s_logger.info(String.format("The [%s] property is set to 'true', so the memory balloon stats period will be set to 0 for all VMs.", + LOGGER.info(String.format("The [%s] property is set to 'true', so the memory balloon stats period will be set to 0 for all VMs.", AgentProperties.VM_MEMBALLOON_DISABLE.getName())); return 0; } Integer vmBalloonStatsPeriod = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VM_MEMBALLOON_STATS_PERIOD); if (vmBalloonStatsPeriod == 0) { - s_logger.info(String.format("The [%s] property is set to '0', this prevents memory statistics from being displayed correctly. " + LOGGER.info(String.format("The [%s] property is set to '0', this prevents memory statistics from being displayed correctly. 
" + "Adjust (increase) the value of this parameter to correct this.", AgentProperties.VM_MEMBALLOON_STATS_PERIOD.getName())); } return vmBalloonStatsPeriod; @@ -1355,20 +1356,20 @@ protected void setupMemoryBalloonStatsPeriod(Connect conn) { parser.parseDomainXML(dm.getXMLDesc(0)); MemBalloonDef memBalloon = parser.getMemBalloon(); if (!MemBalloonDef.MemBalloonModel.VIRTIO.equals(memBalloon.getMemBalloonModel())) { - s_logger.debug(String.format("Skipping the memory balloon stats period setting for the VM (Libvirt Domain) with ID [%s] and name [%s] because this VM has no memory" + LOGGER.debug(String.format("Skipping the memory balloon stats period setting for the VM (Libvirt Domain) with ID [%s] and name [%s] because this VM has no memory" + " balloon.", vmId, dm.getName())); } String setMemBalloonStatsPeriodCommand = String.format(COMMAND_SET_MEM_BALLOON_STATS_PERIOD, vmId, currentVmBalloonStatsPeriod); String setMemBalloonStatsPeriodResult = Script.runSimpleBashScript(setMemBalloonStatsPeriodCommand); if (StringUtils.isNotBlank(setMemBalloonStatsPeriodResult)) { - s_logger.error(String.format("Unable to set up memory balloon stats period for VM (Libvirt Domain) with ID [%s] due to an error when running the [%s] " + LOGGER.error(String.format("Unable to set up memory balloon stats period for VM (Libvirt Domain) with ID [%s] due to an error when running the [%s] " + "command. 
Output: [%s].", vmId, setMemBalloonStatsPeriodCommand, setMemBalloonStatsPeriodResult)); continue; } - s_logger.debug(String.format("The memory balloon stats period [%s] has been set successfully for the VM (Libvirt Domain) with ID [%s] and name [%s].", + LOGGER.debug(String.format("The memory balloon stats period [%s] has been set successfully for the VM (Libvirt Domain) with ID [%s] and name [%s].", currentVmBalloonStatsPeriod, vmId, dm.getName())); } catch (final Exception e) { - s_logger.warn(String.format("Failed to set up memory balloon stats period for the VM %s with exception %s", parser.getName(), e.getMessage())); + LOGGER.warn(String.format("Failed to set up memory balloon stats period for the VM %s with exception %s", parser.getName(), e.getMessage())); } } } @@ -1376,12 +1377,12 @@ protected void setupMemoryBalloonStatsPeriod(Connect conn) { private void enableSSLForKvmAgent() { final File keyStoreFile = PropertiesUtil.findConfigFile(KeyStoreUtils.KS_FILENAME); if (keyStoreFile == null) { - s_logger.info("Failed to find keystore file: " + KeyStoreUtils.KS_FILENAME); + LOGGER.info("Failed to find keystore file: " + KeyStoreUtils.KS_FILENAME); return; } String keystorePass = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KEYSTORE_PASSPHRASE); if (StringUtils.isBlank(keystorePass)) { - s_logger.info("Failed to find passphrase for keystore: " + KeyStoreUtils.KS_FILENAME); + LOGGER.info("Failed to find passphrase for keystore: " + KeyStoreUtils.KS_FILENAME); return; } if (keyStoreFile.exists() && !keyStoreFile.isDirectory()) { @@ -1392,20 +1393,20 @@ private void enableSSLForKvmAgent() { protected void configureLocalStorage() throws ConfigurationException { String localStoragePath = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LOCAL_STORAGE_PATH); - s_logger.debug(String.format("Local Storage Path set: [%s].", localStoragePath)); + LOGGER.debug(String.format("Local Storage Path set: [%s].", localStoragePath)); String 
localStorageUUIDString = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LOCAL_STORAGE_UUID); if (localStorageUUIDString == null) { localStorageUUIDString = UUID.randomUUID().toString(); } - s_logger.debug(String.format("Local Storage UUID set: [%s].", localStorageUUIDString)); + LOGGER.debug(String.format("Local Storage UUID set: [%s].", localStorageUUIDString)); String[] localStorageRelativePaths = localStoragePath.split(CONFIG_VALUES_SEPARATOR); String[] localStorageUUIDStrings = localStorageUUIDString.split(CONFIG_VALUES_SEPARATOR); if (localStorageRelativePaths.length != localStorageUUIDStrings.length) { String errorMessage = String.format("The path and UUID of the local storage pools have different length. Path: [%s], UUID: [%s].", localStoragePath, localStorageUUIDString); - s_logger.error(errorMessage); + LOGGER.error(errorMessage); throw new ConfigurationException(errorMessage); } for (String localStorageRelativePath : localStorageRelativePaths) { @@ -1433,7 +1434,7 @@ private void validateLocalStorageUUID(String localStorageUUID) throws Configurat public boolean configureHostParams(final Map params) { final File file = PropertiesUtil.findConfigFile("agent.properties"); if (file == null) { - s_logger.error("Unable to find the file agent.properties"); + LOGGER.error("Unable to find the file agent.properties"); return false; } // Save configurations in agent.properties @@ -1460,25 +1461,25 @@ public boolean configureHostParams(final Map params) { private void configureAgentHooks() { agentHooksBasedir = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_BASEDIR); - s_logger.debug("agent.hooks.basedir is " + agentHooksBasedir); + LOGGER.debug("agent.hooks.basedir is " + agentHooksBasedir); agentHooksLibvirtXmlScript = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_LIBVIRT_VM_XML_TRANSFORMER_SCRIPT); - s_logger.debug("agent.hooks.libvirt_vm_xml_transformer.script is " + agentHooksLibvirtXmlScript); + 
LOGGER.debug("agent.hooks.libvirt_vm_xml_transformer.script is " + agentHooksLibvirtXmlScript); agentHooksLibvirtXmlMethod = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_LIBVIRT_VM_XML_TRANSFORMER_METHOD); - s_logger.debug("agent.hooks.libvirt_vm_xml_transformer.method is " + agentHooksLibvirtXmlMethod); + LOGGER.debug("agent.hooks.libvirt_vm_xml_transformer.method is " + agentHooksLibvirtXmlMethod); agentHooksVmOnStartScript = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_LIBVIRT_VM_ON_START_SCRIPT); - s_logger.debug("agent.hooks.libvirt_vm_on_start.script is " + agentHooksVmOnStartScript); + LOGGER.debug("agent.hooks.libvirt_vm_on_start.script is " + agentHooksVmOnStartScript); agentHooksVmOnStartMethod = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_LIBVIRT_VM_ON_START_METHOD); - s_logger.debug("agent.hooks.libvirt_vm_on_start.method is " + agentHooksVmOnStartMethod); + LOGGER.debug("agent.hooks.libvirt_vm_on_start.method is " + agentHooksVmOnStartMethod); agentHooksVmOnStopScript = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_LIBVIRT_VM_ON_STOP_SCRIPT); - s_logger.debug("agent.hooks.libvirt_vm_on_stop.script is " + agentHooksVmOnStopScript); + LOGGER.debug("agent.hooks.libvirt_vm_on_stop.script is " + agentHooksVmOnStopScript); agentHooksVmOnStopMethod = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_HOOKS_LIBVIRT_VM_ON_STOP_METHOD); - s_logger.debug("agent.hooks.libvirt_vm_on_stop.method is " + agentHooksVmOnStopMethod); + LOGGER.debug("agent.hooks.libvirt_vm_on_stop.method is " + agentHooksVmOnStopMethod); } public boolean isUefiPropertiesFileLoaded() { @@ -1495,14 +1496,14 @@ private void loadUefiProperties() throws FileNotFoundException { throw new FileNotFoundException("Unable to find file uefi.properties."); } - s_logger.info("uefi.properties file found at " + file.getAbsolutePath()); + LOGGER.info("uefi.properties file found at " + 
file.getAbsolutePath()); try { PropertiesUtil.loadFromFile(uefiProperties, file); - s_logger.info("guest.nvram.template.legacy = " + uefiProperties.getProperty("guest.nvram.template.legacy")); - s_logger.info("guest.loader.legacy = " + uefiProperties.getProperty("guest.loader.legacy")); - s_logger.info("guest.nvram.template.secure = " + uefiProperties.getProperty("guest.nvram.template.secure")); - s_logger.info("guest.loader.secure =" + uefiProperties.getProperty("guest.loader.secure")); - s_logger.info("guest.nvram.path = " + uefiProperties.getProperty("guest.nvram.path")); + LOGGER.info("guest.nvram.template.legacy = " + uefiProperties.getProperty("guest.nvram.template.legacy")); + LOGGER.info("guest.loader.legacy = " + uefiProperties.getProperty("guest.loader.legacy")); + LOGGER.info("guest.nvram.template.secure = " + uefiProperties.getProperty("guest.nvram.template.secure")); + LOGGER.info("guest.loader.secure =" + uefiProperties.getProperty("guest.loader.secure")); + LOGGER.info("guest.nvram.path = " + uefiProperties.getProperty("guest.nvram.path")); } catch (final FileNotFoundException ex) { throw new CloudRuntimeException("Cannot find the file: " + file.getAbsolutePath(), ex); } catch (final IOException ex) { @@ -1533,10 +1534,10 @@ protected void configureVifDrivers(final Map params) throws Conf String defaultVifDriverName = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LIBVIRT_VIF_DRIVER); if (defaultVifDriverName == null) { if (bridgeType == BridgeType.OPENVSWITCH) { - s_logger.info("No libvirt.vif.driver specified. Defaults to OvsVifDriver."); + LOGGER.info("No libvirt.vif.driver specified. Defaults to OvsVifDriver."); defaultVifDriverName = DEFAULT_OVS_VIF_DRIVER_CLASS_NAME; } else { - s_logger.info("No libvirt.vif.driver specified. Defaults to BridgeVifDriver."); + LOGGER.info("No libvirt.vif.driver specified. 
Defaults to BridgeVifDriver."); defaultVifDriverName = DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME; } } @@ -1632,15 +1633,15 @@ private void getPifs() { for (int i = 0; i < netdevs.length; i++) { final File isbridge = new File(netdevs[i].getAbsolutePath() + "/bridge"); final String netdevName = netdevs[i].getName(); - s_logger.debug("looking in file " + netdevs[i].getAbsolutePath() + "/bridge"); + LOGGER.debug("looking in file " + netdevs[i].getAbsolutePath() + "/bridge"); if (isbridge.exists()) { - s_logger.debug("Found bridge " + netdevName); + LOGGER.debug("Found bridge " + netdevName); bridges.add(netdevName); } } for (final String bridge : bridges) { - s_logger.debug("looking for pif for bridge " + bridge); + LOGGER.debug("looking for pif for bridge " + bridge); final String pif = getPif(bridge); if (isPublicBridge(bridge)) { pifs.put("public", pif); @@ -1654,10 +1655,10 @@ private void getPifs() { // guest(private) creates bridges on a pif, if private bridge not found try pif direct // This addresses the unnecessary requirement of someone to create an unused bridge just for traffic label if (pifs.get("private") == null) { - s_logger.debug("guest(private) traffic label '" + guestBridgeName + "' not found as bridge, looking for physical interface"); + LOGGER.debug("guest(private) traffic label '" + guestBridgeName + "' not found as bridge, looking for physical interface"); final File dev = new File("/sys/class/net/" + guestBridgeName); if (dev.exists()) { - s_logger.debug("guest(private) traffic label '" + guestBridgeName + "' found as a physical device"); + LOGGER.debug("guest(private) traffic label '" + guestBridgeName + "' found as a physical device"); pifs.put("private", guestBridgeName); } } @@ -1665,15 +1666,15 @@ private void getPifs() { // public creates bridges on a pif, if private bridge not found try pif direct // This addresses the unnecessary requirement of someone to create an unused bridge just for traffic label if (pifs.get("public") == null) { - 
s_logger.debug("public traffic label '" + publicBridgeName + "' not found as bridge, looking for physical interface"); + LOGGER.debug("public traffic label '" + publicBridgeName + "' not found as bridge, looking for physical interface"); final File dev = new File("/sys/class/net/" + publicBridgeName); if (dev.exists()) { - s_logger.debug("public traffic label '" + publicBridgeName + "' found as a physical device"); + LOGGER.debug("public traffic label '" + publicBridgeName + "' found as a physical device"); pifs.put("public", publicBridgeName); } } - s_logger.debug("done looking for pifs, no more bridges"); + LOGGER.debug("done looking for pifs, no more bridges"); } boolean isGuestBridge(String bridge) { @@ -1682,10 +1683,10 @@ boolean isGuestBridge(String bridge) { private void getOvsPifs() { final String cmdout = Script.runSimpleBashScript("ovs-vsctl list-br | sed '{:q;N;s/\\n/%/g;t q}'"); - s_logger.debug("cmdout was " + cmdout); + LOGGER.debug("cmdout was " + cmdout); final List bridges = Arrays.asList(cmdout.split("%")); for (final String bridge : bridges) { - s_logger.debug("looking for pif for bridge " + bridge); + LOGGER.debug("looking for pif for bridge " + bridge); // String pif = getOvsPif(bridge); // Not really interested in the pif name at this point for ovs // bridges @@ -1698,7 +1699,7 @@ private void getOvsPifs() { } pifs.put(bridge, pif); } - s_logger.debug("done looking for pifs, no more bridges"); + LOGGER.debug("done looking for pifs, no more bridges"); } public boolean isPublicBridge(String bridge) { @@ -1725,7 +1726,7 @@ private String matchPifFileInDirectory(final String bridgeName) { // if bridgeName already refers to a pif, return it as-is return bridgeName; } - s_logger.debug("failing to get physical interface from bridge " + bridgeName + ", does " + brif.getAbsolutePath() + "exist?"); + LOGGER.debug("failing to get physical interface from bridge " + bridgeName + ", does " + brif.getAbsolutePath() + "exist?"); return ""; } @@ -1733,13 
+1734,13 @@ private String matchPifFileInDirectory(final String bridgeName) { for (int i = 0; i < interfaces.length; i++) { final String fname = interfaces[i].getName(); - s_logger.debug("matchPifFileInDirectory: file name '" + fname + "'"); + LOGGER.debug("matchPifFileInDirectory: file name '" + fname + "'"); if (isInterface(fname)) { return fname; } } - s_logger.debug("failing to get physical interface from bridge " + bridgeName + ", did not find an eth*, bond*, team*, vlan*, em*, p*p*, ens*, eno*, enp*, or enx* in " + brif.getAbsolutePath()); + LOGGER.debug("failing to get physical interface from bridge " + bridgeName + ", did not find an eth*, bond*, team*, vlan*, em*, p*p*, ens*, eno*, enp*, or enx* in " + brif.getAbsolutePath()); return ""; } @@ -1802,7 +1803,7 @@ private boolean checkBridgeNetwork(final String networkName) { } private boolean checkOvsNetwork(final String networkName) { - s_logger.debug("Checking if network " + networkName + " exists as openvswitch bridge"); + LOGGER.debug("Checking if network " + networkName + " exists as openvswitch bridge"); if (networkName == null) { return true; } @@ -1814,13 +1815,13 @@ private boolean checkOvsNetwork(final String networkName) { } public boolean passCmdLine(final String vmName, final String cmdLine) throws InternalErrorException { - final Script command = new Script(patchScriptPath, 300000, s_logger); + final Script command = new Script(patchScriptPath, 300000, LOGGER); String result; command.add("-n", vmName); command.add("-c", cmdLine); result = command.execute(); if (result != null) { - s_logger.error("Passing cmdline failed:" + result); + LOGGER.error("Passing cmdline failed:" + result); return false; } return true; @@ -1880,7 +1881,7 @@ public boolean stop() { final Connect conn = LibvirtConnection.getConnection(); conn.close(); } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } return true; @@ -1908,14 +1909,14 @@ 
public Answer executeRequest(final Command cmd) { public synchronized boolean destroyTunnelNetwork(final String bridge) { findOrCreateTunnelNetwork(bridge); - final Script cmd = new Script(ovsTunnelPath, timeout, s_logger); + final Script cmd = new Script(ovsTunnelPath, timeout, LOGGER); cmd.add("destroy_ovs_bridge"); cmd.add("--bridge", bridge); final String result = cmd.execute(); if (result != null) { - s_logger.debug("OVS Bridge could not be destroyed due to error ==> " + result); + LOGGER.debug("OVS Bridge could not be destroyed due to error ==> " + result); return false; } return true; @@ -1932,9 +1933,9 @@ public synchronized boolean findOrCreateTunnelNetwork(final String nwName) { Script.runSimpleBashScript("ovs-vsctl -- --may-exist add-br " + nwName + " -- set bridge " + nwName + " other_config:ovs-host-setup='-1'"); - s_logger.debug("### KVM network for tunnels created:" + nwName); + LOGGER.debug("### KVM network for tunnels created:" + nwName); } catch (final Exception e) { - s_logger.warn("createTunnelNetwork failed", e); + LOGGER.warn("createTunnelNetwork failed", e); return false; } return true; @@ -1945,7 +1946,7 @@ public synchronized boolean configureTunnelNetwork(final Long networkId, try { final boolean findResult = findOrCreateTunnelNetwork(nwName); if (!findResult) { - s_logger.warn("LibvirtComputingResource.findOrCreateTunnelNetwork() failed! Cannot proceed creating the tunnel."); + LOGGER.warn("LibvirtComputingResource.findOrCreateTunnelNetwork() failed! 
Cannot proceed creating the tunnel."); return false; } final String configuredHosts = Script @@ -1962,7 +1963,7 @@ public synchronized boolean configureTunnelNetwork(final Long networkId, } } if (!configured) { - final Script cmd = new Script(ovsTunnelPath, timeout, s_logger); + final Script cmd = new Script(ovsTunnelPath, timeout, LOGGER); cmd.add("setup_ovs_bridge"); cmd.add("--key", nwName); cmd.add("--cs_host_id", ((Long)hostId).toString()); @@ -1975,7 +1976,7 @@ public synchronized boolean configureTunnelNetwork(final Long networkId, } } } catch (final Exception e) { - s_logger.warn("createandConfigureTunnelNetwork failed", e); + LOGGER.warn("createandConfigureTunnelNetwork failed", e); return false; } return true; @@ -2003,7 +2004,7 @@ public KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final secondaryPool.refresh(); final List disks = secondaryPool.listPhysicalDisks(); if (disks == null || disks.isEmpty()) { - s_logger.error("Failed to get volumes from pool: " + secondaryPool.getUuid()); + LOGGER.error("Failed to get volumes from pool: " + secondaryPool.getUuid()); return null; } for (final KVMPhysicalDisk disk : disks) { @@ -2013,7 +2014,7 @@ public KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final } } if (templateVol == null) { - s_logger.error("Failed to get template from pool: " + secondaryPool.getUuid()); + LOGGER.error("Failed to get template from pool: " + secondaryPool.getUuid()); return null; } } else { @@ -2025,7 +2026,7 @@ public KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final final KVMPhysicalDisk primaryVol = storagePoolManager.copyPhysicalDisk(templateVol, volUuid, primaryPool, 0); return primaryVol; } catch (final CloudRuntimeException e) { - s_logger.error("Failed to download template to primary storage", e); + LOGGER.error("Failed to download template to primary storage", e); return null; } finally { if (secondaryPool != null) { @@ -2056,7 +2057,7 @@ private String 
getBroadcastUriFromBridge(final String brName) { final String pif = matchPifFileInDirectory(brName); final Pattern pattern = Pattern.compile("(\\D+)(\\d+)(\\D*)(\\d*)(\\D*)(\\d*)"); final Matcher matcher = pattern.matcher(pif); - s_logger.debug("getting broadcast uri for pif " + pif + " and bridge " + brName); + LOGGER.debug("getting broadcast uri for pif " + pif + " and bridge " + brName); if(matcher.find()) { if (brName.startsWith("brvx")){ return BroadcastDomainType.Vxlan.toUri(matcher.group(2)).toString(); @@ -2068,13 +2069,13 @@ private String getBroadcastUriFromBridge(final String brName) { return BroadcastDomainType.Vlan.toUri(matcher.group(4)).toString(); } else { //untagged or not matching (eth|bond|team)#.# - s_logger.debug("failed to get vNet id from bridge " + brName + LOGGER.debug("failed to get vNet id from bridge " + brName + "attached to physical interface" + pif + ", perhaps untagged interface"); return ""; } } } else { - s_logger.debug("failed to get vNet id from bridge " + brName + "attached to physical interface" + pif); + LOGGER.debug("failed to get vNet id from bridge " + brName + "attached to physical interface" + pif); return ""; } } @@ -2137,7 +2138,7 @@ private ExecutionResult prepareNetworkElementCommand(final SetupGuestNetworkComm return new ExecutionResult(true, null); } catch (final LibvirtException e) { final String msg = "Creating guest network failed due to " + e.toString(); - s_logger.warn(msg, e); + LOGGER.warn(msg, e); return new ExecutionResult(false, msg); } } @@ -2176,7 +2177,7 @@ protected ExecutionResult prepareNetworkElementCommand(final SetSourceNatCommand return new ExecutionResult(true, "success"); } catch (final LibvirtException e) { final String msg = "Ip SNAT failure due to " + e.toString(); - s_logger.error(msg, e); + LOGGER.error(msg, e); return new ExecutionResult(false, msg); } } @@ -2198,7 +2199,7 @@ protected ExecutionResult prepareNetworkElementCommand(final IpAssocVpcCommand c return new ExecutionResult(true, 
null); } catch (final LibvirtException e) { - s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e); + LOGGER.error("Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.getMessage()); } } @@ -2231,10 +2232,10 @@ public ExecutionResult prepareNetworkElementCommand(final IpAssocCommand cmd) { } return new ExecutionResult(true, null); } catch (final LibvirtException e) { - s_logger.error("ipassoccmd failed", e); + LOGGER.error("ipassoccmd failed", e); return new ExecutionResult(false, e.getMessage()); } catch (final InternalErrorException e) { - s_logger.error("ipassoccmd failed", e); + LOGGER.error("ipassoccmd failed", e); return new ExecutionResult(false, e.getMessage()); } } @@ -2271,10 +2272,10 @@ protected ExecutionResult cleanupNetworkElementCommand(final IpAssocCommand cmd) } } catch (final LibvirtException e) { - s_logger.error("ipassoccmd failed", e); + LOGGER.error("ipassoccmd failed", e); return new ExecutionResult(false, e.getMessage()); } catch (final InternalErrorException e) { - s_logger.error("ipassoccmd failed", e); + LOGGER.error("ipassoccmd failed", e); return new ExecutionResult(false, e.getMessage()); } @@ -2308,14 +2309,14 @@ public PowerState getVmState(final Connect conn, final String vmName) { final PowerState s = convertToPowerState(vms.getInfo().state); return s; } catch (final LibvirtException e) { - s_logger.warn("Can't get vm state " + vmName + e.getMessage() + "retry:" + retry); + LOGGER.warn("Can't get vm state " + vmName + e.getMessage() + "retry:" + retry); } finally { try { if (vms != null) { vms.free(); } } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + LOGGER.trace("Ignoring libvirt error.", l); } } } @@ -2327,7 +2328,7 @@ public String networkUsage(final String privateIpAddress, final String option, f } public String networkUsage(final String privateIpAddress, final String option, final String vif, String publicIp) { - final 
Script getUsage = new Script(routerProxyPath, s_logger); + final Script getUsage = new Script(routerProxyPath, LOGGER); getUsage.add("netusage.sh"); getUsage.add(privateIpAddress); if (option.equals("get")) { @@ -2348,7 +2349,7 @@ public String networkUsage(final String privateIpAddress, final String option, f final OutputInterpreter.OneLineParser usageParser = new OutputInterpreter.OneLineParser(); final String result = getUsage.execute(usageParser); if (result != null) { - s_logger.debug("Failed to execute networkUsage:" + result); + LOGGER.debug("Failed to execute networkUsage:" + result); return null; } return usageParser.getLine(); @@ -2373,7 +2374,7 @@ public long[] getNetworkStats(final String privateIP, String publicIp) { } public String getHaproxyStats(final String privateIP, final String publicIp, final Integer port) { - final Script getHaproxyStatsScript = new Script(routerProxyPath, s_logger); + final Script getHaproxyStatsScript = new Script(routerProxyPath, LOGGER); getHaproxyStatsScript.add("get_haproxy_stats.sh"); getHaproxyStatsScript.add(privateIP); getHaproxyStatsScript.add(publicIp); @@ -2382,7 +2383,7 @@ public String getHaproxyStats(final String privateIP, final String publicIp, fin final OutputInterpreter.OneLineParser statsParser = new OutputInterpreter.OneLineParser(); final String result = getHaproxyStatsScript.execute(statsParser); if (result != null) { - s_logger.debug("Failed to execute haproxy stats:" + result); + LOGGER.debug("Failed to execute haproxy stats:" + result); return null; } return statsParser.getLine(); @@ -2399,7 +2400,7 @@ public long[] getNetworkLbStats(final String privateIp, final String publicIp, f } public String configureVPCNetworkUsage(final String privateIpAddress, final String publicIp, final String option, final String vpcCIDR) { - final Script getUsage = new Script(routerProxyPath, s_logger); + final Script getUsage = new Script(routerProxyPath, LOGGER); getUsage.add("vpc_netusage.sh"); 
getUsage.add(privateIpAddress); getUsage.add("-l", publicIp); @@ -2420,7 +2421,7 @@ public String configureVPCNetworkUsage(final String privateIpAddress, final Stri final OutputInterpreter.OneLineParser usageParser = new OutputInterpreter.OneLineParser(); final String result = getUsage.execute(usageParser); if (result != null) { - s_logger.debug("Failed to execute VPCNetworkUsage:" + result); + LOGGER.debug("Failed to execute VPCNetworkUsage:" + result); return null; } return usageParser.getLine(); @@ -2472,19 +2473,19 @@ protected void setQuotaAndPeriod(VirtualMachineTO vmTO, CpuTuneDef ctd) { int period = CpuTuneDef.DEFAULT_PERIOD; int quota = (int) (period * cpuQuotaPercentage); if (quota < CpuTuneDef.MIN_QUOTA) { - s_logger.info("Calculated quota (" + quota + ") below the minimum (" + CpuTuneDef.MIN_QUOTA + ") for VM domain " + vmTO.getUuid() + ", setting it to minimum " + + LOGGER.info("Calculated quota (" + quota + ") below the minimum (" + CpuTuneDef.MIN_QUOTA + ") for VM domain " + vmTO.getUuid() + ", setting it to minimum " + "and calculating period instead of using the default"); quota = CpuTuneDef.MIN_QUOTA; period = (int) ((double) quota / cpuQuotaPercentage); if (period > CpuTuneDef.MAX_PERIOD) { - s_logger.info("Calculated period (" + period + ") exceeds the maximum (" + CpuTuneDef.MAX_PERIOD + + LOGGER.info("Calculated period (" + period + ") exceeds the maximum (" + CpuTuneDef.MAX_PERIOD + "), setting it to the maximum"); period = CpuTuneDef.MAX_PERIOD; } } ctd.setQuota(quota); ctd.setPeriod(period); - s_logger.info("Setting quota=" + quota + ", period=" + period + " to VM domain " + vmTO.getUuid()); + LOGGER.info("Setting quota=" + quota + ", period=" + period + " to VM domain " + vmTO.getUuid()); } } @@ -2497,7 +2498,7 @@ protected void enlightenWindowsVm(VirtualMachineTO vmTO, FeaturesDef features) { hyv.setFeature("spinlocks", true); hyv.setRetries(8096); features.addHyperVFeature(hyv); - s_logger.info("Enabling KVM Enlightment Features to VM 
domain " + vmTO.getUuid()); + LOGGER.info("Enabling KVM Enlightment Features to VM domain " + vmTO.getUuid()); } } @@ -2505,7 +2506,7 @@ protected void enlightenWindowsVm(VirtualMachineTO vmTO, FeaturesDef features) { * Creates VM KVM definitions from virtual machine transfer object specifications. */ public LibvirtVMDef createVMFromSpec(final VirtualMachineTO vmTO) { - s_logger.debug(String.format("Creating VM from specifications [%s]", vmTO.toString())); + LOGGER.debug(String.format("Creating VM from specifications [%s]", vmTO.toString())); LibvirtVMDef vm = new LibvirtVMDef(); vm.setDomainName(vmTO.getName()); @@ -2522,10 +2523,10 @@ public LibvirtVMDef createVMFromSpec(final VirtualMachineTO vmTO) { if (MapUtils.isNotEmpty(customParams) && customParams.containsKey(GuestDef.BootType.UEFI.toString())) { isUefiEnabled = true; - s_logger.debug(String.format("Enabled UEFI for VM UUID [%s].", uuid)); + LOGGER.debug(String.format("Enabled UEFI for VM UUID [%s].", uuid)); if (isSecureMode(customParams.get(GuestDef.BootType.UEFI.toString()))) { - s_logger.debug(String.format("Enabled Secure Boot for VM UUID [%s].", uuid)); + LOGGER.debug(String.format("Enabled Secure Boot for VM UUID [%s].", uuid)); isSecureBoot = true; } @@ -2534,7 +2535,7 @@ public LibvirtVMDef createVMFromSpec(final VirtualMachineTO vmTO) { Map extraConfig = vmTO.getExtraConfig(); if (dpdkSupport && (!extraConfig.containsKey(DpdkHelper.DPDK_NUMA) || !extraConfig.containsKey(DpdkHelper.DPDK_HUGE_PAGES))) { - s_logger.info(String.format("DPDK is enabled for VM [%s], but it needs extra configurations for CPU NUMA and Huge Pages for VM deployment.", vmTO.toString())); + LOGGER.info(String.format("DPDK is enabled for VM [%s], but it needs extra configurations for CPU NUMA and Huge Pages for VM deployment.", vmTO.toString())); } configureVM(vmTO, vm, customParams, isUefiEnabled, isSecureBoot, bootMode, extraConfig, uuid); return vm; @@ -2545,7 +2546,7 @@ public LibvirtVMDef createVMFromSpec(final 
VirtualMachineTO vmTO) { */ private void configureVM(VirtualMachineTO vmTO, LibvirtVMDef vm, Map customParams, boolean isUefiEnabled, boolean isSecureBoot, String bootMode, Map extraConfig, String uuid) { - s_logger.debug(String.format("Configuring VM with UUID [%s].", uuid)); + LOGGER.debug(String.format("Configuring VM with UUID [%s].", uuid)); GuestDef guest = createGuestFromSpec(vmTO, vm, uuid, customParams); if (isUefiEnabled) { @@ -2580,7 +2581,7 @@ private void configureVM(VirtualMachineTO vmTO, LibvirtVMDef vm, Map extraConfig) { if (MapUtils.isNotEmpty(extraConfig) && VirtualMachine.Type.User.equals(vmTO.getType())) { - s_logger.debug(String.format("Appending extra configuration data [%s] to guest VM [%s] domain XML.", extraConfig, vmTO.toString())); + LOGGER.debug(String.format("Appending extra configuration data [%s] to guest VM [%s] domain XML.", extraConfig, vmTO.toString())); addExtraConfigComponent(extraConfig, vm); } } @@ -2753,11 +2754,11 @@ public int calculateCpuShares(VirtualMachineTO vmTO) { if (hostCpuMaxCapacity > 0) { int updatedCpuShares = (int) Math.ceil((requestedCpuShares * CGROUP_V2_UPPER_LIMIT) / (double) hostCpuMaxCapacity); - s_logger.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " + + logger.debug(String.format("This host utilizes cgroupv2 (as the max shares value is [%s]), thus, the VM requested shares of [%s] will be converted to " + "consider the host limits; the new CPU shares value is [%s].", hostCpuMaxCapacity, requestedCpuShares, updatedCpuShares)); return updatedCpuShares; } - s_logger.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " + + logger.debug(String.format("This host does not have a maximum CPU shares set; therefore, this host utilizes cgroupv1 and the VM requested CPU shares [%s] will not be " + "converted.", 
requestedCpuShares)); return requestedCpuShares; } @@ -2881,12 +2882,12 @@ protected GuestResourceDef createGuestResourceDef(VirtualMachineTO vmTO){ protected long getCurrentMemAccordingToMemBallooning(VirtualMachineTO vmTO, long maxRam) { long retVal = maxRam; if (noMemBalloon) { - s_logger.warn(String.format("Setting VM's [%s] current memory as max memory [%s] due to memory ballooning is disabled. If you are using a custom service offering, verify if memory ballooning really should be disabled.", vmTO.toString(), maxRam)); + LOGGER.warn(String.format("Setting VM's [%s] current memory as max memory [%s] due to memory ballooning is disabled. If you are using a custom service offering, verify if memory ballooning really should be disabled.", vmTO.toString(), maxRam)); } else if (vmTO != null && vmTO.getType() != VirtualMachine.Type.User) { - s_logger.warn(String.format("Setting System VM's [%s] current memory as max memory [%s].", vmTO.toString(), maxRam)); + LOGGER.warn(String.format("Setting System VM's [%s] current memory as max memory [%s].", vmTO.toString(), maxRam)); } else { long minRam = ByteScaleUtils.bytesToKibibytes(vmTO.getMinRam()); - s_logger.debug(String.format("Setting VM's [%s] current memory as min memory [%s] due to memory ballooning is enabled.", vmTO.toString(), minRam)); + logger.debug(String.format("Setting VM's [%s] current memory as min memory [%s] due to memory ballooning is enabled.", vmTO.toString(), minRam)); retVal = minRam; } return retVal; @@ -3027,13 +3028,13 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { // check for disk activity, if detected we should exit because vm is running elsewhere if (diskActivityCheckEnabled && physicalDisk != null && physicalDisk.getFormat() == PhysicalDiskFormat.QCOW2) { - s_logger.debug("Checking physical disk file at path " + volPath + " for disk activity to ensure vm is not running elsewhere"); + LOGGER.debug("Checking physical disk file at path " + volPath + " for disk activity to 
ensure vm is not running elsewhere"); try { HypervisorUtils.checkVolumeFileForActivity(volPath, diskActivityCheckTimeoutSeconds, diskActivityInactiveThresholdMilliseconds, diskActivityCheckFileSizeMin); } catch (final IOException ex) { throw new CloudRuntimeException("Unable to check physical disk file for activity", ex); } - s_logger.debug("Disk activity check cleared"); + LOGGER.debug("Disk activity check cleared"); } // if params contains a rootDiskController key, use its value (this is what other HVs are doing) @@ -3132,7 +3133,7 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { } } if (vm.getDevices() == null) { - s_logger.error("There is no devices for" + vm); + LOGGER.error("There is no devices for" + vm); throw new RuntimeException("There is no devices for" + vm); } vm.getDevices().addDevice(disk); @@ -3163,7 +3164,7 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { final int devId = volume.getDiskSeq().intValue(); final String device = mapRbdDevice(physicalDisk); if (device != null) { - s_logger.debug("RBD device on host is: " + device); + LOGGER.debug("RBD device on host is: " + device); final DiskDef diskdef = new DiskDef(); diskdef.defBlockBasedDisk(device, devId, DiskDef.DiskBus.VIRTIO); diskdef.setQemuDriver(false); @@ -3186,10 +3187,10 @@ private KVMPhysicalDisk getPhysicalDiskPrimaryStore(PrimaryDataStoreTO primaryDa * Check if IO_URING is supported by qemu */ protected boolean isIoUringSupportedByQemu() { - s_logger.debug("Checking if iouring is supported"); + LOGGER.debug("Checking if iouring is supported"); String command = getIoUringCheckCommand(); if (org.apache.commons.lang3.StringUtils.isBlank(command)) { - s_logger.debug("Could not check iouring support, disabling it"); + LOGGER.debug("Could not check iouring support, disabling it"); return false; } int exitValue = executeBashScriptAndRetrieveExitValue(command); @@ -3202,7 +3203,7 @@ protected String getIoUringCheckCommand() { File file = new File(qemuPath); if 
(file.exists()) { String cmd = String.format("ldd %s | grep -Eqe '[[:space:]]liburing\\.so'", qemuPath); - s_logger.debug("Using the check command: " + cmd); + LOGGER.debug("Using the check command: " + cmd); return cmd; } } @@ -3216,7 +3217,7 @@ protected String getIoUringCheckCommand() { * (ii) Libvirt >= 6.3.0 */ public void setDiskIoDriver(DiskDef disk, IoDriverPolicy ioDriver) { - s_logger.debug(String.format("Disk IO driver policy [%s]. The host supports the io_uring policy [%s]", ioDriver, enableIoUring)); + logger.debug(String.format("Disk IO driver policy [%s]. The host supports the io_uring policy [%s]", ioDriver, enableIoUring)); if (ioDriver != null) { if (IoDriverPolicy.IO_URING != ioDriver) { disk.setIoDriver(ioDriver); @@ -3304,7 +3305,7 @@ private void setBurstProperties(final VolumeObjectTO volumeObjectTO, final DiskD private void createVif(final LibvirtVMDef vm, final VirtualMachineTO vmSpec, final NicTO nic, final String nicAdapter, Map extraConfig) throws InternalErrorException, LibvirtException { if (vm.getDevices() == null) { - s_logger.error("LibvirtVMDef object get devices with null result"); + LOGGER.error("LibvirtVMDef object get devices with null result"); throw new InternalErrorException("LibvirtVMDef object get devices with null result"); } final InterfaceDef interfaceDef = getVifDriver(nic.getType(), nic.getName()).plug(nic, vm.getPlatformEmulator(), nicAdapter, extraConfig); @@ -3322,7 +3323,7 @@ public boolean cleanupDisk(final DiskDef disk) { final String path = disk.getDiskPath(); if (StringUtils.isBlank(path)) { - s_logger.debug("Unable to clean up disk with null path (perhaps empty cdrom drive):" + disk); + LOGGER.debug("Unable to clean up disk with null path (perhaps empty cdrom drive):" + disk); return false; } @@ -3351,15 +3352,15 @@ public void detachAndAttachConfigDriveISO(final Connect conn, final String vmNam try { String result = attachOrDetachISO(conn, vmName, configdrive.getDiskPath(), false, 
CONFIG_DRIVE_ISO_DEVICE_ID); if (result != null) { - s_logger.warn("Detach ConfigDrive ISO with result: " + result); + LOGGER.warn("Detach ConfigDrive ISO with result: " + result); } result = attachOrDetachISO(conn, vmName, configdrive.getDiskPath(), true, CONFIG_DRIVE_ISO_DEVICE_ID); if (result != null) { - s_logger.warn("Attach ConfigDrive ISO with result: " + result); + LOGGER.warn("Attach ConfigDrive ISO with result: " + result); } } catch (final LibvirtException | InternalErrorException | URISyntaxException e) { final String msg = "Detach and attach ConfigDrive ISO failed due to " + e.toString(); - s_logger.warn(msg, e); + LOGGER.warn(msg, e); } } } @@ -3501,17 +3502,17 @@ protected synchronized String attachOrDetachDevice(final Connect conn, final boo try { dm = conn.domainLookupByName(vmName); if (attach) { - s_logger.debug("Attaching device: " + xml); + LOGGER.debug("Attaching device: " + xml); dm.attachDevice(xml); } else { - s_logger.debug("Detaching device: " + xml); + LOGGER.debug("Detaching device: " + xml); dm.detachDevice(xml); } } catch (final LibvirtException e) { if (attach) { - s_logger.warn("Failed to attach device to " + vmName + ": " + e.getMessage()); + LOGGER.warn("Failed to attach device to " + vmName + ": " + e.getMessage()); } else { - s_logger.warn("Failed to detach device from " + vmName + ": " + e.getMessage()); + LOGGER.warn("Failed to detach device from " + vmName + ": " + e.getMessage()); } throw e; } finally { @@ -3519,7 +3520,7 @@ protected synchronized String attachOrDetachDevice(final Connect conn, final boo try { dm.free(); } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + LOGGER.trace("Ignoring libvirt error.", l); } } } @@ -3555,19 +3556,19 @@ public PingCommand getCurrentStatus(final long id) { */ private HealthCheckResult getHostHealthCheckResult() { if (StringUtils.isBlank(hostHealthCheckScriptPath)) { - s_logger.debug("Host health check script path is not specified"); + 
logger.debug("Host health check script path is not specified"); return HealthCheckResult.IGNORE; } File script = new File(hostHealthCheckScriptPath); if (!script.exists() || !script.isFile() || !script.canExecute()) { - s_logger.warn(String.format("The host health check script file set at: %s cannot be executed, " + + logger.warn(String.format("The host health check script file set at: %s cannot be executed, " + "reason: %s", hostHealthCheckScriptPath, !script.exists() ? "file does not exist" : "please check file permissions to execute this file")); return HealthCheckResult.IGNORE; } int exitCode = executeBashScriptAndRetrieveExitValue(hostHealthCheckScriptPath); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Host health check script exit code: %s", exitCode)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Host health check script exit code: %s", exitCode)); } return retrieveHealthCheckResultFromExitCode(exitCode); } @@ -3585,7 +3586,7 @@ public Type getType() { } private Map getVersionStrings() { - final Script command = new Script(versionStringPath, timeout, s_logger); + final Script command = new Script(versionStringPath, timeout, LOGGER); final KeyValueInterpreter kvi = new KeyValueInterpreter(); final String result = command.execute(kvi); if (result == null) { @@ -3659,17 +3660,17 @@ public StartupCommand[] initialize() { */ protected void calculateHostCpuMaxCapacity(int cpuCores, Long cpuSpeed) { String output = Script.runSimpleBashScript(COMMAND_GET_CGROUP_HOST_VERSION); - s_logger.info(String.format("Host uses control group [%s].", output)); + logger.info(String.format("Host uses control group [%s].", output)); if (!CGROUP_V2.equals(output)) { - s_logger.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", getHostCpuMaxCapacity())); + logger.info(String.format("Setting host CPU max capacity to 0, as it uses cgroup v1.", getHostCpuMaxCapacity())); setHostCpuMaxCapacity(0); return; } - 
s_logger.info(String.format("Calculating the max shares of the host.")); + logger.info(String.format("Calculating the max shares of the host.")); setHostCpuMaxCapacity(cpuCores * cpuSpeed.intValue()); - s_logger.info(String.format("The max shares of the host is [%d].", getHostCpuMaxCapacity())); + logger.info(String.format("The max shares of the host is [%d].", getHostCpuMaxCapacity())); } private StartupStorageCommand createLocalStoragePool(String localStoragePath, String localStorageUUID, StartupRoutingCommand cmd) { @@ -3686,7 +3687,7 @@ private StartupStorageCommand createLocalStoragePool(String localStoragePath, St sscmd.setDataCenter(dcId); sscmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); } catch (final CloudRuntimeException e) { - s_logger.debug("Unable to initialize local storage pool: " + e); + LOGGER.debug("Unable to initialize local storage pool: " + e); } setupLibvirtEventListener(); return sscmd; @@ -3697,15 +3698,15 @@ private void setupLibvirtEventListener() { Connect conn = LibvirtConnection.getConnection(); conn.addLifecycleListener(this::onDomainLifecycleChange); - s_logger.debug("Set up the libvirt domain event lifecycle listener"); + logger.debug("Set up the libvirt domain event lifecycle listener"); } catch (LibvirtException e) { - s_logger.error("Failed to get libvirt connection for domain event lifecycle", e); + logger.error("Failed to get libvirt connection for domain event lifecycle", e); } } private int onDomainLifecycleChange(Domain domain, DomainEvent domainEvent) { try { - s_logger.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent)); + logger.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent)); if (domainEvent != null) { switch (domainEvent.getType()) { case STOPPED: @@ -3714,20 +3715,20 @@ private int onDomainLifecycleChange(Domain domain, DomainEvent domainEvent) { * initiated, and avoid pushing extra updates for 
actions we are initiating without a need for extra tracking */ DomainEventDetail detail = domainEvent.getDetail(); if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail) || StoppedDetail.FAILED.equals(detail)) { - s_logger.info("Triggering out of band status update due to completed self-shutdown or crash of VM"); + logger.info("Triggering out of band status update due to completed self-shutdown or crash of VM"); _agentStatusUpdater.triggerUpdate(); } else { - s_logger.debug("Event detail: " + detail); + logger.debug("Event detail: " + detail); } break; default: - s_logger.debug(String.format("No handling for event %s", domainEvent)); + logger.debug(String.format("No handling for event %s", domainEvent)); } } } catch (LibvirtException e) { - s_logger.error("Libvirt exception while processing lifecycle event", e); + logger.error("Libvirt exception while processing lifecycle event", e); } catch (Throwable e) { - s_logger.error("Error during lifecycle", e); + logger.error("Error during lifecycle", e); } return 0; } @@ -3741,7 +3742,7 @@ private String getIqn() { try { final String textToFind = "InitiatorName="; - final Script iScsiAdmCmd = new Script(true, "grep", 0, s_logger); + final Script iScsiAdmCmd = new Script(true, "grep", 0, LOGGER); iScsiAdmCmd.add(textToFind); iScsiAdmCmd.add("/etc/iscsi/initiatorname.iscsi"); @@ -3773,7 +3774,7 @@ public Pair getSourceHostPath(String diskPath) { String sourcePath = null; try { String mountResult = Script.runSimpleBashScript("mount | grep \"" + diskPath + "\""); - s_logger.debug("Got mount result for " + diskPath + "\n\n" + mountResult); + logger.debug("Got mount result for " + diskPath + "\n\n" + mountResult); if (StringUtils.isNotEmpty(mountResult)) { String[] res = mountResult.strip().split(" "); if (res[0].contains(":")) { @@ -3790,7 +3791,7 @@ public Pair getSourceHostPath(String diskPath) { return new Pair<>(sourceHostIp, sourcePath); } } catch (Exception ex) { - s_logger.warn("Failed to list 
source host and IP for " + diskPath + ex.toString()); + logger.warn("Failed to list source host and IP for " + diskPath + ex.toString()); } return null; } @@ -3803,14 +3804,14 @@ public List getAllVmNames(final Connect conn) { la.add(names[i]); } } catch (final LibvirtException e) { - s_logger.warn("Failed to list Defined domains", e); + LOGGER.warn("Failed to list Defined domains", e); } int[] ids = null; try { ids = conn.listDomains(); } catch (final LibvirtException e) { - s_logger.warn("Failed to list domains", e); + LOGGER.warn("Failed to list domains", e); return la; } @@ -3820,14 +3821,14 @@ public List getAllVmNames(final Connect conn) { dm = conn.domainLookupByID(ids[i]); la.add(dm.getName()); } catch (final LibvirtException e) { - s_logger.warn("Unable to get vms", e); + LOGGER.warn("Unable to get vms", e); } finally { try { if (dm != null) { dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } } } @@ -3846,7 +3847,7 @@ private HashMap getHostVmStateReport() { conn = LibvirtConnection.getConnectionByType(HypervisorType.KVM.toString()); vmStates.putAll(getHostVmStateReport(conn)); } catch (final LibvirtException e) { - s_logger.debug("Failed to get connection: " + e.getMessage()); + LOGGER.debug("Failed to get connection: " + e.getMessage()); } } @@ -3855,7 +3856,7 @@ private HashMap getHostVmStateReport() { conn = LibvirtConnection.getConnectionByType(HypervisorType.KVM.toString()); vmStates.putAll(getHostVmStateReport(conn)); } catch (final LibvirtException e) { - s_logger.debug("Failed to get connection: " + e.getMessage()); + LOGGER.debug("Failed to get connection: " + e.getMessage()); } } @@ -3871,13 +3872,13 @@ private HashMap getHostVmStateReport(final Conne try { ids = conn.listDomains(); } catch (final LibvirtException e) { - s_logger.warn("Unable to listDomains", e); + LOGGER.warn("Unable to listDomains", e); return null; } try { vms = 
conn.listDefinedDomains(); } catch (final LibvirtException e) { - s_logger.warn("Unable to listDomains", e); + LOGGER.warn("Unable to listDomains", e); return null; } @@ -3890,7 +3891,7 @@ private HashMap getHostVmStateReport(final Conne final PowerState state = convertToPowerState(ps); - s_logger.trace("VM " + dm.getName() + ": powerstate = " + ps + "; vm state=" + state.toString()); + LOGGER.trace("VM " + dm.getName() + ": powerstate = " + ps + "; vm state=" + state.toString()); final String vmName = dm.getName(); // TODO : for XS/KVM (host-based resource), we require to remove @@ -3901,14 +3902,14 @@ private HashMap getHostVmStateReport(final Conne vmStates.put(vmName, new HostVmStateReportEntry(state, conn.getHostName())); } } catch (final LibvirtException e) { - s_logger.warn("Unable to get vms", e); + LOGGER.warn("Unable to get vms", e); } finally { try { if (dm != null) { dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } } } @@ -3921,7 +3922,7 @@ private HashMap getHostVmStateReport(final Conne final DomainState ps = dm.getInfo().state; final PowerState state = convertToPowerState(ps); final String vmName = dm.getName(); - s_logger.trace("VM " + vmName + ": powerstate = " + ps + "; vm state=" + state.toString()); + LOGGER.trace("VM " + vmName + ": powerstate = " + ps + "; vm state=" + state.toString()); // TODO : for XS/KVM (host-based resource), we require to remove // VM completely from host, for some reason, KVM seems to still keep @@ -3931,14 +3932,14 @@ private HashMap getHostVmStateReport(final Conne vmStates.put(vmName, new HostVmStateReportEntry(state, conn.getHostName())); } } catch (final LibvirtException e) { - s_logger.warn("Unable to get vms", e); + LOGGER.warn("Unable to get vms", e); } finally { try { if (dm != null) { dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt 
error.", e); } } } @@ -3958,7 +3959,7 @@ public String rebootVM(final Connect conn, final String vmName) throws LibvirtEx dm.reboot(0x1); return null; } catch (final LibvirtException e) { - s_logger.warn("Failed to create vm", e); + LOGGER.warn("Failed to create vm", e); msg = e.getMessage(); } finally { try { @@ -3966,7 +3967,7 @@ public String rebootVM(final Connect conn, final String vmName) throws LibvirtEx dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } } @@ -3982,18 +3983,18 @@ public String stopVM(final Connect conn, final String vmName, final boolean forc dm = conn.domainLookupByName(vmName); cleanVMSnapshotMetadata(dm); } catch (LibvirtException e) { - s_logger.debug("Failed to get vm :" + e.getMessage()); + LOGGER.debug("Failed to get vm :" + e.getMessage()); } finally { try { if (dm != null) { dm.free(); } } catch (LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + LOGGER.trace("Ignoring libvirt error.", l); } } - s_logger.debug("Try to stop the vm at first"); + LOGGER.debug("Try to stop the vm at first"); if (forceStop) { return stopVMInternal(conn, vmName, true); } @@ -4013,25 +4014,25 @@ public String stopVM(final Connect conn, final String vmName, final boolean forc state = dm.getInfo().state; break; } catch (final LibvirtException e) { - s_logger.debug("Failed to get vm status:" + e.getMessage()); + LOGGER.debug("Failed to get vm status:" + e.getMessage()); } finally { try { if (dm != null) { dm.free(); } } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + LOGGER.trace("Ignoring libvirt error.", l); } } } if (state == null) { - s_logger.debug("Can't get vm's status, assume it's dead already"); + LOGGER.debug("Can't get vm's status, assume it's dead already"); return null; } if (state != DomainState.VIR_DOMAIN_SHUTOFF) { - s_logger.debug("Try to destroy the vm"); + LOGGER.debug("Try to destroy the 
vm"); ret = stopVMInternal(conn, vmName, true); if (ret != null) { return ret; @@ -4070,13 +4071,13 @@ protected String stopVMInternal(final Connect conn, final String vmName, final b } catch (final LibvirtException e) { final String error = e.toString(); if (error.contains("Domain not found")) { - s_logger.debug("successfully shut down vm " + vmName); + LOGGER.debug("successfully shut down vm " + vmName); } else { - s_logger.debug("Error in waiting for vm shutdown:" + error); + LOGGER.debug("Error in waiting for vm shutdown:" + error); } } if (retry < 0) { - s_logger.warn("Timed out waiting for domain " + vmName + " to shutdown gracefully"); + LOGGER.warn("Timed out waiting for domain " + vmName + " to shutdown gracefully"); return Script.ERR_TIMEOUT; } else { if (persist == 1) { @@ -4086,13 +4087,13 @@ protected String stopVMInternal(final Connect conn, final String vmName, final b } } catch (final LibvirtException e) { if (e.getMessage().contains("Domain not found")) { - s_logger.debug("VM " + vmName + " doesn't exist, no need to stop it"); + LOGGER.debug("VM " + vmName + " doesn't exist, no need to stop it"); return null; } - s_logger.debug("Failed to stop VM :" + vmName + " :", e); + LOGGER.debug("Failed to stop VM :" + vmName + " :", e); return e.getMessage(); } catch (final InterruptedException ie) { - s_logger.debug("Interrupted sleep"); + LOGGER.debug("Interrupted sleep"); return ie.getMessage(); } finally { try { @@ -4100,7 +4101,7 @@ protected String stopVMInternal(final Connect conn, final String vmName, final b dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } } @@ -4121,7 +4122,7 @@ public Integer getVncPort(final Connect conn, final String vmName) throws Libvir dm.free(); } } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + LOGGER.trace("Ignoring libvirt error.", l); } } } @@ -4137,7 +4138,7 @@ private boolean 
IsHVMEnabled(final Connect conn) { } } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } return false; } @@ -4147,7 +4148,7 @@ private String getHypervisorPath(final Connect conn) { try { parser.parseCapabilitiesXML(conn.getCapabilities()); } catch (final LibvirtException e) { - s_logger.debug(e.getMessage()); + LOGGER.debug(e.getMessage()); } return parser.getEmulator(); } @@ -4173,10 +4174,10 @@ public DiskDef.DiskBus getDiskModelFromVMDetail(final VirtualMachineTO vmTO) { String rootDiskController = details.get(VmDetailConstants.ROOT_DISK_CONTROLLER); if (StringUtils.isNotBlank(rootDiskController)) { - s_logger.debug("Passed custom disk controller for ROOT disk " + rootDiskController); + LOGGER.debug("Passed custom disk controller for ROOT disk " + rootDiskController); for (DiskDef.DiskBus bus : DiskDef.DiskBus.values()) { if (bus.toString().equalsIgnoreCase(rootDiskController)) { - s_logger.debug("Found matching enum for disk controller for ROOT disk " + rootDiskController); + LOGGER.debug("Found matching enum for disk controller for ROOT disk " + rootDiskController); return bus; } } @@ -4192,10 +4193,10 @@ public DiskDef.DiskBus getDataDiskModelFromVMDetail(final VirtualMachineTO vmTO) String dataDiskController = details.get(VmDetailConstants.DATA_DISK_CONTROLLER); if (StringUtils.isNotBlank(dataDiskController)) { - s_logger.debug("Passed custom disk controller for DATA disk " + dataDiskController); + LOGGER.debug("Passed custom disk controller for DATA disk " + dataDiskController); for (DiskDef.DiskBus bus : DiskDef.DiskBus.values()) { if (bus.toString().equalsIgnoreCase(dataDiskController)) { - s_logger.debug("Found matching enum for disk controller for DATA disk " + dataDiskController); + LOGGER.debug("Found matching enum for disk controller for DATA disk " + dataDiskController); return bus; } } @@ -4245,7 +4246,7 @@ public List getInterfaces(final Connect conn, final String 
vmName) return parser.getInterfaces(); } catch (final LibvirtException e) { - s_logger.debug("Failed to get dom xml: " + e.toString()); + LOGGER.debug("Failed to get dom xml: " + e.toString()); return new ArrayList(); } finally { try { @@ -4253,7 +4254,7 @@ public List getInterfaces(final Connect conn, final String vmName) dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } } } @@ -4267,7 +4268,7 @@ public List getDisks(final Connect conn, final String vmName) { return parser.getDisks(); } catch (final LibvirtException e) { - s_logger.debug("Failed to get dom xml: " + e.toString()); + LOGGER.debug("Failed to get dom xml: " + e.toString()); return new ArrayList(); } finally { try { @@ -4275,7 +4276,7 @@ public List getDisks(final Connect conn, final String vmName) { dm.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + LOGGER.trace("Ignoring libvirt error.", e); } } } @@ -4285,7 +4286,7 @@ private String executeBashScript(final String script) { } private Script createScript(final String script) { - final Script command = new Script("/bin/bash", timeout, s_logger); + final Script command = new Script("/bin/bash", timeout, LOGGER); command.add("-c"); command.add(script); return command; @@ -4520,8 +4521,8 @@ public VmStatsEntry getVmStat(final Connect conn, final String vmName) throws Li protected long getMemoryFreeInKBs(Domain dm) throws LibvirtException { MemoryStatistic[] memoryStats = dm.memoryStats(NUMMEMSTATS); - if(s_logger.isTraceEnabled()){ - s_logger.trace(String.format("Retrieved memory statistics (information about tags can be found on the libvirt documentation):", ArrayUtils.toString(memoryStats))); + if(LOGGER.isTraceEnabled()){ + LOGGER.trace(String.format("Retrieved memory statistics (information about tags can be found on the libvirt documentation):", ArrayUtils.toString(memoryStats))); } long freeMemory = 
NumberUtils.LONG_MINUS_ONE; @@ -4538,13 +4539,13 @@ protected long getMemoryFreeInKBs(Domain dm) throws LibvirtException { } if (freeMemory == NumberUtils.LONG_MINUS_ONE){ - s_logger.warn("Couldn't retrieve free memory, returning -1."); + LOGGER.warn("Couldn't retrieve free memory, returning -1."); } return freeMemory; } private boolean canBridgeFirewall(final String prvNic) { - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("can_bridge_firewall"); cmd.add("--privnic", prvNic); final String result = cmd.execute(); @@ -4564,7 +4565,7 @@ public boolean destroyNetworkRulesForVM(final Connect conn, final String vmName) final InterfaceDef intf = intfs.get(0); vif = intf.getDevName(); } - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("destroy_network_rules_for_vm"); cmd.add("--vmname", vmName); if (vif != null) { @@ -4610,7 +4611,7 @@ public boolean destroyNetworkRulesForNic(final Connect conn, final String vmName final String brname = intf.getBrName(); final String vif = intf.getDevName(); - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("destroy_network_rules_for_vm"); cmd.add("--vmname", vmName); if (nic.getIp() != null) { @@ -4637,7 +4638,7 @@ public boolean destroyNetworkRulesForNic(final Connect conn, final String vmName public boolean applyDefaultNetworkRules(final Connect conn, final VirtualMachineTO vm, final boolean checkBeforeApply) { NicTO[] nicTOs = new NicTO[] {}; if (vm != null && vm.getNics() != null) { - s_logger.debug("Checking default network rules for vm " + vm.getName()); + LOGGER.debug("Checking default network rules for vm " + vm.getName()); nicTOs = vm.getNics(); } for (NicTO nic : nicTOs) { @@ -4653,7 +4654,7 @@ public boolean 
applyDefaultNetworkRules(final Connect conn, final VirtualMachine break; } if (!applyDefaultNetworkRulesOnNic(conn, vm.getName(), vm.getId(), nic, isFirstNic, checkBeforeApply)) { - s_logger.error("Unable to apply default network rule for nic " + nic.getName() + " for VM " + vm.getName()); + LOGGER.error("Unable to apply default network rule for nic " + nic.getName() + " for VM " + vm.getName()); return false; } isFirstNic = false; @@ -4701,7 +4702,7 @@ public boolean defaultNetworkRules(final Connect conn, final String vmName, fina final String brname = intf.getBrName(); final String vif = intf.getDevName(); - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("default_network_rules"); cmd.add("--vmname", vmName); cmd.add("--vmid", vmId.toString()); @@ -4742,7 +4743,7 @@ protected boolean post_default_network_rules(final Connect conn, final String vm final String brname = intf.getBrName(); final String vif = intf.getDevName(); - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("post_default_network_rules"); cmd.add("--vmname", vmName); cmd.add("--vmid", vmId.toString()); @@ -4768,7 +4769,7 @@ public boolean configureDefaultNetworkRulesForSystemVm(final Connect conn, final return false; } - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("default_network_rules_systemvm"); cmd.add("--vmname", vmName); cmd.add("--localbrname", linkLocalBridgeName); @@ -4792,7 +4793,7 @@ public boolean addNetworkRules(final String vmName, final String vmId, final Str } final String newRules = rules.replace(" ", ";"); - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("add_network_rules"); 
cmd.add("--vmname", vmName); cmd.add("--vmid", vmId); @@ -4822,7 +4823,7 @@ public boolean configureNetworkRulesVMSecondaryIP(final Connect conn, final Stri return false; } - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("network_rules_vmSecondaryIp"); cmd.add("--vmname", vmName); cmd.add("--vmmac", vmMac); @@ -4838,7 +4839,7 @@ public boolean configureNetworkRulesVMSecondaryIP(final Connect conn, final Stri public boolean setupTungstenVRouter(final String oper, final String inf, final String subnet, final String route, final String vrf) { - final Script cmd = new Script(setupTungstenVrouterPath, timeout, s_logger); + final Script cmd = new Script(setupTungstenVrouterPath, timeout, logger); cmd.add(oper); cmd.add(inf); cmd.add(subnet); @@ -4851,7 +4852,7 @@ public boolean setupTungstenVRouter(final String oper, final String inf, final S public boolean updateTungstenLoadbalancerStats(final String lbUuid, final String lbStatsPort, final String lbStatsUri, final String lbStatsAuth) { - final Script cmd = new Script(updateTungstenLoadbalancerStatsPath, timeout, s_logger); + final Script cmd = new Script(updateTungstenLoadbalancerStatsPath, timeout, logger); cmd.add(lbUuid); cmd.add(lbStatsPort); cmd.add(lbStatsUri); @@ -4863,7 +4864,7 @@ public boolean updateTungstenLoadbalancerStats(final String lbUuid, final String public boolean updateTungstenLoadbalancerSsl(final String lbUuid, final String sslCertName, final String certificateKey, final String privateKey, final String privateIp, final String port) { - final Script cmd = new Script(updateTungstenLoadbalancerSslPath, timeout, s_logger); + final Script cmd = new Script(updateTungstenLoadbalancerSslPath, timeout, logger); cmd.add(lbUuid); cmd.add(sslCertName); cmd.add(certificateKey); @@ -4876,7 +4877,7 @@ public boolean updateTungstenLoadbalancerSsl(final String lbUuid, final String s } public boolean 
setupTfRoute(final String privateIpAddress, final String fromNetwork, final String toNetwork) { - final Script setupTfRouteScript = new Script(routerProxyPath, timeout, s_logger); + final Script setupTfRouteScript = new Script(routerProxyPath, timeout, logger); setupTfRouteScript.add("setup_tf_route.py"); setupTfRouteScript.add(privateIpAddress); setupTfRouteScript.add(fromNetwork); @@ -4885,7 +4886,7 @@ public boolean setupTfRoute(final String privateIpAddress, final String fromNetw final OutputInterpreter.OneLineParser setupTfRouteParser = new OutputInterpreter.OneLineParser(); final String result = setupTfRouteScript.execute(setupTfRouteParser); if (result != null) { - s_logger.debug("Failed to execute setup TF Route:" + result); + logger.debug("Failed to execute setup TF Route:" + result); return false; } return true; @@ -4895,7 +4896,7 @@ public boolean cleanupRules() { if (!canBridgeFirewall) { return false; } - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("cleanup_rules"); final String result = cmd.execute(); if (result != null) { @@ -4905,7 +4906,7 @@ public boolean cleanupRules() { } public String getRuleLogsForVms() { - final Script cmd = new Script(securityGroupPath, timeout, s_logger); + final Script cmd = new Script(securityGroupPath, timeout, LOGGER); cmd.add("get_rule_logs_for_vms"); final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); final String result = cmd.execute(parser); @@ -4919,7 +4920,7 @@ private HashMap> syncNetworkGroups(final long id) { final HashMap> states = new HashMap>(); final String result = getRuleLogsForVms(); - s_logger.trace("syncNetworkGroups: id=" + id + " got: " + result); + LOGGER.trace("syncNetworkGroups: id=" + id + " got: " + result); final String[] rulelogs = result != null ? 
result.split(";") : new String[0]; for (final String rulesforvm : rulelogs) { final String[] log = rulesforvm.split(","); @@ -4949,12 +4950,12 @@ public Pair getNicStats(final String nicName) { return new Pair(readDouble(nicName, "rx_bytes"), readDouble(nicName, "tx_bytes")); } - static double readDouble(final String nicName, final String fileName) { + double readDouble(final String nicName, final String fileName) { final String path = "/sys/class/net/" + nicName + "/statistics/" + fileName; try { return Double.parseDouble(FileUtils.readFileToString(new File(path))); } catch (final IOException ioe) { - s_logger.warn("Failed to read the " + fileName + " for " + nicName + " from " + path, ioe); + LOGGER.warn("Failed to read the " + fileName + " for " + nicName + " from " + path, ioe); return 0.0; } } @@ -5011,11 +5012,11 @@ public String mapRbdDevice(final KVMPhysicalDisk disk){ } public List> cleanVMSnapshotMetadata(Domain dm) throws LibvirtException { - s_logger.debug("Cleaning the metadata of vm snapshots of vm " + dm.getName()); + LOGGER.debug("Cleaning the metadata of vm snapshots of vm " + dm.getName()); List> vmsnapshots = new ArrayList>(); if (dm.snapshotNum() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("VM [%s] does not have any snapshots. Skipping cleanup of snapshots for this VM.", dm.getName())); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("VM [%s] does not have any snapshots. 
Skipping cleanup of snapshots for this VM.", dm.getName())); } return vmsnapshots; } @@ -5023,8 +5024,8 @@ public List> cleanVMSnapshotMetadata(Domain dm) try { DomainSnapshot snapshotCurrent = dm.snapshotCurrent(); String snapshotXML = snapshotCurrent.getXMLDesc(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Current snapshot of VM [%s] has the following XML: [%s].", dm.getName(), snapshotXML)); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Current snapshot of VM [%s] has the following XML: [%s].", dm.getName(), snapshotXML)); } snapshotCurrent.free(); @@ -5039,23 +5040,23 @@ public List> cleanVMSnapshotMetadata(Domain dm) currentSnapshotName = getTagValue("name", rootElement); } catch (ParserConfigurationException | SAXException | IOException e) { - s_logger.error(String.format("Failed to parse snapshot configuration [%s] of VM [%s] due to: [%s].", snapshotXML, dm.getName(), e.getMessage()), e); + LOGGER.error(String.format("Failed to parse snapshot configuration [%s] of VM [%s] due to: [%s].", snapshotXML, dm.getName(), e.getMessage()), e); } } catch (LibvirtException e) { - s_logger.error(String.format("Failed to get the current snapshot of VM [%s] due to: [%s]. Continuing the migration process.", dm.getName(), e.getMessage()), e); + LOGGER.error(String.format("Failed to get the current snapshot of VM [%s] due to: [%s]. 
Continuing the migration process.", dm.getName(), e.getMessage()), e); } int flags = 2; // VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY = 2 String[] snapshotNames = dm.snapshotListNames(); Arrays.sort(snapshotNames); - s_logger.debug(String.format("Found [%s] snapshots in VM [%s] to clean.", snapshotNames.length, dm.getName())); + LOGGER.debug(String.format("Found [%s] snapshots in VM [%s] to clean.", snapshotNames.length, dm.getName())); for (String snapshotName: snapshotNames) { DomainSnapshot snapshot = dm.snapshotLookupByName(snapshotName); Boolean isCurrent = (currentSnapshotName != null && currentSnapshotName.equals(snapshotName)) ? true: false; vmsnapshots.add(new Ternary(snapshotName, isCurrent, snapshot.getXMLDesc())); } for (String snapshotName: snapshotNames) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Cleaning snapshot [%s] of VM [%s] metadata.", snapshotNames, dm.getName())); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Cleaning snapshot [%s] of VM [%s] metadata.", snapshotNames, dm.getName())); } DomainSnapshot snapshot = dm.snapshotLookupByName(snapshotName); snapshot.delete(flags); // clean metadata of vm snapshot @@ -5089,12 +5090,12 @@ private static String getTagValue(String tag, Element eElement) { } public void restoreVMSnapshotMetadata(Domain dm, String vmName, List> vmsnapshots) { - s_logger.debug("Restoring the metadata of vm snapshots of vm " + vmName); + LOGGER.debug("Restoring the metadata of vm snapshots of vm " + vmName); for (Ternary vmsnapshot: vmsnapshots) { String snapshotName = vmsnapshot.first(); Boolean isCurrent = vmsnapshot.second(); String snapshotXML = vmsnapshot.third(); - s_logger.debug("Restoring vm snapshot " + snapshotName + " on " + vmName + " with XML:\n " + snapshotXML); + LOGGER.debug("Restoring vm snapshot " + snapshotName + " on " + vmName + " with XML:\n " + snapshotXML); try { int flags = 1; // VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE = 1 if (isCurrent) { @@ -5102,7 +5103,7 @@ 
public void restoreVMSnapshotMetadata(Domain dm, String vmName, List details) { if (!enableManuallySettingCpuTopologyOnKvmVm) { - s_logger.debug(String.format("Skipping manually setting CPU topology on VM's XML due to it is disabled in agent.properties {\"property\": \"%s\", \"value\": %s}.", + LOGGER.debug(String.format("Skipping manually setting CPU topology on VM's XML due to it is disabled in agent.properties {\"property\": \"%s\", \"value\": %s}.", AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM.getName(), enableManuallySettingCpuTopologyOnKvmVm)); return; } @@ -5202,14 +5203,14 @@ public void setBackingFileFormat(String volPath) { // VMs which are created in CloudStack 4.14 and before cannot be started or migrated // in latest Linux distributions due to missing backing file format // Please refer to https://libvirt.org/kbase/backing_chains.html#vm-refuses-to-start-due-to-misconfigured-backing-store-format - s_logger.info("Setting backing file format of " + volPath); + LOGGER.info("Setting backing file format of " + volPath); QemuImgFile backingFile = new QemuImgFile(backingFilePath); Map backingFileinfo = qemu.info(backingFile); String backingFileFmt = backingFileinfo.get(QemuImg.FILE_FORMAT); qemu.rebase(file, backingFile, backingFileFmt, false); } } catch (QemuImgException | LibvirtException e) { - s_logger.error("Failed to set backing file format of " + volPath + " due to : " + e.getMessage(), e); + LOGGER.error("Failed to set backing file format of " + volPath + " due to : " + e.getMessage(), e); } } @@ -5247,7 +5248,7 @@ public static Integer getCpuShares(Domain dm) throws LibvirtException { return Integer.parseInt(c.getValueAsString()); } } - s_logger.warn(String.format("Could not get cpu_shares of domain: [%s]. Returning default value of 0. ", dm.getName())); + LOGGER.warn(String.format("Could not get cpu_shares of domain: [%s]. Returning default value of 0. 
", dm.getName())); return 0; } @@ -5293,7 +5294,7 @@ public String createLibvirtVolumeSecret(Connect conn, String consumer, byte[] da Match match = new Match(); if (UuidUtils.getUuidRegex().matches(ex.getMessage(), match)) { secretUuid = match.getCapturedText(0); - s_logger.info(String.format("Reusing previously defined secret '%s' for volume '%s'", secretUuid, consumer)); + LOGGER.info(String.format("Reusing previously defined secret '%s' for volume '%s'", secretUuid, consumer)); } else { throw ex; } @@ -5311,12 +5312,12 @@ public void removeLibvirtVolumeSecret(Connect conn, String secretUuid) throws Li secret.undefine(); } catch (LibvirtException ex) { if (ex.getMessage().contains("Secret not found")) { - s_logger.debug(String.format("Secret uuid %s doesn't exist", secretUuid)); + LOGGER.debug(String.format("Secret uuid %s doesn't exist", secretUuid)); return; } throw ex; } - s_logger.debug(String.format("Undefined secret %s", secretUuid)); + LOGGER.debug(String.format("Undefined secret %s", secretUuid)); } public void cleanOldSecretsByDiskDef(Connect conn, List disks) throws LibvirtException { @@ -5345,7 +5346,7 @@ public void setInterfaceDefQueueSettings(Map details, Integer cp interfaceDef.setMultiQueueNumber(nicMultiqueueNumberInteger); } } catch (NumberFormatException ex) { - s_logger.warn(String.format("VM details %s is not a valid integer value %s", VmDetailConstants.NIC_MULTIQUEUE_NUMBER, nicMultiqueueNumber)); + logger.warn(String.format("VM details %s is not a valid integer value %s", VmDetailConstants.NIC_MULTIQUEUE_NUMBER, nicMultiqueueNumber)); } } String nicPackedEnabled = details.get(VmDetailConstants.NIC_PACKED_VIRTQUEUES_ENABLED); @@ -5353,7 +5354,7 @@ public void setInterfaceDefQueueSettings(Map details, Integer cp try { interfaceDef.setPackedVirtQueues(Boolean.valueOf(nicPackedEnabled)); } catch (NumberFormatException ex) { - s_logger.warn(String.format("VM details %s is not a valid Boolean value %s", 
VmDetailConstants.NIC_PACKED_VIRTQUEUES_ENABLED, nicPackedEnabled)); + logger.warn(String.format("VM details %s is not a valid Boolean value %s", VmDetailConstants.NIC_PACKED_VIRTQUEUES_ENABLED, nicPackedEnabled)); } } } @@ -5368,11 +5369,11 @@ public String copyVolume(String srcIp, String username, String password, String command.append(remoteFile); command.append(" "+tmpPath); command.append(outputFile); - s_logger.debug("Converting remoteFile: "+remoteFile); + logger.debug("Converting remoteFile: "+remoteFile); SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString()); - s_logger.debug("Copying remoteFile to: "+localDir); + logger.debug("Copying remoteFile to: "+localDir); SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile); - s_logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile); + logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile); return outputFile; } catch (Exception e) { throw new RuntimeException(e); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtConnection.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtConnection.java index 1f4ab23a43fc..2b0e088f5c73 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtConnection.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtConnection.java @@ -21,7 +21,8 @@ import com.cloud.agent.properties.AgentProperties; import com.cloud.agent.properties.AgentPropertiesFileHandler; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.Connect; import org.libvirt.Library; import org.libvirt.LibvirtException; @@ -30,7 +31,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; public class LibvirtConnection { - private static final Logger s_logger = 
Logger.getLogger(LibvirtConnection.class); + protected static Logger LOGGER = LogManager.getLogger(LibvirtConnection.class); static private Map s_connections = new HashMap(); static private Connect s_connection; @@ -42,29 +43,29 @@ static public Connect getConnection() throws LibvirtException { } static public Connect getConnection(String hypervisorURI) throws LibvirtException { - s_logger.debug("Looking for libvirtd connection at: " + hypervisorURI); + LOGGER.debug("Looking for libvirtd connection at: " + hypervisorURI); Connect conn = s_connections.get(hypervisorURI); if (conn == null) { - s_logger.info("No existing libvirtd connection found. Opening a new one"); + LOGGER.info("No existing libvirtd connection found. Opening a new one"); setupEventListener(); conn = new Connect(hypervisorURI, false); - s_logger.debug("Successfully connected to libvirt at: " + hypervisorURI); + LOGGER.debug("Successfully connected to libvirt at: " + hypervisorURI); s_connections.put(hypervisorURI, conn); } else { try { conn.getVersion(); } catch (LibvirtException e) { - s_logger.error("Connection with libvirtd is broken: " + e.getMessage()); + LOGGER.error("Connection with libvirtd is broken: " + e.getMessage()); try { conn.close(); } catch (LibvirtException closeEx) { - s_logger.debug("Ignoring error while trying to close broken connection:" + closeEx.getMessage()); + LOGGER.debug("Ignoring error while trying to close broken connection:" + closeEx.getMessage()); } - s_logger.debug("Opening a new libvirtd connection to: " + hypervisorURI); + LOGGER.debug("Opening a new libvirtd connection to: " + hypervisorURI); setupEventListener(); conn = new Connect(hypervisorURI, false); s_connections.put(hypervisorURI, conn); @@ -84,11 +85,11 @@ static public Connect getConnectionByVmName(String vmName) throws LibvirtExcepti return conn; } } catch (Exception e) { - s_logger.debug("Can not find " + hypervisor.toString() + " connection for Instance: " + vmName + ", continuing."); + 
LOGGER.debug("Can not find " + hypervisor.toString() + " connection for Instance: " + vmName + ", continuing."); } } - s_logger.warn("Can not find a connection for Instance " + vmName + ". Assuming the default connection."); + LOGGER.warn("Can not find a connection for Instance " + vmName + ". Assuming the default connection."); // return the default connection return getConnection(); } @@ -130,9 +131,9 @@ private static synchronized void setupEventListener() throws LibvirtException { // This blocking call contains a loop of its own that will process events until the event loop is stopped or exception is thrown. Library.runEventLoop(); } catch (LibvirtException e) { - s_logger.error("LibvirtException was thrown in event loop: ", e); + LOGGER.error("LibvirtException was thrown in event loop: ", e); } catch (InterruptedException e) { - s_logger.error("Libvirt event loop was interrupted: ", e); + LOGGER.error("Libvirt event loop was interrupted: ", e); } } }); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java index f165796adef2..ad217f6052db 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtDomainXMLParser.java @@ -29,7 +29,8 @@ import org.apache.cloudstack.utils.security.ParserUtils; import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.utils.qemu.QemuObject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -49,7 +50,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef.WatchDogModel; public class LibvirtDomainXMLParser { - private static final Logger s_logger = 
Logger.getLogger(LibvirtDomainXMLParser.class); + protected Logger logger = LogManager.getLogger(getClass()); private final List interfaces = new ArrayList(); private MemBalloonDef memBalloonDef = new MemBalloonDef(); private final List diskDefs = new ArrayList(); @@ -332,7 +333,7 @@ public boolean parseDomainXML(String domXML) { String bytes = getAttrValue("rate", "bytes", rng); String period = getAttrValue("rate", "period", rng); if (StringUtils.isAnyEmpty(bytes, period)) { - s_logger.debug(String.format("Bytes and period in the rng section should not be null, please check the VM %s", name)); + logger.debug(String.format("Bytes and period in the rng section should not be null, please check the VM %s", name)); } if (bytes == null) { @@ -390,11 +391,11 @@ public boolean parseDomainXML(String domXML) { extractCpuModeDef(rootElement); return true; } catch (ParserConfigurationException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (SAXException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (IOException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } return false; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtKvmAgentHook.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtKvmAgentHook.java index 2ef65293de2e..edcc5a053269 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtKvmAgentHook.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtKvmAgentHook.java @@ -22,7 +22,8 @@ import groovy.util.GroovyScriptEngine; import groovy.util.ResourceException; import groovy.util.ScriptException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.codehaus.groovy.runtime.metaclass.MissingMethodExceptionNoStack; import java.io.File; @@ -34,14 +35,14 @@ public class 
LibvirtKvmAgentHook { private final GroovyScriptEngine gse; private final Binding binding = new Binding(); - private static final Logger s_logger = Logger.getLogger(LibvirtKvmAgentHook.class); + protected Logger logger = LogManager.getLogger(getClass()); public LibvirtKvmAgentHook(String path, String script, String method) throws IOException { this.script = script; this.method = method; File full_path = new File(path, script); if (!full_path.canRead()) { - s_logger.warn("Groovy script '" + full_path.toString() + "' is not available. Transformations will not be applied."); + logger.warn("Groovy script '" + full_path.toString() + "' is not available. Transformations will not be applied."); this.gse = null; } else { this.gse = new GroovyScriptEngine(path); @@ -54,21 +55,21 @@ public boolean isInitialized() { public Object handle(Object arg) throws ResourceException, ScriptException { if (!isInitialized()) { - s_logger.warn("Groovy scripting engine is not initialized. Data transformation skipped."); + logger.warn("Groovy scripting engine is not initialized. 
Data transformation skipped."); return arg; } GroovyObject cls = (GroovyObject) this.gse.run(this.script, binding); if (null == cls) { - s_logger.warn("Groovy object is not received from script '" + this.script + "'."); + logger.warn("Groovy object is not received from script '" + this.script + "'."); return arg; } else { - Object[] params = {s_logger, arg}; + Object[] params = {logger, arg}; try { Object res = cls.invokeMethod(this.method, params); return res; } catch (MissingMethodExceptionNoStack e) { - s_logger.error("Error occurred when calling method from groovy script, {}", e); + logger.error("Error occurred when calling method from groovy script, {}", e); return arg; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java index f0ec29f41f69..ff44c8df2fa6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java @@ -18,7 +18,7 @@ public class LibvirtStoragePoolDef { public enum PoolType { - ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"), POWERFLEX("powerflex"); + ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"), POWERFLEX("powerflex"); String _poolType; PoolType(String poolType) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java index d19c851d7dcc..30616e047987 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java @@ -26,7 +26,8 @@ import org.apache.cloudstack.utils.security.ParserUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -35,7 +36,7 @@ import org.xml.sax.SAXException; public class LibvirtStoragePoolXMLParser { - private static final Logger s_logger = Logger.getLogger(LibvirtStoragePoolXMLParser.class); + protected Logger logger = LogManager.getLogger(getClass()); public LibvirtStoragePoolDef parseStoragePoolXML(String poolXML) { DocumentBuilder builder; @@ -101,11 +102,11 @@ public LibvirtStoragePoolDef parseStoragePoolXML(String poolXML) { return new LibvirtStoragePoolDef(LibvirtStoragePoolDef.PoolType.valueOf(type.toUpperCase()), poolName, uuid, host, path, targetPath); } } catch (ParserConfigurationException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (SAXException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (IOException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } return null; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStorageVolumeXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStorageVolumeXMLParser.java index c4132ca1ba3b..1b6f73039ca5 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStorageVolumeXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStorageVolumeXMLParser.java @@ -23,7 +23,8 @@ import javax.xml.parsers.ParserConfigurationException; import org.apache.cloudstack.utils.security.ParserUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; 
+import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -32,7 +33,7 @@ import org.xml.sax.SAXException; public class LibvirtStorageVolumeXMLParser { - private static final Logger s_logger = Logger.getLogger(LibvirtStorageVolumeXMLParser.class); + protected Logger logger = LogManager.getLogger(getClass()); public LibvirtStorageVolumeDef parseStorageVolumeXML(String volXML) { DocumentBuilder builder; @@ -51,11 +52,11 @@ public LibvirtStorageVolumeDef parseStorageVolumeXML(String volXML) { Long capacity = Long.parseLong(getTagValue("capacity", rootElement)); return new LibvirtStorageVolumeDef(VolName, capacity, LibvirtStorageVolumeDef.VolumeFormat.getFormat(format), null, null); } catch (ParserConfigurationException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (SAXException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (IOException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } return null; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index 6b5fac0e942b..a951cc2af068 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -26,13 +26,14 @@ import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.properties.AgentProperties; import com.cloud.agent.properties.AgentPropertiesFileHandler; public class LibvirtVMDef { - private static final Logger s_logger = Logger.getLogger(LibvirtVMDef.class); + 
protected static Logger LOGGER = LogManager.getLogger(LibvirtVMDef.class); private String _hvsType; private static long s_libvirtVersion; @@ -861,7 +862,7 @@ public void defISODisk(String volPath, Integer devId) { public void defISODisk(String volPath, Integer devId, String diskLabel) { if (devId == null && StringUtils.isBlank(diskLabel)) { - s_logger.debug(String.format("No ID or label informed for volume [%s].", volPath)); + LOGGER.debug(String.format("No ID or label informed for volume [%s].", volPath)); defISODisk(volPath); return; } @@ -871,11 +872,11 @@ public void defISODisk(String volPath, Integer devId, String diskLabel) { _sourcePath = volPath; if (StringUtils.isNotBlank(diskLabel)) { - s_logger.debug(String.format("Using informed label [%s] for volume [%s].", diskLabel, volPath)); + LOGGER.debug(String.format("Using informed label [%s] for volume [%s].", diskLabel, volPath)); _diskLabel = diskLabel; } else { _diskLabel = getDevLabel(devId, DiskBus.IDE, true); - s_logger.debug(String.format("Using device ID [%s] to define the label [%s] for volume [%s].", devId, _diskLabel, volPath)); + LOGGER.debug(String.format("Using device ID [%s] to define the label [%s] for volume [%s].", devId, _diskLabel, volPath)); } _diskFmtType = DiskFmtType.RAW; @@ -2069,7 +2070,7 @@ public String toString() { } } - public static class MetadataDef { + public static class MetadataDef { Map customNodes = new HashMap<>(); public T getMetadataNode(Class fieldClass) { @@ -2079,7 +2080,7 @@ public T getMetadataNode(Class fieldClass) { field = fieldClass.newInstance(); customNodes.put(field.getClass().getName(), field); } catch (InstantiationException | IllegalAccessException e) { - s_logger.debug("No default constructor available in class " + fieldClass.getName() + ", ignoring exception", e); + LOGGER.debug("No default constructor available in class " + fieldClass.getName() + ", ignoring exception", e); } } return field; diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java index 48a379c278f3..f5de9b754120 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java @@ -24,13 +24,14 @@ import javax.xml.parsers.SAXParserFactory; import org.apache.cloudstack.utils.security.ParserUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.xml.sax.InputSource; import org.xml.sax.SAXException; import org.xml.sax.helpers.DefaultHandler; public class LibvirtXMLParser extends DefaultHandler { - private static final Logger s_logger = Logger.getLogger(LibvirtXMLParser.class); + protected Logger logger = LogManager.getLogger(getClass()); protected static final SAXParserFactory s_spf; static { s_spf = ParserUtils.getSaferSAXParserFactory(); @@ -43,9 +44,9 @@ public LibvirtXMLParser() { _sp = s_spf.newSAXParser(); _initialized = true; } catch (ParserConfigurationException e) { - s_logger.trace("Ignoring xml parser error.", e); + logger.trace("Ignoring xml parser error.", e); } catch (SAXException e) { - s_logger.trace("Ignoring xml parser error.", e); + logger.trace("Ignoring xml parser error.", e); } } @@ -57,9 +58,9 @@ public boolean parseDomainXML(String domXML) { _sp.parse(new InputSource(new StringReader(domXML)), this); return true; } catch (SAXException se) { - s_logger.warn(se.getMessage()); + logger.warn(se.getMessage()); } catch (IOException ie) { - s_logger.error(ie.getMessage()); + logger.error(ie.getMessage()); } return false; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java index 
3c4c9d48c9d6..e3ce9f4b8a62 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/OvsVifDriver.java @@ -30,7 +30,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import com.cloud.agent.api.to.NicTO; @@ -45,7 +44,6 @@ import com.cloud.utils.script.Script; public class OvsVifDriver extends VifDriverBase { - private static final Logger s_logger = Logger.getLogger(OvsVifDriver.class); private int _timeout; private String _controlCidr = NetUtils.getLinkLocalCIDR(); private DpdkDriver dpdkDriver; @@ -68,10 +66,10 @@ public void configure(Map params) throws ConfigurationException public void getPifs() { final String cmdout = Script.runSimpleBashScript("ovs-vsctl list-br | sed '{:q;N;s/\\n/%/g;t q}'"); - s_logger.debug("cmdout was " + cmdout); + logger.debug("cmdout was " + cmdout); final List bridges = Arrays.asList(cmdout.split("%")); for (final String bridge : bridges) { - s_logger.debug("looking for pif for bridge " + bridge); + logger.debug("looking for pif for bridge " + bridge); // String pif = getOvsPif(bridge); // Not really interested in the pif name at this point for ovs // bridges @@ -84,7 +82,7 @@ public void getPifs() { } _pifs.put(bridge, pif); } - s_logger.debug("done looking for pifs, no more bridges"); + logger.debug("done looking for pifs, no more bridges"); } /** @@ -94,7 +92,7 @@ public void getPifs() { */ protected void plugDPDKInterface(InterfaceDef intf, String trafficLabel, Map extraConfig, String vlanId, String guestOsType, NicTO nic, String nicAdapter) { - s_logger.debug("DPDK support enabled: configuring per traffic label " + trafficLabel); + logger.debug("DPDK support enabled: configuring per traffic label " + trafficLabel); String dpdkOvsPath = 
_libvirtComputingResource.dpdkOvsPath; if (StringUtils.isBlank(dpdkOvsPath)) { throw new CloudRuntimeException("DPDK is enabled on the host but no OVS path has been provided"); @@ -111,7 +109,7 @@ protected void plugDPDKInterface(InterfaceDef intf, String trafficLabel, Map extraConfig) throws InternalErrorException, LibvirtException { - s_logger.debug("plugging nic=" + nic); + logger.debug("plugging nic=" + nic); LibvirtVMDef.InterfaceDef intf = new LibvirtVMDef.InterfaceDef(); if (!_libvirtComputingResource.dpdkSupport || !nic.isDpdkEnabled()) { @@ -139,7 +137,7 @@ public InterfaceDef plug(NicTO nic, String guestOsType, String nicAdapter, Map _pifs; protected Map _bridges; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceAgentExecutor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceAgentExecutor.java index 7635b81b918c..6ba3d510dd8f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceAgentExecutor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceAgentExecutor.java @@ -21,14 +21,12 @@ import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.joda.time.Duration; import java.io.File; public class RollingMaintenanceAgentExecutor extends RollingMaintenanceExecutorBase implements RollingMaintenanceExecutor { - private static final Logger s_logger = Logger.getLogger(RollingMaintenanceAgentExecutor.class); private String output; private boolean success; @@ -41,17 +39,17 @@ public RollingMaintenanceAgentExecutor(String hooksDir) { public Pair startStageExecution(String stage, File scriptFile, int timeout, String payload) { checkHooksDirectory(); Duration duration = 
Duration.standardSeconds(timeout); - final Script script = new Script(scriptFile.getAbsolutePath(), duration, s_logger); + final Script script = new Script(scriptFile.getAbsolutePath(), duration, logger); final OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); if (StringUtils.isNotEmpty(payload)) { script.add(payload); } - s_logger.info("Executing stage: " + stage + " script: " + script); + logger.info("Executing stage: " + stage + " script: " + script); output = script.execute(parser) + " " + parser.getLines(); if (script.isTimeout()) { String msg = "Script " + scriptFile + " timed out"; - s_logger.error(msg); + logger.error(msg); success = false; return new Pair<>(false, msg); } @@ -62,10 +60,10 @@ public Pair startStageExecution(String stage, File scriptFile, } success = exitValue == 0 || exitValue == exitValueAvoidMaintenance; setAvoidMaintenance(exitValue == exitValueAvoidMaintenance); - s_logger.info("Execution finished for stage: " + stage + " script: " + script + ": " + exitValue); - if (s_logger.isDebugEnabled()) { - s_logger.debug(output); - s_logger.debug("Stage " + stage + " execution finished: " + exitValue); + logger.info("Execution finished for stage: " + stage + " script: " + script + ": " + exitValue); + if (logger.isDebugEnabled()) { + logger.debug(output); + logger.debug("Stage " + stage + " execution finished: " + exitValue); } return new Pair<>(true, "Stage " + stage + " finished"); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceExecutorBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceExecutorBase.java index 70c8e1983d61..b74faca0ecf6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceExecutorBase.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceExecutorBase.java @@ -18,7 +18,8 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.io.File; @@ -30,7 +31,7 @@ public abstract class RollingMaintenanceExecutorBase implements RollingMaintenan static final int exitValueAvoidMaintenance = 70; static final int exitValueTerminatedSignal = 143; - private static final Logger s_logger = Logger.getLogger(RollingMaintenanceExecutor.class); + protected Logger logger = LogManager.getLogger(getClass()); void setTimeout(int timeout) { this.timeout = timeout; @@ -66,7 +67,7 @@ public File getStageScriptFile(String stage) { return new File(scriptPath + ".py"); } else { String msg = "Unable to locate script for stage: " + stage + " in directory: " + hooksDir; - s_logger.warn(msg); + logger.warn(msg); return null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceServiceExecutor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceServiceExecutor.java index bf8147ac8534..c9edcc104b7c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceServiceExecutor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/rolling/maintenance/RollingMaintenanceServiceExecutor.java @@ -21,7 +21,6 @@ import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.File; import java.io.IOException; @@ -36,7 +35,6 @@ public class RollingMaintenanceServiceExecutor extends RollingMaintenanceExecuto private static 
final String resultsFileSuffix = "rolling-maintenance-results"; private static final String outputFileSuffix = "rolling-maintenance-output"; - private static final Logger s_logger = Logger.getLogger(RollingMaintenanceServiceExecutor.class); public RollingMaintenanceServiceExecutor(String hooksDir) { super(hooksDir); @@ -55,15 +53,15 @@ private String generateInstanceName(String stage, String file, String payload) { } private String invokeService(String action, String stage, String file, String payload) { - s_logger.debug("Invoking rolling maintenance service for stage: " + stage + " and file " + file + " with action: " + action); + logger.debug("Invoking rolling maintenance service for stage: " + stage + " and file " + file + " with action: " + action); final OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); - Script command = new Script("/bin/systemctl", s_logger); + Script command = new Script("/bin/systemctl", logger); command.add(action); String service = servicePrefix + "@" + generateInstanceName(stage, file, payload); command.add(service); String result = command.execute(parser); int exitValue = command.getExitValue(); - s_logger.trace("Execution: " + command.toString() + " - exit code: " + exitValue + + logger.trace("Execution: " + command.toString() + " - exit code: " + exitValue + ": " + result + (StringUtils.isNotBlank(parser.getLines()) ? parser.getLines() : "")); return StringUtils.isBlank(result) ? 
parser.getLines().replace("\n", " ") : result; } @@ -76,7 +74,7 @@ public Pair startStageExecution(String stage, File scriptFile, if (StringUtils.isNotBlank(result)) { throw new CloudRuntimeException("Error starting stage: " + stage + " execution: " + result); } - s_logger.trace("Stage " + stage + "execution started"); + logger.trace("Stage " + stage + "execution started"); return new Pair<>(true, "OK"); } @@ -111,7 +109,7 @@ public boolean isStageRunning(String stage, File scriptFile, String payload) { if (StringUtils.isNotBlank(result) && result.equals("failed")) { String status = invokeService("status", stage, scriptFile.getAbsolutePath(), payload); String errorMsg = "Stage " + stage + " execution failed, status: " + status; - s_logger.error(errorMsg); + logger.error(errorMsg); throw new CloudRuntimeException(errorMsg); } return StringUtils.isNotBlank(result) && result.equals("active"); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtBackupSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtBackupSnapshotCommandWrapper.java index 780fc35b563e..656a63d3db0a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtBackupSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtBackupSnapshotCommandWrapper.java @@ -25,7 +25,6 @@ import java.io.IOException; import java.text.MessageFormat; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; @@ -56,7 +55,6 @@ @ResourceWrapper(handles = BackupSnapshotCommand.class) public final class LibvirtBackupSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtBackupSnapshotCommandWrapper.class); @Override public Answer execute(final BackupSnapshotCommand command, final 
LibvirtComputingResource libvirtComputingResource) { @@ -104,7 +102,7 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin r.confSet("key", primaryPool.getAuthSecret()); r.confSet("client_mount_timeout", "30"); r.connect(); - s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); + logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir()); final Rbd rbd = new Rbd(io); @@ -113,7 +111,7 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin try(BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(fh));) { final int chunkSize = 4194304; long offset = 0; - s_logger.debug("Backuping up RBD snapshot " + snapshotName + " to " + snapshotDestPath); + logger.debug("Backuping up RBD snapshot " + snapshotName + " to " + snapshotDestPath); while (true) { final byte[] buf = new byte[chunkSize]; final int bytes = image.read(offset, buf, chunkSize); @@ -123,21 +121,21 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin bos.write(buf, 0, bytes); offset += bytes; } - s_logger.debug("Completed backing up RBD snapshot " + snapshotName + " to " + snapshotDestPath + ". Bytes written: " + toHumanReadableSize(offset)); + logger.debug("Completed backing up RBD snapshot " + snapshotName + " to " + snapshotDestPath + ". Bytes written: " + toHumanReadableSize(offset)); }catch(final IOException ex) { - s_logger.error("BackupSnapshotAnswer:Exception:"+ ex.getMessage()); + logger.error("BackupSnapshotAnswer:Exception:"+ ex.getMessage()); } r.ioCtxDestroy(io); } catch (final RadosException e) { - s_logger.error("A RADOS operation failed. The error was: " + e.getMessage()); + logger.error("A RADOS operation failed. 
The error was: " + e.getMessage()); return new BackupSnapshotAnswer(command, false, e.toString(), null, true); } catch (final RbdException e) { - s_logger.error("A RBD operation on " + snapshotDisk.getName() + " failed. The error was: " + e.getMessage()); + logger.error("A RBD operation on " + snapshotDisk.getName() + " failed. The error was: " + e.getMessage()); return new BackupSnapshotAnswer(command, false, e.toString(), null, true); } } else { - final Script scriptCommand = new Script(manageSnapshotPath, cmdsTimeout, s_logger); + final Script scriptCommand = new Script(manageSnapshotPath, cmdsTimeout, logger); scriptCommand.add("-b", snapshotDisk.getPath()); scriptCommand.add("-n", snapshotName); scriptCommand.add("-p", snapshotDestPath); @@ -145,7 +143,7 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin final String result = scriptCommand.execute(); if (result != null) { - s_logger.debug("Failed to backup snaptshot: " + result); + logger.debug("Failed to backup snaptshot: " + result); return new BackupSnapshotAnswer(command, false, result, null, true); } } @@ -158,7 +156,7 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin vm = libvirtComputingResource.getDomain(conn, command.getVmName()); state = vm.getInfo().state; } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + logger.trace("Ignoring libvirt error.", e); } } @@ -171,7 +169,7 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin final String vmUuid = vm.getUUIDString(); final Object[] args = new Object[] {snapshotName, vmUuid}; final String snapshot = snapshotXML.format(args); - s_logger.debug(snapshot); + logger.debug(snapshot); final DomainSnapshot snap = vm.snapshotLookupByName(snapshotName); if (snap != null) { snap.delete(0); @@ -189,12 +187,12 @@ public Answer execute(final BackupSnapshotCommand command, final LibvirtComputin vm.resume(); } } else { - final Script 
scriptCommand = new Script(manageSnapshotPath, cmdsTimeout, s_logger); + final Script scriptCommand = new Script(manageSnapshotPath, cmdsTimeout, logger); scriptCommand.add("-d", snapshotDisk.getPath()); scriptCommand.add("-n", snapshotName); final String result = scriptCommand.execute(); if (result != null) { - s_logger.debug("Failed to backup snapshot: " + result); + logger.debug("Failed to backup snapshot: " + result); return new BackupSnapshotAnswer(command, false, "Failed to backup snapshot: " + result, null, true); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckSshCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckSshCommandWrapper.java index a04ddecd2503..d41cd63fa4ec 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckSshCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckSshCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.check.CheckSshAnswer; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = CheckSshCommand.class) public final class LibvirtCheckSshCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.class); @Override public Answer execute(final CheckSshCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -40,8 +38,8 @@ public Answer execute(final CheckSshCommand command, final LibvirtComputingResou final String privateIp = command.getIp(); final int cmdPort = command.getPort(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port, " + privateIp + ":" + 
cmdPort); } final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource(); @@ -49,8 +47,8 @@ public Answer execute(final CheckSshCommand command, final LibvirtComputingResou return new CheckSshAnswer(command, "Can not ping System vm " + vmName + " because of a connection failure"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port succeeded for vm " + vmName); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port succeeded for vm " + vmName); } return new CheckSshAnswer(command); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java index 3d57ba0d5dea..b1d57f41fb78 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckStorageAvailabilityWrapper.java @@ -28,14 +28,12 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.util.Map; @ResourceWrapper(handles = CheckStorageAvailabilityCommand.class) public class LibvirtCheckStorageAvailabilityWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCheckStorageAvailabilityWrapper.class); @Override public Answer execute(CheckStorageAvailabilityCommand command, LibvirtComputingResource resource) { @@ -44,15 +42,15 @@ public Answer execute(CheckStorageAvailabilityCommand command, LibvirtComputingR for (String poolUuid : poolsMap.keySet()) { Storage.StoragePoolType type = poolsMap.get(poolUuid); - s_logger.debug("Checking if storage pool " + poolUuid + " (" + type + ") is mounted on this host"); + 
logger.debug("Checking if storage pool " + poolUuid + " (" + type + ") is mounted on this host"); try { KVMStoragePool storagePool = storagePoolMgr.getStoragePool(type, poolUuid); if (storagePool == null) { - s_logger.info("Storage pool " + poolUuid + " is not available"); + logger.info("Storage pool " + poolUuid + " is not available"); return new Answer(command, false, "Storage pool " + poolUuid + " not available"); } } catch (CloudRuntimeException e) { - s_logger.info("Storage pool " + poolUuid + " is not available"); + logger.info("Storage pool " + poolUuid + " is not available"); return new Answer(command, e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java index 5faad5633f3b..90befe6ed643 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.direct.download.DirectDownloadHelper; import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; -import org.apache.log4j.Logger; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.resource.CommandWrapper; @@ -30,7 +29,6 @@ @ResourceWrapper(handles = CheckUrlCommand.class) public class LibvirtCheckUrlCommand extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCheckUrlCommand.class); @Override public CheckUrlAnswer execute(CheckUrlCommand cmd, LibvirtComputingResource serverResource) { @@ -39,14 +37,14 @@ public CheckUrlAnswer execute(CheckUrlCommand cmd, LibvirtComputingResource serv final Integer connectionRequestTimeout = cmd.getConnectionRequestTimeout(); final Integer 
socketTimeout = cmd.getSocketTimeout(); - s_logger.info(String.format("Checking URL: %s, with connect timeout: %d, connect request timeout: %d, socket timeout: %d", url, connectTimeout, connectionRequestTimeout, socketTimeout)); + logger.info(String.format("Checking URL: %s, with connect timeout: %d, connect request timeout: %d, socket timeout: %d", url, connectTimeout, connectionRequestTimeout, socketTimeout)); Long remoteSize = null; boolean checkResult = DirectDownloadHelper.checkUrlExistence(url, connectTimeout, connectionRequestTimeout, socketTimeout); if (checkResult) { remoteSize = DirectDownloadHelper.getFileSize(url, cmd.getFormat(), connectTimeout, connectionRequestTimeout, socketTimeout); if (remoteSize == null || remoteSize < 0) { - s_logger.error(String.format("Couldn't properly retrieve the remote size of the template on " + + logger.error(String.format("Couldn't properly retrieve the remote size of the template on " + "url %s, obtained size = %s", url, remoteSize)); return new CheckUrlAnswer(false, remoteSize); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java index 8b0a5aab4619..c8b0aafd0d6d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckVolumeCommandWrapper.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import java.util.Map; @@ -42,8 +41,6 @@ @ResourceWrapper(handles = CheckVolumeCommand.class) public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper 
{ - private static final Logger s_logger = Logger.getLogger(LibvirtCheckVolumeCommandWrapper.class); - @Override public Answer execute(final CheckVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { String result = null; @@ -64,7 +61,7 @@ public Answer execute(final CheckVolumeCommand command, final LibvirtComputingRe } } catch (final Exception e) { - s_logger.error("Error while locating disk: "+ e.getMessage()); + logger.error("Error while locating disk: "+ e.getMessage()); return new Answer(command, false, result); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCleanupPersistentNetworkResourceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCleanupPersistentNetworkResourceCommandWrapper.java index ebc147a73a32..9199be4b84ce 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCleanupPersistentNetworkResourceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCleanupPersistentNetworkResourceCommandWrapper.java @@ -17,7 +17,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CleanupPersistentNetworkResourceAnswer; @@ -31,7 +30,6 @@ @ResourceWrapper(handles = CleanupPersistentNetworkResourceCommand.class) public class LibvirtCleanupPersistentNetworkResourceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCleanupPersistentNetworkResourceCommandWrapper.class); @Override public Answer execute(CleanupPersistentNetworkResourceCommand command, LibvirtComputingResource serverResource) { NicTO nic = command.getNicTO(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsoleProxyLoadCommandWrapper.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsoleProxyLoadCommandWrapper.java index 1a2f7cb20b6c..ecfa062ed887 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsoleProxyLoadCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsoleProxyLoadCommandWrapper.java @@ -26,7 +26,6 @@ import java.net.URL; import java.net.URLConnection; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -36,7 +35,6 @@ public abstract class LibvirtConsoleProxyLoadCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtConsoleProxyLoadCommandWrapper.class); public Answer executeProxyLoadScan(final Command cmd, final long proxyVmId, final String proxyVmName, final String proxyManagementIp, final int cmdPort) { String result = null; @@ -64,12 +62,12 @@ public Answer executeProxyLoadScan(final Command cmd, final long proxyVmId, fina try { is.close(); } catch (final IOException e) { - s_logger.warn("Exception when closing , console proxy address : " + proxyManagementIp); + logger.warn("Exception when closing , console proxy address : " + proxyManagementIp); success = false; } } } catch (final IOException e) { - s_logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp); + logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp); success = false; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java index a26311891c87..bd6634c83a4c 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapper.java @@ -43,7 +43,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.BufferedInputStream; import java.io.File; @@ -60,8 +59,6 @@ @ResourceWrapper(handles = ConvertInstanceCommand.class) public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtConvertInstanceCommandWrapper.class); - private static final List supportedInstanceConvertSourceHypervisors = List.of(Hypervisor.HypervisorType.VMware); @@ -80,7 +77,7 @@ public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serve if (!isInstanceConversionSupportedOnHost()) { String msg = String.format("Cannot convert the instance %s from VMware as the virt-v2v binary is not found. " + "Please install virt-v2v on the host before attempting the instance conversion", sourceInstanceName); - s_logger.info(msg); + logger.info(msg); return new ConvertInstanceAnswer(cmd, false, msg); } @@ -88,14 +85,14 @@ public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serve String err = destinationHypervisorType != Hypervisor.HypervisorType.KVM ? 
String.format("The destination hypervisor type is %s, KVM was expected, cannot handle it", destinationHypervisorType) : String.format("The source hypervisor type %s is not supported for KVM conversion", sourceHypervisorType); - s_logger.error(err); + logger.error(err); return new ConvertInstanceAnswer(cmd, false, err); } final KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); KVMStoragePool temporaryStoragePool = getTemporaryStoragePool(conversionTemporaryLocation, storagePoolMgr); - s_logger.info(String.format("Attempting to convert the instance %s from %s to KVM", + logger.info(String.format("Attempting to convert the instance %s from %s to KVM", sourceInstanceName, sourceHypervisorType)); final String convertInstanceUrl = getConvertInstanceUrl(sourceInstance); final String temporaryConvertUuid = UUID.randomUUID().toString(); @@ -109,7 +106,7 @@ public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serve if (!result) { String err = String.format("The virt-v2v conversion of the instance %s failed. 
" + "Please check the agent logs for the virt-v2v output", sourceInstanceName); - s_logger.error(err); + logger.error(err); return new ConvertInstanceAnswer(cmd, false, err); } String convertedBasePath = String.format("%s/%s", temporaryConvertPath, temporaryConvertUuid); @@ -130,13 +127,13 @@ public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serve } catch (Exception e) { String error = String.format("Error converting instance %s from %s, due to: %s", sourceInstanceName, sourceHypervisorType, e.getMessage()); - s_logger.error(error, e); + logger.error(error, e); return new ConvertInstanceAnswer(cmd, false, error); } finally { - s_logger.debug("Cleaning up instance conversion temporary password file"); + logger.debug("Cleaning up instance conversion temporary password file"); Script.runSimpleBashScript(String.format("rm -rf %s", temporaryPasswordFilePath)); if (conversionTemporaryLocation instanceof NfsTO) { - s_logger.debug("Cleaning up secondary storage temporary location"); + logger.debug("Cleaning up secondary storage temporary location"); storagePoolMgr.deleteStoragePool(temporaryStoragePool.getType(), temporaryStoragePool.getUuid()); } } @@ -164,7 +161,7 @@ protected List getTemporaryDisksFromParsedXml(KVMStoragePool po x.getDeviceType() == LibvirtVMDef.DiskDef.DeviceType.DISK).collect(Collectors.toList()); if (CollectionUtils.isEmpty(disksDefs)) { String err = String.format("Cannot find any disk defined on the converted XML domain %s.xml", convertedBasePath); - s_logger.error(err); + logger.error(err); throw new CloudRuntimeException(err); } sanitizeDisksPath(disksDefs); @@ -182,7 +179,7 @@ private List getPhysicalDisksFromDefPaths(List getTemporaryDisksWithPrefixFromTemporaryPool(KVMStoragePool pool, String path, String prefix) { String msg = String.format("Could not parse correctly the converted XML domain, checking for disks on %s with prefix %s", path, prefix); - s_logger.info(msg); + logger.info(msg); pool.refresh(); List 
disksWithPrefix = pool.listPhysicalDisks() .stream() @@ -190,7 +187,7 @@ protected List getTemporaryDisksWithPrefixFromTemporaryPool(KVM .collect(Collectors.toList()); if (CollectionUtils.isEmpty(disksWithPrefix)) { msg = String.format("Could not find any converted disk with prefix %s on temporary location %s", prefix, path); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } return disksWithPrefix; @@ -200,10 +197,10 @@ private void cleanupDisksAndDomainFromTemporaryLocation(List di KVMStoragePool temporaryStoragePool, String temporaryConvertUuid) { for (KVMPhysicalDisk disk : disks) { - s_logger.info(String.format("Cleaning up temporary disk %s after conversion from temporary location", disk.getName())); + logger.info(String.format("Cleaning up temporary disk %s after conversion from temporary location", disk.getName())); temporaryStoragePool.deletePhysicalDisk(disk.getName(), Storage.ImageFormat.QCOW2); } - s_logger.info(String.format("Cleaning up temporary domain %s after conversion from temporary location", temporaryConvertUuid)); + logger.info(String.format("Cleaning up temporary domain %s after conversion from temporary location", temporaryConvertUuid)); Script.runSimpleBashScript(String.format("rm -f %s/%s*.xml", temporaryStoragePool.getLocalPath(), temporaryConvertUuid)); } @@ -227,21 +224,21 @@ protected List moveTemporaryDisksToDestination(List %s", password, passwordFile)); return passwordFile; } @@ -377,7 +374,7 @@ protected LibvirtDomainXMLParser parseMigratedVMXmlDomain(String installPath) th String xmlPath = String.format("%s.xml", installPath); if (!new File(xmlPath).exists()) { String err = String.format("Conversion failed. 
Unable to find the converted XML domain, expected %s", xmlPath); - s_logger.error(err); + logger.error(err); throw new CloudRuntimeException(err); } InputStream is = new BufferedInputStream(new FileInputStream(xmlPath)); @@ -388,8 +385,8 @@ protected LibvirtDomainXMLParser parseMigratedVMXmlDomain(String installPath) th return parser; } catch (RuntimeException e) { String err = String.format("Error parsing the converted instance XML domain at %s: %s", xmlPath, e.getMessage()); - s_logger.error(err, e); - s_logger.debug(xml); + logger.error(err, e); + logger.debug(xml); return null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java index e48edd8eec0d..025a5ed192cd 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import java.util.Map; @@ -42,8 +41,6 @@ @ResourceWrapper(handles = CopyRemoteVolumeCommand.class) public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCopyRemoteVolumeCommandWrapper.class); - @Override public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { String result = null; @@ -61,7 +58,7 @@ public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComput if (storageFilerTO.getType() == 
Storage.StoragePoolType.Filesystem || storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) { String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath); - s_logger.debug("Volume Copy Successful"); + logger.debug("Volume Copy Successful"); final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename); final String path = vol.getPath(); long size = getVirtualSizeFromFile(path); @@ -71,7 +68,7 @@ public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComput } } catch (final Exception e) { - s_logger.error("Error while copying file from remote host: "+ e.getMessage()); + logger.error("Error while copying file from remote host: "+ e.getMessage()); return new Answer(command, false, result); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyToSecondaryStorageWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyToSecondaryStorageWrapper.java index a6baa1c17855..a8ea0d2ce810 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyToSecondaryStorageWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyToSecondaryStorageWrapper.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageAnswer; import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageCommand; import org.apache.cloudstack.diagnostics.DiagnosticsService; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = CopyToSecondaryStorageCommand.class) public class LibvirtCopyToSecondaryStorageWrapper extends CommandWrapper { - public static final Logger LOGGER = Logger.getLogger(LibvirtCopyToSecondaryStorageWrapper.class); @Override public Answer 
execute(CopyToSecondaryStorageCommand command, LibvirtComputingResource libvirtResource) { @@ -64,7 +62,7 @@ public Answer execute(CopyToSecondaryStorageCommand command, LibvirtComputingRes Path path = Paths.get(dataDirectory.getAbsolutePath()); setDirFilePermissions(path); if (existsInSecondaryStore) { - LOGGER.info(String.format("Copying %s from %s to secondary store %s", diagnosticsZipFile, vmSshIp, secondaryStorageUrl)); + logger.info(String.format("Copying %s from %s to secondary store %s", diagnosticsZipFile, vmSshIp, secondaryStorageUrl)); int port = Integer.valueOf(LibvirtComputingResource.DEFAULTDOMRSSHPORT); File permKey = new File(LibvirtComputingResource.SSHPRVKEYPATH); SshHelper.scpFrom(vmSshIp, port, "root", permKey, dataDirectoryInSecondaryStore, diagnosticsZipFile); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java index 0795abf06888..4e42af6899aa 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyVolumeCommandWrapper.java @@ -37,11 +37,9 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; @ResourceWrapper(handles = CopyVolumeCommand.class) public final class LibvirtCopyVolumeCommandWrapper extends CommandWrapper { - private static final Logger LOGGER = Logger.getLogger(LibvirtCopyVolumeCommandWrapper.class); @Override public Answer execute(final CopyVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -164,7 +162,7 @@ private Answer handleCopyDataFromVolumeToSecondaryStorageUsingSrcDetails(CopyVol } } catch (Exception e) { - LOGGER.warn("Unable to 
disconnect from the source device.", e); + logger.warn("Unable to disconnect from the source device.", e); } if (secondaryStoragePool != null) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java index bac5551129a5..5ec00889df96 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.CreateAnswer; @@ -39,7 +38,6 @@ @ResourceWrapper(handles = CreateCommand.class) public final class LibvirtCreateCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCreateCommandWrapper.class); @Override public Answer execute(final CreateCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -80,7 +78,7 @@ public Answer execute(final CreateCommand command, final LibvirtComputingResourc volume.setCacheMode(dskch.getCacheMode()); return new CreateAnswer(command, volume); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to create volume: " + e.toString()); + logger.debug("Failed to create volume: " + e.toString()); return new CreateAnswer(command, e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromSnapshotCommandWrapper.java index de3d12f5c100..b05d6f06d742 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromSnapshotCommandWrapper.java @@ -24,7 +24,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; @@ -46,7 +45,6 @@ @ResourceWrapper(handles = CreatePrivateTemplateFromSnapshotCommand.class) public final class LibvirtCreatePrivateTemplateFromSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCreatePrivateTemplateFromSnapshotCommandWrapper.class); @Override public Answer execute(final CreatePrivateTemplateFromSnapshotCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -80,7 +78,7 @@ public Answer execute(final CreatePrivateTemplateFromSnapshotCommand command, fi final String createTmplPath = libvirtComputingResource.createTmplPath(); final int cmdsTimeout = libvirtComputingResource.getCmdsTimeout(); - final Script scriptCommand = new Script(createTmplPath, cmdsTimeout, s_logger); + final Script scriptCommand = new Script(createTmplPath, cmdsTimeout, logger); scriptCommand.add("-t", templatePath); scriptCommand.add("-n", tmplFileName); scriptCommand.add("-f", snapshot.getPath()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromVolumeCommandWrapper.java index 4a7aae512024..de35a1251bba 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromVolumeCommandWrapper.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreatePrivateTemplateFromVolumeCommandWrapper.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; @@ -60,7 +59,6 @@ @ResourceWrapper(handles = CreatePrivateTemplateFromVolumeCommand.class) public final class LibvirtCreatePrivateTemplateFromVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtCreatePrivateTemplateFromVolumeCommandWrapper.class); @Override public Answer execute(final CreatePrivateTemplateFromVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -96,7 +94,7 @@ public Answer execute(final CreatePrivateTemplateFromVolumeCommand command, fina final String createTmplPath = libvirtComputingResource.createTmplPath(); final int cmdsTimeout = libvirtComputingResource.getCmdsTimeout(); - final Script scriptCommand = new Script(createTmplPath, cmdsTimeout, s_logger); + final Script scriptCommand = new Script(createTmplPath, cmdsTimeout, logger); scriptCommand.add("-f", disk.getPath()); scriptCommand.add("-t", tmpltPath); scriptCommand.add("-n", command.getUniqueName() + ".qcow2"); @@ -104,11 +102,11 @@ public Answer execute(final CreatePrivateTemplateFromVolumeCommand command, fina final String result = scriptCommand.execute(); if (result != null) { - s_logger.debug("failed to create template: " + result); + logger.debug("failed to create template: " + result); return new CreatePrivateTemplateAnswer(command, false, result); } } else { - s_logger.debug("Converting RBD disk " + disk.getPath() + " into template " + command.getUniqueName()); + logger.debug("Converting RBD disk " + disk.getPath() + " into 
template " + command.getUniqueName()); final QemuImgFile srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(), primary.getSourcePort(), primary.getAuthUserName(), @@ -122,7 +120,7 @@ public Answer execute(final CreatePrivateTemplateFromVolumeCommand command, fina final QemuImg q = new QemuImg(0); q.convert(srcFile, destFile); } catch (final QemuImgException | LibvirtException e) { - s_logger.error("Failed to create new template while converting " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + + logger.error("Failed to create new template while converting " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); } @@ -142,7 +140,7 @@ public Answer execute(final CreatePrivateTemplateFromVolumeCommand command, fina templFo.flush(); }catch(final IOException ex) { - s_logger.error("CreatePrivateTemplateAnswer:Exception:"+ex.getMessage()); + logger.error("CreatePrivateTemplateAnswer:Exception:"+ex.getMessage()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateVMSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateVMSnapshotCommandWrapper.java index c7941e7fc897..7cada635a045 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateVMSnapshotCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = CreateVMSnapshotCommand.class) public final class LibvirtCreateVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = 
Logger.getLogger(LibvirtCreateVMSnapshotCommandWrapper.class); @Override public Answer execute(final CreateVMSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -67,14 +65,14 @@ public Answer execute(final CreateVMSnapshotCommand cmd, final LibvirtComputingR return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), cmd.getVolumeTOs()); } catch (LibvirtException e) { String msg = " Create VM snapshot failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new CreateVMSnapshotAnswer(cmd, false, msg); } finally { if (dm != null) { try { dm.free(); } catch (LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); }; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java index 5b55db24f4de..45b0c179938c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDeleteVMSnapshotCommandWrapper.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo; @@ -43,7 +42,6 @@ @ResourceWrapper(handles = DeleteVMSnapshotCommand.class) public final class LibvirtDeleteVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtDeleteVMSnapshotCommandWrapper.class); @Override public Answer execute(final DeleteVMSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -64,7 +62,7 @@ public Answer execute(final 
DeleteVMSnapshotCommand cmd, final LibvirtComputingR oldState = dm.getInfo().state; if (oldState == DomainInfo.DomainState.VIR_DOMAIN_RUNNING) { - s_logger.debug("Suspending domain " + vmName); + logger.debug("Suspending domain " + vmName); dm.suspend(); // suspend the vm to avoid image corruption } @@ -84,7 +82,7 @@ public Answer execute(final DeleteVMSnapshotCommand cmd, final LibvirtComputingR String msg = " Delete VM snapshot failed due to " + e.toString(); if (dm == null) { - s_logger.debug("Can not find running vm: " + vmName + ", now we are trying to delete the vm snapshot using qemu-img if the format of root volume is QCOW2"); + logger.debug("Can not find running vm: " + vmName + ", now we are trying to delete the vm snapshot using qemu-img if the format of root volume is QCOW2"); VolumeObjectTO rootVolume = null; for (VolumeObjectTO volume: cmd.getVolumeTOs()) { if (volume.getVolumeType() == Volume.Type.ROOT) { @@ -98,7 +96,7 @@ public Answer execute(final DeleteVMSnapshotCommand cmd, final LibvirtComputingR primaryStore.getUuid(), rootVolume.getPath()); String qemu_img_snapshot = Script.runSimpleBashScript("qemu-img snapshot -l " + rootDisk.getPath() + " | tail -n +3 | awk -F ' ' '{print $2}' | grep ^" + cmd.getTarget().getSnapshotName() + "$"); if (qemu_img_snapshot == null) { - s_logger.info("Cannot find snapshot " + cmd.getTarget().getSnapshotName() + " in file " + rootDisk.getPath() + ", return true"); + logger.info("Cannot find snapshot " + cmd.getTarget().getSnapshotName() + " in file " + rootDisk.getPath() + ", return true"); return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); } int result = Script.runSimpleBashScriptForExitValue("qemu-img snapshot -d " + cmd.getTarget().getSnapshotName() + " " + rootDisk.getPath()); @@ -110,14 +108,14 @@ public Answer execute(final DeleteVMSnapshotCommand cmd, final LibvirtComputingR } } } else if (snapshot == null) { - s_logger.debug("Can not find vm snapshot " + cmd.getTarget().getSnapshotName() + " on 
vm: " + vmName + ", return true"); + logger.debug("Can not find vm snapshot " + cmd.getTarget().getSnapshotName() + " on vm: " + vmName + ", return true"); return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); } else if (tryingResume) { - s_logger.error("Failed to resume vm after delete snapshot " + cmd.getTarget().getSnapshotName() + " on vm: " + vmName + " return true : " + e); + logger.error("Failed to resume vm after delete snapshot " + cmd.getTarget().getSnapshotName() + " on vm: " + vmName + " return true : " + e); return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); } - s_logger.warn(msg, e); + logger.warn(msg, e); return new DeleteVMSnapshotAnswer(cmd, false, msg); } finally { if (dm != null) { @@ -125,12 +123,12 @@ public Answer execute(final DeleteVMSnapshotCommand cmd, final LibvirtComputingR try { dm = libvirtComputingResource.getDomain(conn, vmName); if (oldState == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && dm.getInfo().state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) { - s_logger.debug("Resuming domain " + vmName); + logger.debug("Resuming domain " + vmName); dm.resume(); } dm.free(); } catch (LibvirtException e) { - s_logger.error("Failed to resume vm after delete snapshot " + cmd.getTarget().getSnapshotName() + " on vm: " + vmName + " return true : " + e); + logger.error("Failed to resume vm after delete snapshot " + cmd.getTarget().getSnapshotName() + " on vm: " + vmName + " return true : " + e); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDestroyCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDestroyCommandWrapper.java index 361d194189ee..b9106cec42f6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDestroyCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtDestroyCommandWrapper.java @@ -19,7 +19,6 
@@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.DestroyCommand; @@ -34,7 +33,6 @@ @ResourceWrapper(handles = DestroyCommand.class) public final class LibvirtDestroyCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtDestroyCommandWrapper.class); @Override public Answer execute(final DestroyCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -45,7 +43,7 @@ public Answer execute(final DestroyCommand command, final LibvirtComputingResour pool.deletePhysicalDisk(vol.getPath(), null); return new Answer(command, true, "Success"); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to delete volume: " + e.toString()); + logger.debug("Failed to delete volume: " + e.toString()); return new Answer(command, false, e.toString()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFenceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFenceCommandWrapper.java index 9a6ee7a41700..f1037d64384c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFenceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFenceCommandWrapper.java @@ -25,7 +25,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.FenceAnswer; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = FenceCommand.class) public final class LibvirtFenceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtFenceCommandWrapper.class); @Override public Answer execute(final FenceCommand command, final LibvirtComputingResource libvirtComputingResource) { 
@@ -56,7 +54,7 @@ public Answer execute(final FenceCommand command, final LibvirtComputingResource */ if (pools.size() == 0) { String logline = "No NFS storage pools found. No way to safely fence " + command.getVmName() + " on host " + command.getHostGuid(); - s_logger.warn(logline); + logger.warn(logline); return new FenceAnswer(command, false, logline); } @@ -71,10 +69,10 @@ public Answer execute(final FenceCommand command, final LibvirtComputingResource return new FenceAnswer(command); } } catch (final InterruptedException e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } catch (final ExecutionException e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFreezeThawVMCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFreezeThawVMCommandWrapper.java index 808d3a20bfbe..a6f2bcc93408 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFreezeThawVMCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFreezeThawVMCommandWrapper.java @@ -20,7 +20,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; import org.apache.cloudstack.utils.qemu.QemuCommand; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = FreezeThawVMCommand.class) public class LibvirtFreezeThawVMCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtFreezeThawVMCommandWrapper.class); @Override public Answer execute(FreezeThawVMCommand command, LibvirtComputingResource serverResource) { @@ -60,13 
+58,13 @@ public Answer execute(FreezeThawVMCommand command, LibvirtComputingResource serv } String result = getResultOfQemuCommand(command.getOption(), domain); - s_logger.debug(String.format("Result of %s command is %s", command.getOption(), result)); + logger.debug(String.format("Result of %s command is %s", command.getOption(), result)); if (result == null || (result.startsWith("error"))) { return new FreezeThawVMAnswer(command, false, String.format("Failed to %s vm %s due to result status is: %s", command.getOption(), vmName, result)); } String status = getResultOfQemuCommand(FreezeThawVMCommand.STATUS, domain); - s_logger.debug(String.format("Status of %s command is %s", command.getOption(), status)); + logger.debug(String.format("Status of %s command is %s", command.getOption(), status)); if (status != null && new JsonParser().parse(status).isJsonObject()) { String statusResult = new JsonParser().parse(status).getAsJsonObject().get("return").getAsString(); if (statusResult.equals(command.getOption())) { @@ -83,7 +81,7 @@ public Answer execute(FreezeThawVMCommand command, LibvirtComputingResource serv try { domain.free(); } catch (LibvirtException e) { - s_logger.trace("Ingore error ", e); + logger.trace("Ignore error ", e); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetHostStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetHostStatsCommandWrapper.java index 5e5835edd019..1af16e72ec09 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetHostStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetHostStatsCommandWrapper.java @@ -29,12 +29,10 @@ import com.cloud.utils.Pair; import org.apache.cloudstack.utils.linux.CPUStat; import org.apache.cloudstack.utils.linux.MemStat; -import org.apache.log4j.Logger; 
@ResourceWrapper(handles = GetHostStatsCommand.class) public final class LibvirtGetHostStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetHostStatsCommandWrapper.class); @Override public Answer execute(final GetHostStatsCommand command, final LibvirtComputingResource libvirtComputingResource) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java index 700f058b59b8..ead294ad05f7 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java @@ -32,7 +32,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.vm.UnmanagedInstanceTO; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainBlockInfo; @@ -46,8 +45,6 @@ @ResourceWrapper(handles = GetRemoteVmsCommand.class) public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetRemoteVmsCommandWrapper.class); - @Override public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) { String result = null; @@ -64,22 +61,22 @@ public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingR final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps); - s_logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString()); + logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString()); if (state == 
VirtualMachine.PowerState.PowerOff) { try { UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn); unmanagedInstances.put(instance.getName(), instance); } catch (Exception e) { - s_logger.error("Error while fetching instance details", e); + logger.error("Error while fetching instance details", e); } } domain.free(); } - s_logger.debug("Found Vms: "+ unmanagedInstances.size()); + logger.debug("Found Vms: "+ unmanagedInstances.size()); return new GetRemoteVmsAnswer(command, "", unmanagedInstances); } catch (final LibvirtException e) { - s_logger.error("Error while listing stopped Vms on remote host: "+ e.getMessage()); + logger.error("Error while listing stopped Vms on remote host: "+ e.getMessage()); return new Answer(command, false, result); } } @@ -106,7 +103,7 @@ private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvir return instance; } catch (Exception e) { - s_logger.debug("Unable to retrieve unmanaged instance info. ", e); + logger.debug("Unable to retrieve unmanaged instance info. ", e); throw new CloudRuntimeException("Unable to retrieve unmanaged instance info. 
" + e.getMessage()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java index 65de4f6d3105..9495646bad54 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.vm.UnmanagedInstanceTO; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainBlockInfo; @@ -42,13 +41,12 @@ @ResourceWrapper(handles=GetUnmanagedInstancesCommand.class) public final class LibvirtGetUnmanagedInstancesCommandWrapper extends CommandWrapper { - private static final Logger LOGGER = Logger.getLogger(LibvirtGetUnmanagedInstancesCommandWrapper.class); private static final int requiredVncPasswordLength = 22; @Override public GetUnmanagedInstancesAnswer execute(GetUnmanagedInstancesCommand command, LibvirtComputingResource libvirtComputingResource) { - LOGGER.info("Fetching unmanaged instance on host"); + logger.info("Fetching unmanaged instance on host"); HashMap unmanagedInstances = new HashMap<>(); try { @@ -65,7 +63,7 @@ public GetUnmanagedInstancesAnswer execute(GetUnmanagedInstancesCommand command, } } catch (Exception e) { String err = String.format("Error listing unmanaged instances: %s", e.getMessage()); - LOGGER.error(err, e); + logger.error(err, e); return new GetUnmanagedInstancesAnswer(command, err); } @@ -81,7 +79,7 @@ private List getDomains(GetUnmanagedInstancesCommand command, final Domain domain = libvirtComputingResource.getDomain(conn, vmNameCmd); 
if (domain == null) { String msg = String.format("VM %s not found", vmNameCmd); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -104,14 +102,14 @@ private List getDomains(GetUnmanagedInstancesCommand command, private void checkIfVmExists(String vmNameCmd,final Domain domain) throws LibvirtException { if (StringUtils.isNotEmpty(vmNameCmd) && !vmNameCmd.equals(domain.getName())) { - LOGGER.error("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd); + logger.error("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd); throw new CloudRuntimeException("GetUnmanagedInstancesCommand: exact vm name not found " + vmNameCmd); } } private void checkIfVmIsManaged(GetUnmanagedInstancesCommand command,String vmNameCmd,final Domain domain) throws LibvirtException { if (command.hasManagedInstance(domain.getName())) { - LOGGER.error("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd); + logger.error("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd); throw new CloudRuntimeException("GetUnmanagedInstancesCommand: vm already managed " + vmNameCmd); } } @@ -137,7 +135,7 @@ private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvir return instance; } catch (Exception e) { - LOGGER.info("Unable to retrieve unmanaged instance info. " + e.getMessage(), e); + logger.info("Unable to retrieve unmanaged instance info. 
" + e.getMessage(), e); return null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmDiskStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmDiskStatsCommandWrapper.java index 6316be9fbf38..6edd667ddcc9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmDiskStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmDiskStatsCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = GetVmDiskStatsCommand.class) public final class LibvirtGetVmDiskStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetVmDiskStatsCommandWrapper.class); @Override public Answer execute(final GetVmDiskStatsCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -56,12 +54,12 @@ public Answer execute(final GetVmDiskStatsCommand command, final LibvirtComputin vmDiskStatsNameMap.put(vmName, statEntry); } catch (LibvirtException e) { - s_logger.warn("Can't get vm disk stats: " + e.toString() + ", continue"); + logger.warn("Can't get vm disk stats: " + e.toString() + ", continue"); } } return new GetVmDiskStatsAnswer(command, "", command.getHostName(), vmDiskStatsNameMap); } catch (final LibvirtException e) { - s_logger.debug("Can't get vm disk stats: " + e.toString()); + logger.debug("Can't get vm disk stats: " + e.toString()); return new GetVmDiskStatsAnswer(command, null, null, null); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java index 1c27bdd958fd..227e68872dac 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmIpAddressCommandWrapper.java @@ -26,12 +26,10 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.Script; -import org.apache.log4j.Logger; @ResourceWrapper(handles = GetVmIpAddressCommand.class) public final class LibvirtGetVmIpAddressCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetVmIpAddressCommandWrapper.class); @Override public Answer execute(final GetVmIpAddressCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -53,7 +51,7 @@ public Answer execute(final GetVmIpAddressCommand command, final LibvirtComputin ip = ipAddr; break; } - s_logger.debug("GetVmIp: "+command.getVmName()+ " Ip: "+ipAddr+" does not belong to network "+networkCidr); + logger.debug("GetVmIp: "+command.getVmName()+ " Ip: "+ipAddr+" does not belong to network "+networkCidr); } } } else { @@ -61,7 +59,7 @@ public Answer execute(final GetVmIpAddressCommand command, final LibvirtComputin String ipList = Script.runSimpleBashScript(new StringBuilder().append("virt-win-reg --unsafe-printable-strings ").append(command.getVmName()) .append(" 'HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001\\Services\\Tcpip\\Parameters\\Interfaces' | grep DhcpIPAddress | awk -F : '{print $2}' | sed -e 's/^\"//' -e 's/\"$//'").toString()); if(ipList != null) { - s_logger.debug("GetVmIp: "+command.getVmName()+ "Ips: "+ipList); + logger.debug("GetVmIp: "+command.getVmName()+ "Ips: "+ipList); String[] ips = ipList.split("\n"); for (String ipAddr : ips){ // Check if the IP belongs to the network @@ 
-69,13 +67,13 @@ public Answer execute(final GetVmIpAddressCommand command, final LibvirtComputin ip = ipAddr; break; } - s_logger.debug("GetVmIp: "+command.getVmName()+ " Ip: "+ipAddr+" does not belong to network "+networkCidr); + logger.debug("GetVmIp: "+command.getVmName()+ " Ip: "+ipAddr+" does not belong to network "+networkCidr); } } } if(ip != null){ result = true; - s_logger.debug("GetVmIp: "+command.getVmName()+ " Found Ip: "+ip); + logger.debug("GetVmIp: "+command.getVmName()+ " Found Ip: "+ip); } return new Answer(command, result, ip); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmNetworkStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmNetworkStatsCommandWrapper.java index 20ee4fd9dea8..03321122c4ae 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmNetworkStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmNetworkStatsCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = GetVmNetworkStatsCommand.class) public final class LibvirtGetVmNetworkStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetVmNetworkStatsCommandWrapper.class); @Override public Answer execute(final GetVmNetworkStatsCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -56,12 +54,12 @@ public Answer execute(final GetVmNetworkStatsCommand command, final LibvirtCompu vmNetworkStatsNameMap.put(vmName, statEntry); } catch (LibvirtException e) { - s_logger.warn("Can't get vm network stats: " + e.toString() + ", continue"); + logger.warn("Can't get vm network stats: " + e.toString() + 
", continue"); } } return new GetVmNetworkStatsAnswer(command, "", command.getHostName(), vmNetworkStatsNameMap); } catch (final LibvirtException e) { - s_logger.debug("Can't get vm network stats: " + e.toString()); + logger.debug("Can't get vm network stats: " + e.toString()); return new GetVmNetworkStatsAnswer(command, null, null, null); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmStatsCommandWrapper.java index 24853d0a2e81..834b0e834dff 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVmStatsCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.List; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = GetVmStatsCommand.class) public final class LibvirtGetVmStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetVmStatsCommandWrapper.class); @Override public Answer execute(final GetVmStatsCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -57,12 +55,12 @@ public Answer execute(final GetVmStatsCommand command, final LibvirtComputingRes vmStatsNameMap.put(vmName, statEntry); } catch (LibvirtException e) { - s_logger.warn("Can't get vm stats: " + e.toString() + ", continue"); + logger.warn("Can't get vm stats: " + e.toString() + ", continue"); } } return new GetVmStatsAnswer(command, vmStatsNameMap); } catch (final LibvirtException e) { - s_logger.debug("Can't get vm stats: " + e.toString()); + logger.debug("Can't get vm stats: " + e.toString()); return new GetVmStatsAnswer(command, null); } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java index a2f50ac6555f..677af99ed157 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.HashMap; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = GetVolumeStatsCommand.class) public final class LibvirtGetVolumeStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtGetVmDiskStatsCommandWrapper.class); @Override public Answer execute(final GetVolumeStatsCommand cmd, final LibvirtComputingResource libvirtComputingResource) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java index 3d2f7282b06b..135d447fbb2a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.HandleConfigDriveIsoAnswer; @@ -42,7 +41,6 @@ @ResourceWrapper(handles = HandleConfigDriveIsoCommand.class) public final class 
LibvirtHandleConfigDriveCommandWrapper extends CommandWrapper { - private static final Logger LOG = Logger.getLogger(LibvirtHandleConfigDriveCommandWrapper.class); @Override public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -50,11 +48,11 @@ public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtCo try { if (command.isCreate()) { - LOG.debug("Creating config drive: " + command.getIsoFile()); + logger.debug("Creating config drive: " + command.getIsoFile()); NetworkElement.Location location = NetworkElement.Location.PRIMARY; if (command.isHostCachePreferred()) { - LOG.debug("Using the KVM host for config drive"); + logger.debug("Using the KVM host for config drive"); mountPoint = libvirtComputingResource.getConfigPath(); location = NetworkElement.Location.HOST; } else { @@ -79,14 +77,14 @@ public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtCo } if (pool.supportsConfigDriveIso()) { - LOG.debug("Using the pool: " + poolUuid + " for config drive"); + logger.debug("Using the pool: " + poolUuid + " for config drive"); mountPoint = pool.getLocalPath(); } else if (command.getUseHostCacheOnUnsupportedPool()) { - LOG.debug("Config drive for KVM is not supported for pool type: " + poolType.toString() + ", using the KVM host"); + logger.debug("Config drive for KVM is not supported for pool type: " + poolType.toString() + ", using the KVM host"); mountPoint = libvirtComputingResource.getConfigPath(); location = NetworkElement.Location.HOST; } else { - LOG.debug("Config drive for KVM is not supported for pool type: " + poolType.toString()); + logger.debug("Config drive for KVM is not supported for pool type: " + poolType.toString()); return new HandleConfigDriveIsoAnswer(command, "Config drive for KVM is not supported for pool type: " + poolType.toString()); } } @@ -98,7 +96,7 @@ public Answer execute(final HandleConfigDriveIsoCommand command, final 
LibvirtCo return new HandleConfigDriveIsoAnswer(command, "Invalid config drive ISO data received"); } if (isoFile.exists()) { - LOG.debug("An old config drive iso already exists"); + logger.debug("An old config drive iso already exists"); } Files.createDirectories(isoPath.getParent()); @@ -106,7 +104,7 @@ public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtCo return new HandleConfigDriveIsoAnswer(command, location); } else { - LOG.debug("Deleting config drive: " + command.getIsoFile()); + logger.debug("Deleting config drive: " + command.getIsoFile()); Path configDrivePath = null; if (command.isHostCachePreferred()) { @@ -138,10 +136,10 @@ public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtCo return new HandleConfigDriveIsoAnswer(command); } } catch (final IOException e) { - LOG.debug("Failed to handle config drive due to " + e.getMessage(), e); + logger.debug("Failed to handle config drive due to " + e.getMessage(), e); return new HandleConfigDriveIsoAnswer(command, "Failed due to exception: " + e.getMessage()); } catch (final CloudRuntimeException e) { - LOG.debug("Failed to handle config drive due to " + e.getMessage(), e); + logger.debug("Failed to handle config drive due to " + e.getMessage(), e); return new HandleConfigDriveIsoAnswer(command, "Failed due to exception: " + e.toString()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtManageSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtManageSnapshotCommandWrapper.java index 0e2492c146fe..ec900e9981e0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtManageSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtManageSnapshotCommandWrapper.java @@ -22,7 +22,6 @@ import java.io.File; import java.text.MessageFormat; -import 
org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; @@ -49,7 +48,6 @@ @ResourceWrapper(handles = ManageSnapshotCommand.class) public final class LibvirtManageSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtManageSnapshotCommandWrapper.class); @Override public Answer execute(final ManageSnapshotCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -66,7 +64,7 @@ public Answer execute(final ManageSnapshotCommand command, final LibvirtComputin vm = libvirtComputingResource.getDomain(conn, command.getVmName()); state = vm.getInfo().state; } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + logger.trace("Ignoring libvirt error.", e); } } @@ -83,7 +81,7 @@ public Answer execute(final ManageSnapshotCommand command, final LibvirtComputin final String vmUuid = vm.getUUIDString(); final Object[] args = new Object[] {snapshotName, vmUuid}; final String snapshot = snapshotXML.format(args); - s_logger.debug(snapshot); + logger.debug(snapshot); if (command.getCommandSwitch().equalsIgnoreCase(ManageSnapshotCommand.CREATE_SNAPSHOT)) { vm.snapshotCreateXML(snapshot); } else { @@ -121,31 +119,31 @@ public Answer execute(final ManageSnapshotCommand command, final LibvirtComputin r.confSet("key", primaryPool.getAuthSecret()); r.confSet("client_mount_timeout", "30"); r.connect(); - s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); + logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir()); final Rbd rbd = new Rbd(io); final RbdImage image = rbd.open(disk.getName()); if (command.getCommandSwitch().equalsIgnoreCase(ManageSnapshotCommand.CREATE_SNAPSHOT)) { - s_logger.debug("Attempting to create RBD snapshot " + disk.getName() + "@" + snapshotName); + logger.debug("Attempting 
to create RBD snapshot " + disk.getName() + "@" + snapshotName); image.snapCreate(snapshotName); } else { - s_logger.debug("Attempting to remove RBD snapshot " + disk.getName() + "@" + snapshotName); + logger.debug("Attempting to remove RBD snapshot " + disk.getName() + "@" + snapshotName); image.snapRemove(snapshotName); } rbd.close(image); r.ioCtxDestroy(io); } catch (final Exception e) { - s_logger.error("A RBD snapshot operation on " + disk.getName() + " failed. The error was: " + e.getMessage()); + logger.error("A RBD snapshot operation on " + disk.getName() + " failed. The error was: " + e.getMessage()); } } else { /* VM is not running, create a snapshot by ourself */ final int cmdsTimeout = libvirtComputingResource.getCmdsTimeout(); final String manageSnapshotPath = libvirtComputingResource.manageSnapshotPath(); - final Script scriptCommand = new Script(manageSnapshotPath, cmdsTimeout, s_logger); + final Script scriptCommand = new Script(manageSnapshotPath, cmdsTimeout, logger); if (command.getCommandSwitch().equalsIgnoreCase(ManageSnapshotCommand.CREATE_SNAPSHOT)) { scriptCommand.add("-c", disk.getPath()); } else { @@ -155,14 +153,14 @@ public Answer execute(final ManageSnapshotCommand command, final LibvirtComputin scriptCommand.add("-n", snapshotName); final String result = scriptCommand.execute(); if (result != null) { - s_logger.debug("Failed to manage snapshot: " + result); + logger.debug("Failed to manage snapshot: " + result); return new ManageSnapshotAnswer(command, false, "Failed to manage snapshot: " + result); } } } return new ManageSnapshotAnswer(command, command.getSnapshotId(), disk.getPath() + File.separator + snapshotName, true, null); } catch (final LibvirtException e) { - s_logger.debug("Failed to manage snapshot: " + e.toString()); + logger.debug("Failed to manage snapshot: " + e.toString()); return new ManageSnapshotAnswer(command, false, "Failed to manage snapshot: " + e.toString()); } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index fb526626ef8f..210af77f84a6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -49,7 +49,6 @@ import org.apache.commons.io.FilenameUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; @@ -90,7 +89,6 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper vlanToPersistenceMap = command.getVlanToPersistenceMap(); final String destinationUri = createMigrationURI(command.getDestinationIp(), libvirtComputingResource); final List migrateDiskInfoList = command.getMigrateDiskInfoList(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Trying to migrate VM [%s] to destination host: [%s].", vmName, destinationUri)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Trying to migrate VM [%s] to destination host: [%s].", vmName, destinationUri)); } String result = null; @@ -127,8 +125,8 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName); ifaces = libvirtComputingResource.getInterfaces(conn, vmName); disks = libvirtComputingResource.getDisks(conn, vmName); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Found domain with name [%s]. Starting VM migration to host [%s].", vmName, destinationUri)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found domain with name [%s]. 
Starting VM migration to host [%s].", vmName, destinationUri)); } VirtualMachineTO to = command.getVirtualMachine(); @@ -156,8 +154,8 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. final String target = command.getDestinationIp(); xmlDesc = dm.getXMLDesc(xmlFlag); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("VM [%s] with XML configuration [%s] will be migrated to host [%s].", vmName, xmlDesc, target)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("VM [%s] with XML configuration [%s] will be migrated to host [%s].", vmName, xmlDesc, target)); } // Limit the VNC password in case the length is greater than 8 characters @@ -168,10 +166,10 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. String oldIsoVolumePath = getOldVolumePath(disks, vmName); String newIsoVolumePath = getNewVolumePathIfDatastoreHasChanged(libvirtComputingResource, conn, to); if (newIsoVolumePath != null && !newIsoVolumePath.equals(oldIsoVolumePath)) { - s_logger.debug(String.format("Editing mount path of iso from %s to %s", oldIsoVolumePath, newIsoVolumePath)); + logger.debug(String.format("Editing mount path of iso from %s to %s", oldIsoVolumePath, newIsoVolumePath)); xmlDesc = replaceDiskSourceFile(xmlDesc, newIsoVolumePath, vmName); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Replaced disk mount point [%s] with [%s] in VM [%s] XML configuration. New XML configuration is [%s].", oldIsoVolumePath, newIsoVolumePath, vmName, xmlDesc)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Replaced disk mount point [%s] with [%s] in VM [%s] XML configuration. New XML configuration is [%s].", oldIsoVolumePath, newIsoVolumePath, vmName, xmlDesc)); } } // delete the metadata of vm snapshots before migration @@ -192,23 +190,23 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. 
final boolean migrateStorageManaged = command.isMigrateStorageManaged(); if (migrateStorage) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Changing VM [%s] volumes during migration to host: [%s].", vmName, target)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Changing VM [%s] volumes during migration to host: [%s].", vmName, target)); } xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage, migrateStorageManaged); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Changed VM [%s] XML configuration of used storage. New XML configuration is [%s].", vmName, xmlDesc)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Changed VM [%s] XML configuration of used storage. New XML configuration is [%s].", vmName, xmlDesc)); } } Map dpdkPortsMapping = command.getDpdkInterfaceMapping(); if (MapUtils.isNotEmpty(dpdkPortsMapping)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Changing VM [%s] DPDK interfaces during migration to host: [%s].", vmName, target)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Changing VM [%s] DPDK interfaces during migration to host: [%s].", vmName, target)); } xmlDesc = replaceDpdkInterfaces(xmlDesc, dpdkPortsMapping); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Changed VM [%s] XML configuration of DPDK interfaces. New XML configuration is [%s].", vmName, xmlDesc)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Changed VM [%s] XML configuration of DPDK interfaces. New XML configuration is [%s].", vmName, xmlDesc)); } } @@ -221,7 +219,7 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. 
} //run migration in thread so we can monitor it - s_logger.info(String.format("Starting live migration of instance [%s] to destination host [%s] having the final XML configuration: [%s].", vmName, dconn.getURI(), xmlDesc)); + logger.info(String.format("Starting live migration of instance [%s] to destination host [%s] having the final XML configuration: [%s].", vmName, dconn.getURI(), xmlDesc)); final ExecutorService executor = Executors.newFixedThreadPool(1); boolean migrateNonSharedInc = command.isMigrateNonSharedInc() && !migrateStorageManaged; @@ -240,15 +238,15 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. try { final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime); if (setDowntime == 0 ) { - s_logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms"); + logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms"); } } catch (final LibvirtException e) { - s_logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage()); + logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage()); } } } if (sleeptime % 1000 == 0) { - s_logger.info("Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms"); + logger.info("Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms"); } // abort the vm migration if the job is executed more than vm.migrate.wait @@ -258,18 +256,18 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. 
try { state = dm.getInfo().state; } catch (final LibvirtException e) { - s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage()); + logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage()); } if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) { try { DomainJobInfo job = dm.getJobInfo(); - s_logger.info(String.format("Aborting migration of VM [%s] with domain job [%s] due to time out after %d seconds.", vmName, job, migrateWait)); + logger.info(String.format("Aborting migration of VM [%s] with domain job [%s] due to time out after %d seconds.", vmName, job, migrateWait)); dm.abortJob(); result = String.format("Migration of VM [%s] was cancelled by CloudStack due to time out after %d seconds.", vmName, migrateWait); - s_logger.debug(result); + logger.debug(result); break; } catch (final LibvirtException e) { - s_logger.error(String.format("Failed to abort the VM migration job of VM [%s] due to: [%s].", vmName, e.getMessage()), e); + logger.error(String.format("Failed to abort the VM migration job of VM [%s] due to: [%s].", vmName, e.getMessage()), e); } } } @@ -281,33 +279,33 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. 
try { state = dm.getInfo().state; } catch (final LibvirtException e) { - s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage()); + logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage()); } if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) { try { - s_logger.info("Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to " + migratePauseAfter + "ms to complete migration"); + logger.info("Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to " + migratePauseAfter + "ms to complete migration"); dm.suspend(); } catch (final LibvirtException e) { // pause could be racy if it attempts to pause right when vm is finished, simply warn - s_logger.info("Failed to pause vm " + vmName + " : " + e.getMessage()); + logger.info("Failed to pause vm " + vmName + " : " + e.getMessage()); } } } } - s_logger.info(String.format("Migration thread of VM [%s] finished.", vmName)); + logger.info(String.format("Migration thread of VM [%s] finished.", vmName)); destDomain = migrateThread.get(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VM_MIGRATE_DOMAIN_RETRIEVE_TIMEOUT), TimeUnit.SECONDS); if (destDomain != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Cleaning the disks of VM [%s] in the source pool after VM migration finished.", vmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Cleaning the disks of VM [%s] in the source pool after VM migration finished.", vmName)); } deleteOrDisconnectDisksOnSourcePool(libvirtComputingResource, migrateDiskInfoList, disks); libvirtComputingResource.cleanOldSecretsByDiskDef(conn, disks); } } catch (final LibvirtException e) { - s_logger.error(String.format("Can't migrate domain [%s] due to: [%s].", vmName, e.getMessage()), e); + logger.error(String.format("Can't migrate domain [%s] due to: [%s].", vmName, e.getMessage()), e); result = e.getMessage(); if 
(result.startsWith("unable to connect to server") && result.endsWith("refused")) { result = String.format("Migration was refused connection to destination: %s. Please check libvirt configuration compatibility and firewall rules on the source and destination hosts.", destinationUri); @@ -320,7 +318,7 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. | SAXException | TransformerException | URISyntaxException e) { - s_logger.error(String.format("Can't migrate domain [%s] due to: [%s].", vmName, e.getMessage()), e); + logger.error(String.format("Can't migrate domain [%s] due to: [%s].", vmName, e.getMessage()), e); if (result == null) { result = "Exception during migrate: " + e.getMessage(); } @@ -345,7 +343,7 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0. destDomain.free(); } } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + logger.trace("Ignoring libvirt error.", e); } } @@ -383,7 +381,7 @@ protected String updateVmSharesIfNeeded(MigrateCommand migrateCommand, String xm int currentCpuShares = libvirtComputingResource.calculateCpuShares(migrateCommand.getVirtualMachine()); if (newVmCpuShares == currentCpuShares) { - s_logger.info(String.format("Current CPU shares [%s] is equal in both hosts; therefore, there is no need to update the CPU shares for the new host.", + logger.info(String.format("Current CPU shares [%s] is equal in both hosts; therefore, there is no need to update the CPU shares for the new host.", currentCpuShares)); return xmlDesc; } @@ -397,7 +395,7 @@ protected String updateVmSharesIfNeeded(MigrateCommand migrateCommand, String xm Node sharesNode = root.getElementsByTagName("shares").item(0); String currentShares = sharesNode.getTextContent(); - s_logger.info(String.format("VM [%s] will have CPU shares altered from [%s] to [%s] as part of migration because the cgroups version differs between hosts.", + logger.info(String.format("VM [%s] will have CPU shares altered from [%s] to [%s] as part of 
migration because the cgroups version differs between hosts.", migrateCommand.getVmName(), currentShares, newVmCpuShares)); sharesNode.setTextContent(String.valueOf(newVmCpuShares)); return getXml(document); @@ -498,7 +496,7 @@ protected void deleteLocalVolume(String localPath) { StorageVol storageVolLookupByPath = conn.storageVolLookupByPath(localPath); storageVolLookupByPath.delete(0); } catch (LibvirtException e) { - s_logger.error(String.format("Cannot delete local volume [%s] due to: %s", localPath, e)); + logger.error(String.format("Cannot delete local volume [%s] due to: %s", localPath, e)); } } @@ -511,7 +509,7 @@ protected MigrateDiskInfo searchDiskDefOnMigrateDiskInfoList(List { - private static final Logger LOGGER = Logger.getLogger(LibvirtMigrateVolumeCommandWrapper.class); @Override public Answer execute(final MigrateVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -139,18 +137,18 @@ protected MigrateVolumeAnswer migratePowerFlexVolume(final MigrateVolumeCommand parameters[0] = parameter; dm.blockCopy(destDiskLabel, diskdef, parameters, Domain.BlockCopyFlags.REUSE_EXT); - LOGGER.info(String.format("Block copy has started for the volume %s : %s ", destDiskLabel, srcPath)); + logger.info(String.format("Block copy has started for the volume %s : %s ", destDiskLabel, srcPath)); return checkBlockJobStatus(command, dm, destDiskLabel, srcPath, destPath, libvirtComputingResource, conn, srcSecretUUID); } catch (Exception e) { String msg = "Migrate volume failed due to " + e.toString(); - LOGGER.warn(msg, e); + logger.warn(msg, e); if (destDiskLabel != null) { try { dm.blockJobAbort(destDiskLabel, Domain.BlockJobAbortFlags.ASYNC); } catch (LibvirtException ex) { - LOGGER.error("Migrate volume failed while aborting the block job due to " + ex.getMessage()); + logger.error("Migrate volume failed while aborting the block job due to " + ex.getMessage()); } } return new MigrateVolumeAnswer(command, false, msg, null); @@ -159,7 
+157,7 @@ protected MigrateVolumeAnswer migratePowerFlexVolume(final MigrateVolumeCommand try { dm.free(); } catch (LibvirtException l) { - LOGGER.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); }; } } @@ -171,9 +169,9 @@ protected MigrateVolumeAnswer checkBlockJobStatus(MigrateVolumeCommand command, while (waitTimeInSec > 0) { DomainBlockJobInfo blockJobInfo = dm.getBlockJobInfo(diskLabel, 0); if (blockJobInfo != null) { - LOGGER.debug(String.format("Volume %s : %s block copy progress: %s%% current value:%s end value:%s", diskLabel, srcPath, (blockJobInfo.end == 0)? 0 : 100*(blockJobInfo.cur / (double) blockJobInfo.end), blockJobInfo.cur, blockJobInfo.end)); + logger.debug(String.format("Volume %s : %s block copy progress: %s%% current value:%s end value:%s", diskLabel, srcPath, (blockJobInfo.end == 0)? 0 : 100*(blockJobInfo.cur / (double) blockJobInfo.end), blockJobInfo.cur, blockJobInfo.end)); if (blockJobInfo.cur == blockJobInfo.end) { - LOGGER.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath)); + logger.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath)); dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT); if (StringUtils.isNotEmpty(srcSecretUUID)) { libvirtComputingResource.removeLibvirtVolumeSecret(conn, srcSecretUUID); @@ -181,7 +179,7 @@ protected MigrateVolumeAnswer checkBlockJobStatus(MigrateVolumeCommand command, break; } } else { - LOGGER.info("Failed to get the block copy status, trying to abort the job"); + logger.info("Failed to get the block copy status, trying to abort the job"); dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC); } waitTimeInSec--; @@ -195,11 +193,11 @@ protected MigrateVolumeAnswer checkBlockJobStatus(MigrateVolumeCommand command, if (waitTimeInSec <= 0) { String msg = "Block copy is taking long time, failing the job"; - LOGGER.error(msg); + logger.error(msg); try { dm.blockJobAbort(diskLabel, 
Domain.BlockJobAbortFlags.ASYNC); } catch (LibvirtException ex) { - LOGGER.error("Migrate volume failed while aborting the block job due to " + ex.getMessage()); + logger.error("Migrate volume failed while aborting the block job due to " + ex.getMessage()); } return new MigrateVolumeAnswer(command, false, msg, null); } @@ -311,14 +309,14 @@ protected MigrateVolumeAnswer migrateRegularVolume(final MigrateVolumeCommand co storagePoolManager.disconnectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath); } catch (Exception e) { - LOGGER.warn("Unable to disconnect from the destination device.", e); + logger.warn("Unable to disconnect from the destination device.", e); } try { storagePoolManager.disconnectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); } catch (Exception e) { - LOGGER.warn("Unable to disconnect from the source device.", e); + logger.warn("Unable to disconnect from the source device.", e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifySshKeysCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifySshKeysCommandWrapper.java index b9af013f77eb..a4d1c070f787 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifySshKeysCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifySshKeysCommandWrapper.java @@ -24,7 +24,6 @@ import java.io.FileOutputStream; import java.io.IOException; -import org.apache.log4j.Logger; import com.cloud.utils.StringUtils; import com.cloud.agent.api.Answer; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = ModifySshKeysCommand.class) public final class LibvirtModifySshKeysCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtModifySshKeysCommandWrapper.class); @Override public 
Answer execute(final ModifySshKeysCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -52,13 +50,13 @@ public Answer execute(final ModifySshKeysCommand command, final LibvirtComputing String result = null; if (!sshKeysDir.exists()) { // Change permissions for the 700 - final Script script = new Script("mkdir", libvirtComputingResource.getTimeout(), s_logger); + final Script script = new Script("mkdir", libvirtComputingResource.getTimeout(), logger); script.add("-m", "700"); script.add(sshkeyspath); script.execute(); if (!sshKeysDir.exists()) { - s_logger.debug("failed to create directory " + sshkeyspath); + logger.debug("failed to create directory " + sshkeyspath); } } @@ -68,7 +66,7 @@ public Answer execute(final ModifySshKeysCommand command, final LibvirtComputing pubKeyFile.createNewFile(); } catch (final IOException e) { result = "Failed to create file: " + e.toString(); - s_logger.debug(result); + logger.debug(result); } } @@ -78,10 +76,10 @@ public Answer execute(final ModifySshKeysCommand command, final LibvirtComputing } catch (final FileNotFoundException e) { result = "File" + sshpubkeypath + "is not found:" + e.toString(); - s_logger.debug(result); + logger.debug(result); } catch (final IOException e) { result = "Write file " + sshpubkeypath + ":" + e.toString(); - s_logger.debug(result); + logger.debug(result); } } @@ -91,7 +89,7 @@ public Answer execute(final ModifySshKeysCommand command, final LibvirtComputing prvKeyFile.createNewFile(); } catch (final IOException e) { result = "Failed to create file: " + e.toString(); - s_logger.debug(result); + logger.debug(result); } } @@ -103,12 +101,12 @@ public Answer execute(final ModifySshKeysCommand command, final LibvirtComputing } } catch (final FileNotFoundException e) { result = "File" + sshprvkeypath + "is not found:" + e.toString(); - s_logger.debug(result); + logger.debug(result); } catch (final IOException e) { result = "Write file " + sshprvkeypath + ":" + e.toString(); - 
s_logger.debug(result); + logger.debug(result); } - final Script script = new Script("chmod", libvirtComputingResource.getTimeout(), s_logger); + final Script script = new Script("chmod", libvirtComputingResource.getTimeout(), logger); script.add("600", sshprvkeypath); script.execute(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java index 724caad3f227..b05917143d55 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyTargetsCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyTargetsAnswer; @@ -38,7 +37,6 @@ @ResourceWrapper(handles = ModifyTargetsCommand.class) public final class LibvirtModifyTargetsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtModifyTargetsCommandWrapper.class); @Override public Answer execute(final ModifyTargetsCommand command, final LibvirtComputingResource libvirtComputingResource) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesSystemVmCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesSystemVmCommandWrapper.java index 65169a3f23d5..b5ee13920a68 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesSystemVmCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesSystemVmCommandWrapper.java @@ -19,7 +19,6 @@ package 
com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = NetworkRulesSystemVmCommand.class) public final class LibvirtNetworkRulesSystemVmCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.class); @Override public Answer execute(final NetworkRulesSystemVmCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -43,7 +41,7 @@ public Answer execute(final NetworkRulesSystemVmCommand command, final LibvirtCo final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(command.getVmName()); success = libvirtComputingResource.configureDefaultNetworkRulesForSystemVm(conn, command.getVmName()); } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + logger.trace("Ignoring libvirt error.", e); } return new Answer(command, success, ""); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesVmSecondaryIpCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesVmSecondaryIpCommandWrapper.java index 07c091ee0eee..890558ca3651 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesVmSecondaryIpCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtNetworkRulesVmSecondaryIpCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = NetworkRulesVmSecondaryIpCommand.class) public final class LibvirtNetworkRulesVmSecondaryIpCommandWrapper extends CommandWrapper { - private static final Logger s_logger = 
Logger.getLogger(LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.class); @Override public Answer execute(final NetworkRulesVmSecondaryIpCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -43,7 +41,7 @@ public Answer execute(final NetworkRulesVmSecondaryIpCommand command, final Libv final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(command.getVmName()); result = libvirtComputingResource.configureNetworkRulesVMSecondaryIP(conn, command.getVmName(), command.getVmMac(), command.getVmSecIp(), command.getAction()); } catch (final LibvirtException e) { - s_logger.debug("Could not configure VM secondary IP! => " + e.getLocalizedMessage()); + logger.debug("Could not configure VM secondary IP! => " + e.getLocalizedMessage()); } return new Answer(command, result, ""); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsCreateTunnelCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsCreateTunnelCommandWrapper.java index fc6839583036..14aaf234c162 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsCreateTunnelCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsCreateTunnelCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsCreateTunnelAnswer; @@ -32,19 +31,18 @@ @ResourceWrapper(handles = OvsCreateTunnelCommand.class) public final class LibvirtOvsCreateTunnelCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsCreateTunnelCommandWrapper.class); @Override public Answer execute(final OvsCreateTunnelCommand command, final LibvirtComputingResource libvirtComputingResource) { final String bridge = command.getNetworkName(); try { if 
(!libvirtComputingResource.findOrCreateTunnelNetwork(bridge)) { - s_logger.debug("Error during bridge setup"); + logger.debug("Error during bridge setup"); return new OvsCreateTunnelAnswer(command, false, "Cannot create network", bridge); } libvirtComputingResource.configureTunnelNetwork(command.getNetworkId(), command.getFrom(), command.getNetworkName()); - final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), s_logger); + final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), logger); scriptCommand.add("create_tunnel"); scriptCommand.add("--bridge", bridge); scriptCommand.add("--remote_ip", command.getRemoteIp()); @@ -59,7 +57,7 @@ public Answer execute(final OvsCreateTunnelCommand command, final LibvirtComputi return new OvsCreateTunnelAnswer(command, false, result, bridge); } } catch (final Exception e) { - s_logger.warn("Caught execption when creating ovs tunnel", e); + logger.warn("Caught execption when creating ovs tunnel", e); return new OvsCreateTunnelAnswer(command, false, e.getMessage(), bridge); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyBridgeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyBridgeCommandWrapper.java index 2e70a89b185e..0de9096659a5 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyBridgeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyBridgeCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsDestroyBridgeCommand; @@ -30,14 +29,13 @@ @ResourceWrapper(handles = OvsDestroyBridgeCommand.class) public 
final class LibvirtOvsDestroyBridgeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsDestroyBridgeCommandWrapper.class); @Override public Answer execute(final OvsDestroyBridgeCommand command, final LibvirtComputingResource libvirtComputingResource) { final boolean result = libvirtComputingResource.destroyTunnelNetwork(command.getBridgeName()); if (!result) { - s_logger.debug("Error trying to destroy OVS Bridge!"); + logger.debug("Error trying to destroy OVS Bridge!"); } return new Answer(command, result, null); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyTunnelCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyTunnelCommandWrapper.java index a1a9851e78fb..83fc26bf0ff4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyTunnelCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsDestroyTunnelCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsDestroyTunnelCommand; @@ -31,18 +30,17 @@ @ResourceWrapper(handles = OvsDestroyTunnelCommand.class) public final class LibvirtOvsDestroyTunnelCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsDestroyTunnelCommandWrapper.class); @Override public Answer execute(final OvsDestroyTunnelCommand command, final LibvirtComputingResource libvirtComputingResource) { try { if (!libvirtComputingResource.findOrCreateTunnelNetwork(command.getBridgeName())) { - s_logger.warn("Unable to find tunnel network for GRE key:" + logger.warn("Unable to find tunnel network for GRE key:" + command.getBridgeName()); return new Answer(command, false, "No 
network found"); } - final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), s_logger); + final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), logger); scriptCommand.add("destroy_tunnel"); scriptCommand.add("--bridge", command.getBridgeName()); scriptCommand.add("--iface_name", command.getInPortName()); @@ -53,7 +51,7 @@ public Answer execute(final OvsDestroyTunnelCommand command, final LibvirtComput return new Answer(command, false, result); } } catch (final Exception e) { - s_logger.warn("caught execption when destroy ovs tunnel", e); + logger.warn("caught execption when destroy ovs tunnel", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsFetchInterfaceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsFetchInterfaceCommandWrapper.java index 5c79de5f4bf9..db07cc5291a0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsFetchInterfaceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsFetchInterfaceCommandWrapper.java @@ -20,7 +20,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsFetchInterfaceAnswer; @@ -33,13 +32,12 @@ @ResourceWrapper(handles = OvsFetchInterfaceCommand.class) public final class LibvirtOvsFetchInterfaceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsFetchInterfaceCommandWrapper.class); @Override public Answer execute(final OvsFetchInterfaceCommand command, final LibvirtComputingResource libvirtComputingResource) { final 
String label = command.getLabel(); - s_logger.debug("Will look for network with name-label:" + label); + logger.debug("Will look for network with name-label:" + label); try { String ipadd = Script.runSimpleBashScript("ifconfig " + label + " | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"); if (StringUtils.isEmpty(ipadd)) { @@ -57,7 +55,7 @@ public Answer execute(final OvsFetchInterfaceCommand command, final LibvirtCompu + " retrieved successfully", ipadd, mask, mac); } catch (final Exception e) { - s_logger.warn("Caught execption when fetching interface", e); + logger.warn("Caught execption when fetching interface", e); return new OvsFetchInterfaceAnswer(command, false, "EXCEPTION:" + e.getMessage()); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsSetupBridgeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsSetupBridgeCommandWrapper.java index 2eb0d082c49f..986495004296 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsSetupBridgeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsSetupBridgeCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsSetupBridgeCommand; @@ -30,7 +29,6 @@ @ResourceWrapper(handles = OvsSetupBridgeCommand.class) public final class LibvirtOvsSetupBridgeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsSetupBridgeCommandWrapper.class); @Override public Answer execute(final OvsSetupBridgeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -41,7 +39,7 @@ public Answer execute(final OvsSetupBridgeCommand command, final LibvirtComputin final boolean finalResult = findResult && configResult; if 
(!finalResult) { - s_logger.debug("::FAILURE:: OVS Bridge was NOT configured properly!"); + logger.debug("::FAILURE:: OVS Bridge was NOT configured properly!"); } return new Answer(command, finalResult, null); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcPhysicalTopologyConfigCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcPhysicalTopologyConfigCommandWrapper.java index 5fc8e8cf70b4..2f5c418d3d79 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcPhysicalTopologyConfigCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcPhysicalTopologyConfigCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsVpcPhysicalTopologyConfigCommand; @@ -31,12 +30,11 @@ @ResourceWrapper(handles = OvsVpcPhysicalTopologyConfigCommand.class) public final class LibvirtOvsVpcPhysicalTopologyConfigCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsVpcPhysicalTopologyConfigCommandWrapper.class); @Override public Answer execute(final OvsVpcPhysicalTopologyConfigCommand command, final LibvirtComputingResource libvirtComputingResource) { try { - final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), s_logger); + final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), logger); scriptCommand.add("configure_ovs_bridge_for_network_topology"); scriptCommand.add("--bridge", command.getBridgeName()); scriptCommand.add("--config", command.getVpcConfigInJson()); @@ -48,7 +46,7 @@ public Answer execute(final 
OvsVpcPhysicalTopologyConfigCommand command, final L return new Answer(command, false, result); } } catch (final Exception e) { - s_logger.warn("caught exception while updating host with latest routing polcies", e); + logger.warn("caught exception while updating host with latest routing policies", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.java index e82801995468..b481cecde23c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsVpcRoutingPolicyConfigCommand; @@ -31,12 +30,11 @@ @ResourceWrapper(handles = OvsVpcRoutingPolicyConfigCommand.class) public final class LibvirtOvsVpcRoutingPolicyConfigCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtOvsVpcRoutingPolicyConfigCommandWrapper.class); @Override public Answer execute(final OvsVpcRoutingPolicyConfigCommand command, final LibvirtComputingResource libvirtComputingResource) { try { - final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), s_logger); + final Script scriptCommand = new Script(libvirtComputingResource.getOvsTunnelPath(), libvirtComputingResource.getTimeout(), logger); scriptCommand.add("configure_ovs_bridge_for_routing_policies"); scriptCommand.add("--bridge", command.getBridgeName());
scriptCommand.add("--config", command.getVpcConfigInJson()); @@ -48,7 +46,7 @@ public Answer execute(final OvsVpcRoutingPolicyConfigCommand command, final Libv return new Answer(command, false, result); } } catch (final Exception e) { - s_logger.warn("caught exception while updating host with latest VPC topology", e); + logger.warn("caught exception while updating host with latest VPC topology", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java index 7800e95c39b9..a9e4d0da4320 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java @@ -31,13 +31,11 @@ import com.cloud.utils.ssh.SshHelper; import com.cloud.utils.validation.ChecksumUtil; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.File; @ResourceWrapper(handles = PatchSystemVmCommand.class) public class LibvirtPatchSystemVmCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtPatchSystemVmCommandWrapper.class); private static int sshPort = Integer.parseInt(LibvirtComputingResource.DEFAULTDOMRSSHPORT); private static File pemFile = new File(LibvirtComputingResource.SSHPRVKEYPATH); @@ -63,7 +61,7 @@ public Answer execute(PatchSystemVmCommand cmd, LibvirtComputingResource serverR if (!StringUtils.isEmpty(checksum) && checksum.equals(scriptChecksum) && !cmd.isForced()) { String msg = String.format("No change in the scripts checksum, not patching systemVM %s", sysVMName); - s_logger.info(msg); + logger.info(msg); return new PatchSystemVmAnswer(cmd, msg, lines[0], 
lines[1]); } @@ -82,7 +80,7 @@ public Answer execute(PatchSystemVmCommand cmd, LibvirtComputingResource serverR String res = patchResult.second().replace("\n", " "); String[] output = res.split(":"); if (output.length != 2) { - s_logger.warn("Failed to get the latest script version"); + logger.warn("Failed to get the latest script version"); } else { scriptVersion = output[1].split(" ")[0]; } @@ -98,12 +96,12 @@ private ExecutionResult getSystemVmVersionAndChecksum(LibvirtComputingResource s result = serverResource.executeInVR(controlIp, VRScripts.VERSION, null); if (!result.isSuccess()) { String errMsg = String.format("GetSystemVMVersionCmd on %s failed, message %s", controlIp, result.getDetails()); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (final Exception e) { final String msg = "GetSystemVMVersionCmd failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(msg, e); } return result; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPingTestCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPingTestCommandWrapper.java index 6b428c6bc159..605861492487 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPingTestCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPingTestCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.PingTestCommand; @@ -31,7 +30,6 @@ @ResourceWrapper(handles = PingTestCommand.class) public final class LibvirtPingTestCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtPingTestCommandWrapper.class); @Override public Answer execute(final 
PingTestCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -53,13 +51,13 @@ public Answer execute(final PingTestCommand command, final LibvirtComputingResou } protected String doPingTest(final LibvirtComputingResource libvirtComputingResource, final String computingHostIp) { - final Script command = new Script(libvirtComputingResource.getPingTestPath(), 10000, s_logger); + final Script command = new Script(libvirtComputingResource.getPingTestPath(), 10000, logger); command.add("-h", computingHostIp); return command.execute(); } protected String doPingTest(final LibvirtComputingResource libvirtComputingResource, final String domRIp, final String vmIp) { - final Script command = new Script(libvirtComputingResource.getPingTestPath(), 10000, s_logger); + final Script command = new Script(libvirtComputingResource.getPingTestPath(), 10000, logger); command.add("-i", domRIp); command.add("-p", vmIp); return command.execute(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPlugNicCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPlugNicCommandWrapper.java index dffa8360c200..b0950376a93d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPlugNicCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPlugNicCommandWrapper.java @@ -30,7 +30,6 @@ import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.vm.VirtualMachine; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.LibvirtException; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = PlugNicCommand.class) public final class LibvirtPlugNicCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtPlugNicCommandWrapper.class); @Override public 
Answer execute(final PlugNicCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -57,7 +55,7 @@ public Answer execute(final PlugNicCommand command, final LibvirtComputingResour Integer nicnum = 0; for (final InterfaceDef pluggedNic : pluggedNics) { if (pluggedNic.getMacAddress().equalsIgnoreCase(nic.getMac())) { - s_logger.debug("found existing nic for mac " + pluggedNic.getMacAddress() + " at index " + nicnum); + logger.debug("found existing nic for mac " + pluggedNic.getMacAddress() + " at index " + nicnum); return new PlugNicAnswer(command, true, "success"); } nicnum++; @@ -82,18 +80,18 @@ public Answer execute(final PlugNicCommand command, final LibvirtComputingResour return new PlugNicAnswer(command, true, "success"); } catch (final LibvirtException e) { final String msg = " Plug Nic failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new PlugNicAnswer(command, false, msg); } catch (final InternalErrorException e) { final String msg = " Plug Nic failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new PlugNicAnswer(command, false, msg); } finally { if (vm != null) { try { vm.free(); } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostCertificateRenewalCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostCertificateRenewalCommandWrapper.java index 5f8e2ca7a4ed..49e079348efb 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostCertificateRenewalCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostCertificateRenewalCommandWrapper.java @@ -19,7 +19,6 @@ import 
org.apache.cloudstack.ca.PostCertificateRenewalCommand; import org.apache.cloudstack.ca.SetupCertificateAnswer; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; @@ -30,14 +29,13 @@ @ResourceWrapper(handles = PostCertificateRenewalCommand.class) public final class LibvirtPostCertificateRenewalCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtPostCertificateRenewalCommandWrapper.class); @Override public Answer execute(final PostCertificateRenewalCommand command, final LibvirtComputingResource serverResource) { - s_logger.info("Restarting libvirt after certificate provisioning/renewal"); + logger.info("Restarting libvirt after certificate provisioning/renewal"); if (command != null) { final int timeout = 30000; - Script script = new Script(true, "service", timeout, s_logger); + Script script = new Script(true, "service", timeout, logger); script.add("libvirtd"); script.add("restart"); script.execute(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index 6292ca71c2ef..c8b205113465 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.storage.configdrive.ConfigDrive; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -52,7 +51,6 @@ @ResourceWrapper(handles = PrepareForMigrationCommand.class) public final 
class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtPrepareForMigrationCommandWrapper.class); @Override public Answer execute(final PrepareForMigrationCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -62,8 +60,8 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom return handleRollback(command, libvirtComputingResource); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preparing host for migrating " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Preparing host for migrating " + vm); } final NicTO[] nics = vm.getNics(); @@ -111,10 +109,10 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom secretConsumer = volume.getDetails().get(DiskTO.SECRET_CONSUMER_DETAIL); } String secretUuid = libvirtComputingResource.createLibvirtVolumeSecret(conn, secretConsumer, volumeObjectTO.getPassphrase()); - s_logger.debug(String.format("Created libvirt secret %s for disk %s", secretUuid, volumeObjectTO.getPath())); + logger.debug(String.format("Created libvirt secret %s for disk %s", secretUuid, volumeObjectTO.getPath())); volumeObjectTO.clearPassphrase(); } else { - s_logger.debug(String.format("disk %s has no passphrase or encryption", volumeObjectTO)); + logger.debug(String.format("disk %s has no passphrase or encryption", volumeObjectTO)); } } } @@ -130,7 +128,7 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { for (DpdkTO to : dpdkInterfaceMapping.values()) { String cmd = String.format("ovs-vsctl del-port %s", to.getPort()); - s_logger.debug("Removing DPDK port: " + to.getPort()); + logger.debug("Removing DPDK port: " + to.getPort()); Script.runSimpleBashScript(cmd); } } @@ -147,12 +145,12 @@ protected PrepareForMigrationAnswer createPrepareForMigrationAnswer(PrepareForMi PrepareForMigrationAnswer answer = 
new PrepareForMigrationAnswer(command); if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) { - s_logger.debug(String.format("Setting DPDK interface for the migration of VM [%s].", vm)); + logger.debug(String.format("Setting DPDK interface for the migration of VM [%s].", vm)); answer.setDpdkInterfaceMapping(dpdkInterfaceMapping); } int newCpuShares = libvirtComputingResource.calculateCpuShares(vm); - s_logger.debug(String.format("Setting CPU shares to [%s] for the migration of VM [%s].", newCpuShares, vm)); + logger.debug(String.format("Setting CPU shares to [%s] for the migration of VM [%s].", newCpuShares, vm)); answer.setNewVmCpuShares(newCpuShares); return answer; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java index 683730890380..601a3da8642f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareUnmanageVMInstanceCommandWrapper.java @@ -22,27 +22,25 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; @ResourceWrapper(handles=PrepareUnmanageVMInstanceCommand.class) public final class LibvirtPrepareUnmanageVMInstanceCommandWrapper extends CommandWrapper { - private static final Logger LOGGER = Logger.getLogger(LibvirtPrepareUnmanageVMInstanceCommandWrapper.class); @Override public PrepareUnmanageVMInstanceAnswer execute(PrepareUnmanageVMInstanceCommand command, LibvirtComputingResource libvirtComputingResource) { final String vmName = command.getInstanceName(); 
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper(); - LOGGER.debug(String.format("Verify if KVM instance: [%s] is available before Unmanaging VM.", vmName)); + logger.debug(String.format("Verify if KVM instance: [%s] is available before Unmanaging VM.", vmName)); try { final Connect conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName); final Domain domain = libvirtComputingResource.getDomain(conn, vmName); if (domain == null) { - LOGGER.error("Prepare Unmanage VMInstanceCommand: vm not found " + vmName); + logger.error("Prepare Unmanage VMInstanceCommand: vm not found " + vmName); new PrepareUnmanageVMInstanceAnswer(command, false, String.format("Cannot find VM with name [%s] in KVM host.", vmName)); } } catch (Exception e){ - LOGGER.error("PrepareUnmanagedInstancesCommand failed due to " + e.getMessage()); + logger.error("PrepareUnmanagedInstancesCommand failed due to " + e.getMessage()); return new PrepareUnmanageVMInstanceAnswer(command, false, "Error: " + e.getMessage()); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPvlanSetupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPvlanSetupCommandWrapper.java index 91fb9242fb98..3e4baa9ae3f1 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPvlanSetupCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPvlanSetupCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import org.joda.time.Duration; import com.cloud.agent.api.Answer; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = PvlanSetupCommand.class) public final class LibvirtPvlanSetupCommandWrapper extends CommandWrapper { - private static final Logger s_logger = 
Logger.getLogger(LibvirtPvlanSetupCommandWrapper.class); @Override public Answer execute(final PvlanSetupCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -56,30 +54,30 @@ public Answer execute(final PvlanSetupCommand command, final LibvirtComputingRes if (command.getType() == PvlanSetupCommand.Type.DHCP) { final String ovsPvlanDhcpHostPath = libvirtComputingResource.getOvsPvlanDhcpHostPath(); - final Script script = new Script(ovsPvlanDhcpHostPath, timeout, s_logger); + final Script script = new Script(ovsPvlanDhcpHostPath, timeout, logger); script.add(opr, pvlanType, "-b", guestBridgeName, "-p", primaryPvlan, "-s", isolatedPvlan, "-m", dhcpMac, "-d", dhcpIp); result = script.execute(); if (result != null) { - s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac); + logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac); } else { - s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac); + logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac); } } // We run this even for DHCP servers since they're all vms after all final String ovsPvlanVmPath = libvirtComputingResource.getOvsPvlanVmPath(); - final Script script = new Script(ovsPvlanVmPath, timeout, s_logger); + final Script script = new Script(ovsPvlanVmPath, timeout, logger); script.add(opr, pvlanType, "-b", guestBridgeName, "-p", primaryPvlan, "-s", isolatedPvlan, "-m", vmMac); result = script.execute(); if (result != null) { - s_logger.warn("Failed to program pvlan for vm with mac " + vmMac); + logger.warn("Failed to program pvlan for vm with mac " + vmMac); return new Answer(command, false, result); } else { - s_logger.info("Programmed pvlan for vm with mac " + vmMac); + logger.info("Programmed pvlan for vm with mac " + vmMac); } return new Answer(command, true, result); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java index c0089c0e3a69..0b0f69f3eed5 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReadyCommandWrapper.java @@ -33,12 +33,10 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.utils.script.Script; -import org.apache.log4j.Logger; @ResourceWrapper(handles = ReadyCommand.class) public final class LibvirtReadyCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtReadyCommandWrapper.class); @Override public Answer execute(final ReadyCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -57,9 +55,9 @@ private boolean hostSupportsUefi(boolean isUbuntuHost) { if (isUbuntuHost) { cmd = "dpkg -l ovmf"; } - s_logger.debug("Running command : [" + cmd + "] with timeout : " + timeout + " ms"); + logger.debug("Running command : [" + cmd + "] with timeout : " + timeout + " ms"); int result = Script.runSimpleBashScriptForExitValue(cmd, timeout, false); - s_logger.debug("Got result : " + result); + logger.debug("Got result : " + result); return result == 0; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRebootCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRebootCommandWrapper.java index 15a3be4167a6..87617cb2c543 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRebootCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRebootCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import org.libvirt.Connect; import 
org.libvirt.LibvirtException; @@ -34,7 +33,6 @@ @ResourceWrapper(handles = RebootCommand.class) public final class LibvirtRebootCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtRebootCommandWrapper.class); @Override public Answer execute(final RebootCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -49,7 +47,7 @@ public Answer execute(final RebootCommand command, final LibvirtComputingResourc try { vncPort = libvirtComputingResource.getVncPort(conn, command.getVmName()); } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + logger.trace("Ignoring libvirt error.", e); } if (vmSpec != null) { libvirtComputingResource.applyDefaultNetworkRules(conn, vmSpec, false); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReplugNicCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReplugNicCommandWrapper.java index 558c7f0441ca..f22cfd2a96f9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReplugNicCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtReplugNicCommandWrapper.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.LibvirtException; @@ -38,7 +37,6 @@ @ResourceWrapper(handles = ReplugNicCommand.class) public final class LibvirtReplugNicCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtReplugNicCommandWrapper.class); public enum DomainAffect { CURRENT(0), LIVE(1), CONFIG(2), BOTH(3); @@ -78,15 +76,15 @@ public Answer execute(final ReplugNicCommand command, final LibvirtComputingReso int i = 0; do { i++; - s_logger.debug("ReplugNic: Detaching interface" + oldPluggedNic + " (Attempt: " + 
i + ")"); + logger.debug("ReplugNic: Detaching interface" + oldPluggedNic + " (Attempt: " + i + ")"); vm.detachDevice(oldPluggedNic.toString()); } while (findPluggedNic(libvirtComputingResource, nic, vmName, conn) != null && i <= 10); - s_logger.debug("ReplugNic: Attaching interface" + interfaceDef); + logger.debug("ReplugNic: Attaching interface" + interfaceDef); vm.attachDevice(interfaceDef.toString()); interfaceDef.setLinkStateUp(true); - s_logger.debug("ReplugNic: Updating interface" + interfaceDef); + logger.debug("ReplugNic: Updating interface" + interfaceDef); vm.updateDeviceFlags(interfaceDef.toString(), DomainAffect.LIVE.getValue()); // We don't know which "traffic type" is associated with @@ -98,14 +96,14 @@ public Answer execute(final ReplugNicCommand command, final LibvirtComputingReso return new ReplugNicAnswer(command, true, "success"); } catch (final LibvirtException | InternalErrorException e) { final String msg = " Plug Nic failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new ReplugNicAnswer(command, false, msg); } finally { if (vm != null) { try { vm.free(); } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java index 4f1ad728b5d9..6a3901e345cf 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.cloudstack.utils.qemu.QemuObject; -import 
org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo; @@ -63,7 +62,6 @@ @ResourceWrapper(handles = ResizeVolumeCommand.class) public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtResizeVolumeCommandWrapper.class); @Override public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -77,7 +75,7 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR if ( currentSize == newSize) { // nothing to do - s_logger.info("No need to resize volume: current size " + toHumanReadableSize(currentSize) + " is same as new size " + toHumanReadableSize(newSize)); + logger.info("No need to resize volume: current size " + toHumanReadableSize(currentSize) + " is same as new size " + toHumanReadableSize(newSize)); return new ResizeVolumeAnswer(command, true, "success", currentSize); } @@ -111,15 +109,15 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR return new ResizeVolumeAnswer(command, false, "Unable to shrink volumes of type " + type); } } else { - s_logger.debug("Volume " + path + " is on a RBD/Linstor storage pool. No need to query for additional information."); + logger.debug("Volume " + path + " is on a RBD/Linstor storage pool. 
No need to query for additional information."); } - s_logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk); + logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk); /* libvirt doesn't support resizing (C)LVM devices, and corrupts QCOW2 in some scenarios, so we have to do these via qemu-img */ if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.Linstor && pool.getType() != StoragePoolType.PowerFlex && vol.getFormat() != PhysicalDiskFormat.QCOW2) { - s_logger.debug("Volume " + path + " can be resized by libvirt. Asking libvirt to resize the volume."); + logger.debug("Volume " + path + " can be resized by libvirt. Asking libvirt to resize the volume."); try { final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper(); @@ -147,12 +145,12 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR with both encrypted and non-encrypted volumes. 
*/ if (!vmIsRunning && command.getPassphrase() != null && command.getPassphrase().length > 0 ) { - s_logger.debug("Invoking qemu-img to resize an offline, encrypted volume"); + logger.debug("Invoking qemu-img to resize an offline, encrypted volume"); QemuObject.EncryptFormat encryptFormat = QemuObject.EncryptFormat.enumValue(command.getEncryptFormat()); resizeEncryptedQcowFile(vol, encryptFormat,newSize, command.getPassphrase(), libvirtComputingResource); } else { - s_logger.debug("Invoking resize script to handle type " + type); - final Script resizecmd = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), s_logger); + logger.debug("Invoking resize script to handle type " + type); + final Script resizecmd = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), logger); resizecmd.add("-s", String.valueOf(newSize)); resizecmd.add("-c", String.valueOf(currentSize)); resizecmd.add("-p", path); @@ -174,11 +172,11 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid()); pool.refresh(); final long finalSize = pool.getPhysicalDisk(volumeId).getVirtualSize(); - s_logger.debug("after resize, size reports as: " + toHumanReadableSize(finalSize) + ", requested: " + toHumanReadableSize(newSize)); + logger.debug("after resize, size reports as: " + toHumanReadableSize(finalSize) + ", requested: " + toHumanReadableSize(newSize)); return new ResizeVolumeAnswer(command, true, "success", finalSize); } catch (final CloudRuntimeException e) { final String error = "Failed to resize volume: " + e.getMessage(); - s_logger.debug(error); + logger.debug(error); return new ResizeVolumeAnswer(command, false, error); } finally { command.clearPassphrase(); @@ -192,7 +190,7 @@ private boolean isVmRunning(final String vmName, final LibvirtComputingResource Domain dom = conn.domainLookupByName(vmName); 
return (dom != null && dom.getInfo().state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING); } catch (LibvirtException ex) { - s_logger.info(String.format("Did not find a running VM '%s'", vmName)); + logger.info(String.format("Did not find a running VM '%s'", vmName)); } return false; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreVMSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreVMSnapshotCommandWrapper.java index ce8c2095660f..0b0187ae668c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreVMSnapshotCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.Map; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.LibvirtException; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = RestoreVMSnapshotCommand.class) public final class LibvirtRestoreVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtRestoreVMSnapshotCommandWrapper.class); @Override public Answer execute(final RestoreVMSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -65,7 +63,7 @@ public Answer execute(final RestoreVMSnapshotCommand cmd, final LibvirtComputing for (VMSnapshotTO snapshot: snapshots) { VMSnapshotTO parent = snapshotAndParents.get(snapshot.getId()); String vmSnapshotXML = libvirtUtilitiesHelper.generateVMSnapshotXML(snapshot, parent, xmlDesc); - s_logger.debug("Restoring vm snapshot " + snapshot.getSnapshotName() + " on " + vmName + " with XML:\n " + vmSnapshotXML); + logger.debug("Restoring vm snapshot " + snapshot.getSnapshotName() + " on " + vmName + " with XML:\n " + 
vmSnapshotXML); try { int flags = 1; // VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE = 1 if (snapshot.getCurrent()) { @@ -73,7 +71,7 @@ public Answer execute(final RestoreVMSnapshotCommand cmd, final LibvirtComputing } dm.snapshotCreateXML(vmSnapshotXML, flags); } catch (LibvirtException e) { - s_logger.debug("Failed to restore vm snapshot " + snapshot.getSnapshotName() + " on " + vmName); + logger.debug("Failed to restore vm snapshot " + snapshot.getSnapshotName() + " on " + vmName); return new RestoreVMSnapshotAnswer(cmd, false, e.toString()); } } @@ -81,14 +79,14 @@ public Answer execute(final RestoreVMSnapshotCommand cmd, final LibvirtComputing return new RestoreVMSnapshotAnswer(cmd, listVolumeTo, vmState); } catch (LibvirtException e) { String msg = " Restore snapshot failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new RestoreVMSnapshotAnswer(cmd, false, msg); } finally { if (dm != null) { try { dm.free(); } catch (LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); }; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java index 4071c1bcb453..d2f1ef827a15 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.ceph.rados.IoCTX; import com.ceph.rados.Rados; @@ -58,7 +57,6 @@ @ResourceWrapper(handles = RevertSnapshotCommand.class) 
public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtRevertSnapshotCommandWrapper.class); private static final String MON_HOST = "mon_host"; private static final String KEY = "key"; private static final String CLIENT_MOUNT_TIMEOUT = "client_mount_timeout"; @@ -103,7 +101,7 @@ public Answer execute(final RevertSnapshotCommand command, final LibvirtComputin IoCTX io = rados.ioCtxCreate(primaryPool.getSourceDir()); Rbd rbd = new Rbd(io); - s_logger.debug(String.format("Attempting to rollback RBD snapshot [name:%s], [volumeid:%s], [snapshotid:%s]", snapshot.getName(), volumePath, rbdSnapshotId)); + logger.debug(String.format("Attempting to rollback RBD snapshot [name:%s], [volumeid:%s], [snapshotid:%s]", snapshot.getName(), volumePath, rbdSnapshotId)); RbdImage image = rbd.open(volumePath); image.snapRollBack(rbdSnapshotId); @@ -117,13 +115,13 @@ public Answer execute(final RevertSnapshotCommand command, final LibvirtComputin } if (primaryPool.getType() == StoragePoolType.CLVM) { - Script cmd = new Script(libvirtComputingResource.manageSnapshotPath(), libvirtComputingResource.getCmdsTimeout(), s_logger); + Script cmd = new Script(libvirtComputingResource.manageSnapshotPath(), libvirtComputingResource.getCmdsTimeout(), logger); cmd.add("-v", getFullPathAccordingToStorage(secondaryStoragePool, snapshotRelPath)); cmd.add("-n", snapshotDisk.getName()); cmd.add("-p", snapshotDisk.getPath()); String result = cmd.execute(); if (result != null) { - s_logger.debug("Failed to revert snaptshot: " + result); + logger.debug("Failed to revert snaptshot: " + result); return new Answer(command, false, result); } } else { @@ -135,10 +133,10 @@ public Answer execute(final RevertSnapshotCommand command, final LibvirtComputin } catch (CloudRuntimeException e) { return new Answer(command, false, e.toString()); } catch (RadosException e) { - s_logger.error("Failed to connect to Rados pool while trying to 
revert snapshot. Exception: ", e); + logger.error("Failed to connect to Rados pool while trying to revert snapshot. Exception: ", e); return new Answer(command, false, e.toString()); } catch (RbdException e) { - s_logger.error("Failed to connect to revert snapshot due to RBD exception: ", e); + logger.error("Failed to connect to revert snapshot due to RBD exception: ", e); return new Answer(command, false, e.toString()); } } @@ -163,11 +161,11 @@ protected void revertVolumeToSnapshot(SnapshotObjectTO snapshotOnPrimaryStorage, String snapshotPath = resultGetSnapshot.first(); SnapshotObjectTO snapshotToPrint = resultGetSnapshot.second(); - s_logger.debug(String.format("Reverting volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint)); + logger.debug(String.format("Reverting volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint)); try { replaceVolumeWithSnapshot(volumePath, snapshotPath); - s_logger.debug(String.format("Successfully reverted volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint)); + logger.debug(String.format("Successfully reverted volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint)); } catch (IOException ex) { throw new CloudRuntimeException(String.format("Unable to revert volume [%s] to snapshot [%s] due to [%s].", volumeObjectTo, snapshotToPrint, ex.getMessage()), ex); } @@ -192,8 +190,8 @@ protected Pair getSnapshot(SnapshotObjectTO snapshotOn snapshotOnSecondaryStorage, snapshotOnSecondaryStorage.getVolume())); } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Snapshot does not exists on primary storage [%s], searching snapshot [%s] on secondary storage [%s].", + if (logger.isTraceEnabled()) { + logger.trace(String.format("Snapshot does not exists on primary storage [%s], searching snapshot [%s] on secondary storage [%s].", kvmStoragePoolPrimary, snapshotOnSecondaryStorage, kvmStoragePoolSecondary)); } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertToVMSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertToVMSnapshotCommandWrapper.java index 086d6ef7a89f..02dc80364ac4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertToVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertToVMSnapshotCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainSnapshot; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = RevertToVMSnapshotCommand.class) public final class LibvirtRevertToVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtRevertToVMSnapshotCommandWrapper.class); @Override public Answer execute(final RevertToVMSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -80,14 +78,14 @@ public Answer execute(final RevertToVMSnapshotCommand cmd, final LibvirtComputin return new RevertToVMSnapshotAnswer(cmd, listVolumeTo, vmState); } catch (LibvirtException e) { String msg = " Revert to VM snapshot failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new RevertToVMSnapshotAnswer(cmd, false, msg); } finally { if (dm != null) { try { dm.free(); } catch (LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); }; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java 
index 6c83c4d9f066..348151557110 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevokeDirectDownloadCertificateWrapper.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.agent.directdownload.RevokeDirectDownloadCertificateCommand; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.File; import java.io.FileNotFoundException; @@ -38,7 +37,6 @@ @ResourceWrapper(handles = RevokeDirectDownloadCertificateCommand.class) public class LibvirtRevokeDirectDownloadCertificateWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtRevokeDirectDownloadCertificateWrapper.class); /** * Retrieve agent.properties file @@ -60,7 +58,7 @@ private String getKeystorePassword(File agentFile) { try { pass = PropertiesUtil.loadFromFile(agentFile).getProperty(KeyStoreUtils.KS_PASSPHRASE_PROPERTY); } catch (IOException e) { - s_logger.error("Could not get 'keystore.passphrase' property value due to: " + e.getMessage()); + logger.error("Could not get 'keystore.passphrase' property value due to: " + e.getMessage()); } } return pass; @@ -89,15 +87,15 @@ public Answer execute(RevokeDirectDownloadCertificateCommand command, LibvirtCom certificateAlias, keyStoreFile, privatePassword); int existsCmdResult = Script.runSimpleBashScriptForExitValue(checkCmd); if (existsCmdResult == 1) { - s_logger.error("Certificate alias " + certificateAlias + " does not exist, no need to revoke it"); + logger.error("Certificate alias " + certificateAlias + " does not exist, no need to revoke it"); } else { String revokeCmd = String.format("keytool -delete -alias %s -keystore %s -storepass %s", certificateAlias, keyStoreFile, privatePassword); - s_logger.debug("Revoking certificate alias " 
+ certificateAlias + " from keystore " + keyStoreFile); + logger.debug("Revoking certificate alias " + certificateAlias + " from keystore " + keyStoreFile); Script.runSimpleBashScriptForExitValue(revokeCmd); } } catch (FileNotFoundException | CloudRuntimeException e) { - s_logger.error("Error while setting up certificate " + certificateAlias, e); + logger.error("Error while setting up certificate " + certificateAlias, e); return new Answer(command, false, e.getMessage()); } return new Answer(command); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRollingMaintenanceCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRollingMaintenanceCommandWrapper.java index a1b1af60c9f6..e56386ad3ad3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRollingMaintenanceCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRollingMaintenanceCommandWrapper.java @@ -28,14 +28,12 @@ import com.cloud.resource.ResourceWrapper; import com.cloud.resource.RollingMaintenanceManager; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.io.File; @ResourceWrapper(handles = RollingMaintenanceCommand.class) public class LibvirtRollingMaintenanceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtRollingMaintenanceCommandWrapper.class); @Override public RollingMaintenanceAnswer execute(RollingMaintenanceCommand command, LibvirtComputingResource resource) { @@ -49,16 +47,16 @@ public RollingMaintenanceAnswer execute(RollingMaintenanceCommand command, Libvi if (command.isCheckMaintenanceScript()) { return new RollingMaintenanceAnswer(command, scriptFile != null); } else if (scriptFile == null) { - s_logger.info("No script file defined for stage " + stage + ". 
Skipping stage..."); + logger.info("No script file defined for stage " + stage + ". Skipping stage..."); return new RollingMaintenanceAnswer(command, true, "Skipped stage " + stage, true); } if (command.isStarted() && executor instanceof RollingMaintenanceAgentExecutor) { String msg = "Stage has been started previously and the agent restarted, setting stage as finished"; - s_logger.info(msg); + logger.info(msg); return new RollingMaintenanceAnswer(command, true, msg, true); } - s_logger.info("Processing stage " + stage); + logger.info("Processing stage " + stage); if (!command.isStarted()) { executor.startStageExecution(stage, scriptFile, timeout, payload); } @@ -69,10 +67,10 @@ public RollingMaintenanceAnswer execute(RollingMaintenanceCommand command, Libvi String output = executor.getStageExecutionOutput(stage, scriptFile); RollingMaintenanceAnswer answer = new RollingMaintenanceAnswer(command, success, output, true); if (executor.getStageAvoidMaintenance(stage, scriptFile)) { - s_logger.info("Avoid maintenance flag added to the answer for the stage " + stage); + logger.info("Avoid maintenance flag added to the answer for the stage " + stage); answer.setAvoidMaintenance(true); } - s_logger.info("Finished processing stage " + stage); + logger.info("Finished processing stage " + stage); return answer; } catch (CloudRuntimeException e) { return new RollingMaintenanceAnswer(command, false, e.getMessage(), false); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSecurityGroupRulesCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSecurityGroupRulesCommandWrapper.java index 3f8aebaf2516..33164264ae80 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSecurityGroupRulesCommandWrapper.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSecurityGroupRulesCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = SecurityGroupRulesCmd.class) public final class LibvirtSecurityGroupRulesCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtSecurityGroupRulesCommandWrapper.class); @Override public Answer execute(final SecurityGroupRulesCmd command, final LibvirtComputingResource libvirtComputingResource) { @@ -54,7 +52,7 @@ public Answer execute(final SecurityGroupRulesCmd command, final LibvirtComputin final VirtualMachineTO vm = command.getVmTO(); if (!libvirtComputingResource.applyDefaultNetworkRules(conn, vm, true)) { - s_logger.warn("Failed to program default network rules for vm " + command.getVmName()); + logger.warn("Failed to program default network rules for vm " + command.getVmName()); return new SecurityGroupRuleAnswer(command, false, "programming default network rules failed"); } } catch (final LibvirtException e) { @@ -65,10 +63,10 @@ public Answer execute(final SecurityGroupRulesCmd command, final LibvirtComputin Long.toString(command.getSeqNum()), command.getGuestMac(), command.stringifyRules(), vif, brname, command.getSecIpsString()); if (!result) { - s_logger.warn("Failed to program network rules for vm " + command.getVmName()); + logger.warn("Failed to program network rules for vm " + command.getVmName()); return new SecurityGroupRuleAnswer(command, false, "programming network rules failed"); } else { - s_logger.debug("Programmed network rules for vm " + command.getVmName() + " guestIp=" + command.getGuestIp() + ",ingress numrules=" + logger.debug("Programmed network rules for vm " + command.getVmName() + " guestIp=" + command.getGuestIp() + ",ingress numrules=" + command.getIngressRuleSet().size() + 
",egress numrules=" + command.getEgressRuleSet().size()); return new SecurityGroupRuleAnswer(command); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java index fff8da7c4ea3..eb4e6be7609e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupDirectDownloadCertificateCommandWrapper.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.agent.directdownload.SetupDirectDownloadCertificateCommand; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.File; import java.io.FileNotFoundException; @@ -39,7 +38,6 @@ public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends Command private static final String temporaryCertFilePrefix = "CSCERTIFICATE"; - private static final Logger s_logger = Logger.getLogger(LibvirtSetupDirectDownloadCertificateCommandWrapper.class); /** * Retrieve agent.properties file @@ -61,7 +59,7 @@ private String getKeystorePassword(File agentFile) { try { pass = PropertiesUtil.loadFromFile(agentFile).getProperty(KeyStoreUtils.KS_PASSPHRASE_PROPERTY); } catch (IOException e) { - s_logger.error("Could not get 'keystore.passphrase' property value due to: " + e.getMessage()); + logger.error("Could not get 'keystore.passphrase' property value due to: " + e.getMessage()); } } return pass; @@ -78,12 +76,12 @@ private String getKeyStoreFilePath(File agentFile) { * Import certificate from temporary file into keystore */ private void importCertificate(String tempCerFilePath, String keyStoreFile, String certificateName, String 
privatePassword) { - s_logger.debug("Importing certificate from temporary file to keystore"); + logger.debug("Importing certificate from temporary file to keystore"); String importCommandFormat = "keytool -importcert -file %s -keystore %s -alias '%s' -storepass '%s' -noprompt"; String importCmd = String.format(importCommandFormat, tempCerFilePath, keyStoreFile, certificateName, privatePassword); int result = Script.runSimpleBashScriptForExitValue(importCmd); if (result != 0) { - s_logger.debug("Certificate " + certificateName + " not imported as it already exist on keystore"); + logger.debug("Certificate " + certificateName + " not imported as it already exist on keystore"); } } @@ -93,7 +91,7 @@ private void importCertificate(String tempCerFilePath, String keyStoreFile, Stri private String createTemporaryFile(File agentFile, String certificateName, String certificate) { String tempCerFilePath = String.format("%s/%s-%s", agentFile.getParent(), temporaryCertFilePrefix, certificateName); - s_logger.debug("Creating temporary certificate file into: " + tempCerFilePath); + logger.debug("Creating temporary certificate file into: " + tempCerFilePath); int result = Script.runSimpleBashScriptForExitValue(String.format("echo '%s' > %s", certificate, tempCerFilePath)); if (result != 0) { throw new CloudRuntimeException("Could not create the certificate file on path: " + tempCerFilePath); @@ -105,7 +103,7 @@ private String createTemporaryFile(File agentFile, String certificateName, Strin * Remove temporary file */ private void cleanupTemporaryFile(String temporaryFile) { - s_logger.debug("Cleaning up temporary certificate file"); + logger.debug("Cleaning up temporary certificate file"); Script.runSimpleBashScript("rm -f " + temporaryFile); } @@ -126,7 +124,7 @@ public Answer execute(SetupDirectDownloadCertificateCommand cmd, LibvirtComputin importCertificate(temporaryFile, keyStoreFile, certificateName, privatePassword); cleanupTemporaryFile(temporaryFile); } catch 
(FileNotFoundException | CloudRuntimeException e) { - s_logger.error("Error while setting up certificate " + certificateName, e); + logger.error("Error while setting up certificate " + certificateName, e); return new Answer(cmd, false, e.getMessage()); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupPersistentNetworkCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupPersistentNetworkCommandWrapper.java index a2ec64451a7c..66be619777dd 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupPersistentNetworkCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtSetupPersistentNetworkCommandWrapper.java @@ -17,7 +17,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import com.cloud.agent.api.Answer; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = SetupPersistentNetworkCommand.class) public class LibvirtSetupPersistentNetworkCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtSetupPersistentNetworkCommandWrapper.class); @Override public Answer execute(SetupPersistentNetworkCommand command, LibvirtComputingResource serverResource) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java index 7b69993f2e5e..32d687ff98c4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java @@ -24,7 +24,6 @@ import com.cloud.agent.resource.virtualnetwork.VRScripts; import 
com.cloud.utils.FileUtil; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.DomainInfo.DomainState; import org.libvirt.LibvirtException; @@ -49,7 +48,6 @@ @ResourceWrapper(handles = StartCommand.class) public final class LibvirtStartCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtStartCommandWrapper.class); @Override public Answer execute(final StartCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -83,7 +81,7 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource libvirtComputingResource.createVifs(vmSpec, vm); - s_logger.debug("starting " + vmName + ": " + vm.toString()); + logger.debug("starting " + vmName + ": " + vm.toString()); String vmInitialSpecification = vm.toString(); String vmFinalSpecification = performXmlTransformHook(vmInitialSpecification, libvirtComputingResource); libvirtComputingResource.startVM(conn, vmName, vmFinalSpecification); @@ -124,12 +122,12 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource FileUtil.scpPatchFiles(controlIp, VRScripts.CONFIG_CACHE_LOCATION, Integer.parseInt(LibvirtComputingResource.DEFAULTDOMRSSHPORT), pemFile, LibvirtComputingResource.systemVmPatchFiles, LibvirtComputingResource.BASEPATH); if (!virtRouterResource.isSystemVMSetup(vmName, controlIp)) { String errMsg = "Failed to patch systemVM"; - s_logger.error(errMsg); + logger.error(errMsg); return new StartAnswer(command, errMsg); } } catch (Exception e) { String errMsg = "Failed to scp files to system VM. 
Patching of systemVM failed"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return new StartAnswer(command, String.format("%s due to: %s", errMsg, e.getMessage())); } } @@ -138,19 +136,19 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource state = DomainState.VIR_DOMAIN_RUNNING; return new StartAnswer(command); } catch (final LibvirtException e) { - s_logger.warn("LibvirtException ", e); + logger.warn("LibvirtException ", e); if (conn != null) { libvirtComputingResource.handleVmStartFailure(conn, vmName, vm); } return new StartAnswer(command, e.getMessage()); } catch (final InternalErrorException e) { - s_logger.warn("InternalErrorException ", e); + logger.warn("InternalErrorException ", e); if (conn != null) { libvirtComputingResource.handleVmStartFailure(conn, vmName, vm); } return new StartAnswer(command, e.getMessage()); } catch (final URISyntaxException e) { - s_logger.warn("URISyntaxException ", e); + logger.warn("URISyntaxException ", e); if (conn != null) { libvirtComputingResource.handleVmStartFailure(conn, vmName, vm); } @@ -167,7 +165,7 @@ private void performAgentStartHook(String vmName, LibvirtComputingResource libvi LibvirtKvmAgentHook onStartHook = libvirtComputingResource.getStartHook(); onStartHook.handle(vmName); } catch (Exception e) { - s_logger.warn("Exception occurred when handling LibVirt VM onStart hook: {}", e); + logger.warn("Exception occurred when handling LibVirt VM onStart hook: {}", e); } } @@ -178,11 +176,11 @@ private String performXmlTransformHook(String vmInitialSpecification, final Libv LibvirtKvmAgentHook t = libvirtComputingResource.getTransformer(); vmFinalSpecification = (String) t.handle(vmInitialSpecification); if (null == vmFinalSpecification) { - s_logger.warn("Libvirt XML transformer returned NULL, will use XML specification unchanged."); + logger.warn("Libvirt XML transformer returned NULL, will use XML specification unchanged."); vmFinalSpecification = vmInitialSpecification; } 
} catch(Exception e) { - s_logger.warn("Exception occurred when handling LibVirt XML transformer hook: {}", e); + logger.warn("Exception occurred when handling LibVirt XML transformer hook: {}", e); vmFinalSpecification = vmInitialSpecification; } return vmFinalSpecification; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java index 7ee6ccddf66e..8b3942f5ebad 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStopCommandWrapper.java @@ -30,7 +30,6 @@ import com.cloud.utils.ssh.SshHelper; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo.DomainState; @@ -49,7 +48,6 @@ @ResourceWrapper(handles = StopCommand.class) public final class LibvirtStopCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtStopCommandWrapper.class); private static final String CMDLINE_PATH = "/var/cache/cloud/cmdline"; private static final String CMDLINE_BACKUP_PATH = "/var/cache/cloud/cmdline.backup"; @@ -67,21 +65,21 @@ public Answer execute(final StopCommand command, final LibvirtComputingResource return new StopAnswer(command, "vm is still running on host", false); } } catch (final Exception e) { - s_logger.debug("Failed to get vm status in case of checkboforecleanup is true", e); + logger.debug("Failed to get vm status in case of checkboforecleanup is true", e); } } File pemFile = new File(LibvirtComputingResource.SSHPRVKEYPATH); try { if(vmName.startsWith("s-") || vmName.startsWith("v-")){ //move the command line file to 
backup. - s_logger.debug("backing up the cmdline"); + logger.debug("backing up the cmdline"); try{ Pair ret = SshHelper.sshExecute(command.getControlIp(), 3922, "root", pemFile, null,"cp -f "+CMDLINE_PATH+" "+CMDLINE_BACKUP_PATH); if(!ret.first()){ - s_logger.debug("Failed to backup cmdline file due to "+ret.second()); + logger.debug("Failed to backup cmdline file due to "+ret.second()); } } catch (Exception e){ - s_logger.debug("Failed to backup cmdline file due to "+e.getMessage()); + logger.debug("Failed to backup cmdline file due to "+e.getMessage()); } } @@ -123,7 +121,7 @@ public Answer execute(final StopCommand command, final LibvirtComputingResource for (DpdkTO to : dpdkInterfaceMapping.values()) { String portToRemove = to.getPort(); String cmd = String.format("ovs-vsctl del-port %s", portToRemove); - s_logger.debug("Removing DPDK port: " + portToRemove); + logger.debug("Removing DPDK port: " + portToRemove); Script.runSimpleBashScript(cmd); } } @@ -141,16 +139,16 @@ public Answer execute(final StopCommand command, final LibvirtComputingResource return new StopAnswer(command, result, true); } catch (final LibvirtException e) { - s_logger.debug("unable to stop VM:"+vmName+" due to"+e.getMessage()); + logger.debug("unable to stop VM:"+vmName+" due to"+e.getMessage()); try{ if(vmName.startsWith("s-") || vmName.startsWith("v-")) - s_logger.debug("restoring cmdline file from backup"); + logger.debug("restoring cmdline file from backup"); Pair ret = SshHelper.sshExecute(command.getControlIp(), 3922, "root", pemFile, null, "mv "+CMDLINE_BACKUP_PATH+" "+CMDLINE_PATH); if(!ret.first()){ - s_logger.debug("unable to restore cmdline due to "+ret.second()); + logger.debug("unable to restore cmdline due to "+ret.second()); } }catch (final Exception ex){ - s_logger.debug("unable to restore cmdline due to:"+ex.getMessage()); + logger.debug("unable to restore cmdline due to:"+ex.getMessage()); } return new StopAnswer(command, e.getMessage(), false); } @@ -161,7 +159,7 @@ 
private void performAgentStopHook(String vmName, final LibvirtComputingResource LibvirtKvmAgentHook onStopHook = libvirtComputingResource.getStopHook(); onStopHook.handle(vmName); } catch (Exception e) { - s_logger.warn("Exception occurred when handling LibVirt VM onStop hook: {}", e); + logger.warn("Exception occurred when handling LibVirt VM onStop hook: {}", e); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnPlugNicCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnPlugNicCommandWrapper.java index e40563b291bf..e31589c0ca0a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnPlugNicCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUnPlugNicCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.LibvirtException; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = UnPlugNicCommand.class) public final class LibvirtUnPlugNicCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LibvirtUnPlugNicCommandWrapper.class); @Override public Answer execute(final UnPlugNicCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -73,14 +71,14 @@ public Answer execute(final UnPlugNicCommand command, final LibvirtComputingReso return new UnPlugNicAnswer(command, true, "success"); } catch (final LibvirtException e) { final String msg = " Unplug Nic failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new UnPlugNicAnswer(command, false, msg); } finally { if (vm != null) { try { vm.free(); } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); } } } diff 
--git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java index 86ab024cbb9b..a2d161ac94bf 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtUtilitiesHelper.java @@ -22,7 +22,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.Connect; import org.libvirt.LibvirtException; @@ -41,7 +42,7 @@ * and the methods wrapped here. */ public class LibvirtUtilitiesHelper { - private static final Logger s_logger = Logger.getLogger(LibvirtUtilitiesHelper.class); + protected static Logger LOGGER = LogManager.getLogger(LibvirtUtilitiesHelper.class); public static final int TIMEOUT = 10000; @@ -129,7 +130,7 @@ protected static Pair isLibvirtVersionEqualOrHigherThanVersionI return new Pair<>(String.valueOf(currentLibvirtVersion), currentLibvirtVersion >= version); } catch (LibvirtException ex) { String exceptionMessage = ex.getMessage(); - s_logger.error(String.format("Unable to validate if the Libvirt's version is equal or higher than [%s] due to [%s]. Returning 'false' as default'.", version, + LOGGER.error(String.format("Unable to validate if the Libvirt's version is equal or higher than [%s] due to [%s]. 
Returning 'false' as default'.", version, exceptionMessage), ex); return new Pair<>(String.format("Unknown due to [%s]", exceptionMessage), false); } @@ -140,7 +141,7 @@ protected static Pair isLibvirtVersionEqualOrHigherThanVersionI */ public static boolean isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit(Connect conn) { Pair result = isLibvirtVersionEqualOrHigherThanVersionInParameter(conn, LIBVIRT_VERSION_THAT_SUPPORTS_FLAG_DELETE_ON_COMMAND_VIRSH_BLOCKCOMMIT); - s_logger.debug(String.format("The current Libvirt's version [%s]%s supports the flag '--delete' on command 'virsh blockcommit'.", result.first(), + LOGGER.debug(String.format("The current Libvirt's version [%s]%s supports the flag '--delete' on command 'virsh blockcommit'.", result.first(), result.second() ? "" : " does not")); return result.second(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index 46cca60ac4ed..f023457461e9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -25,7 +25,8 @@ import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.LibvirtException; import com.cloud.agent.api.to.DiskTO; @@ -37,7 +38,7 @@ import com.cloud.utils.script.Script; public class IscsiAdmStorageAdaptor implements StorageAdaptor { - private static final Logger s_logger = Logger.getLogger(IscsiAdmStorageAdaptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final Map MapStorageUuidToStoragePool = new 
HashMap<>(); @@ -85,7 +86,7 @@ public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, KVMStoragePool pool @Override public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map details) { // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 -o new - Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); + Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, logger); iScsiAdmCmd.add("-m", "node"); iScsiAdmCmd.add("-T", getIqn(volumeUuid)); @@ -95,12 +96,12 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map(); - s_logger.debug("Initialize cleanup thread"); + logger.debug("Initialize cleanup thread"); iscsiStorageAdaptor = new IscsiAdmStorageAdaptor(); } @@ -62,7 +63,7 @@ protected void runInContext() { //populate all the iscsi disks currently attached to this host File[] iscsiVolumes = new File(ISCSI_PATH_PREFIX).listFiles(); if (iscsiVolumes == null || iscsiVolumes.length == 0) { - s_logger.debug("No iscsi sessions found for cleanup"); + logger.debug("No iscsi sessions found for cleanup"); return; } @@ -76,7 +77,7 @@ protected void runInContext() { disconnectInactiveSessions(); } catch (LibvirtException e) { - s_logger.warn("[ignored] Error trying to cleanup ", e); + logger.warn("[ignored] Error trying to cleanup ", e); } } @@ -92,7 +93,7 @@ private void initializeDiskStatusMap(File[] iscsiVolumes){ diskStatusMap.clear(); for( File v : iscsiVolumes) { if (isIscsiDisk(v.getAbsolutePath())) { - s_logger.debug("found iscsi disk by cleanup thread, marking inactive: " + v.getAbsolutePath()); + logger.debug("found iscsi disk by cleanup thread, marking inactive: " + v.getAbsolutePath()); diskStatusMap.put(v.getAbsolutePath(), false); } } @@ -105,7 +106,7 @@ private void initializeDiskStatusMap(File[] iscsiVolumes){ private void updateDiskStatusMapWithInactiveIscsiSessions(Connect conn){ try { int[] domains = conn.listDomains(); - s_logger.debug(String.format("found %d domains", 
domains.length)); + logger.debug(String.format("found %d domains", domains.length)); for (int domId : domains) { Domain dm = conn.domainLookupByID(domId); final String domXml = dm.getXMLDesc(0); @@ -117,12 +118,12 @@ private void updateDiskStatusMapWithInactiveIscsiSessions(Connect conn){ for (final LibvirtVMDef.DiskDef disk : disks) { if (diskStatusMap.containsKey(disk.getDiskPath())&&!disk.getDiskPath().matches(REGEX_PART)) { diskStatusMap.put(disk.getDiskPath(), true); - s_logger.debug("active disk found by cleanup thread" + disk.getDiskPath()); + logger.debug("active disk found by cleanup thread" + disk.getDiskPath()); } } } } catch (LibvirtException e) { - s_logger.warn("[ignored] Error trying to cleanup ", e); + logger.warn("[ignored] Error trying to cleanup ", e); } } @@ -141,10 +142,10 @@ private void disconnectInactiveSessions(){ if (!diskStatusMap.get(diskPath)) { if (Files.exists(Paths.get(diskPath))) { try { - s_logger.info("Cleaning up disk " + diskPath); + logger.info("Cleaning up disk " + diskPath); iscsiStorageAdaptor.disconnectPhysicalDiskByPath(diskPath); } catch (Exception e) { - s_logger.warn("[ignored] Error cleaning up " + diskPath, e); + logger.warn("[ignored] Error cleaning up " + diskPath, e); } } } @@ -159,7 +160,7 @@ public void run() { try { Thread.sleep(CLEANUP_INTERVAL_SEC * 1000); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted between heartbeats."); + logger.debug("[ignored] interrupted between heartbeats."); } Thread monitorThread = new Thread(new Monitor()); @@ -167,7 +168,7 @@ public void run() { try { monitorThread.join(); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted joining monitor."); + logger.debug("[ignored] interrupted joining monitor."); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index 
679407e2f58c..27f70b71ab4f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -32,7 +32,8 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.reflections.Reflections; import com.cloud.agent.api.to.DiskTO; @@ -48,7 +49,7 @@ import com.cloud.vm.VirtualMachine; public class KVMStoragePoolManager { - private static final Logger s_logger = Logger.getLogger(KVMStoragePoolManager.class); + protected Logger logger = LogManager.getLogger(getClass()); private class StoragePoolInformation { String name; @@ -106,13 +107,13 @@ public KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) { Reflections reflections = new Reflections("com.cloud.hypervisor.kvm.storage"); Set> storageAdaptorClasses = reflections.getSubTypesOf(StorageAdaptor.class); for (Class storageAdaptorClass : storageAdaptorClasses) { - s_logger.debug("Checking pool type for adaptor " + storageAdaptorClass.getName()); + logger.debug("Checking pool type for adaptor " + storageAdaptorClass.getName()); if (Modifier.isAbstract(storageAdaptorClass.getModifiers()) || storageAdaptorClass.isInterface()) { - s_logger.debug("Skipping registration of abstract class / interface " + storageAdaptorClass.getName()); + logger.debug("Skipping registration of abstract class / interface " + storageAdaptorClass.getName()); continue; } if (storageAdaptorClass.isAssignableFrom(LibvirtStorageAdaptor.class)) { - s_logger.debug("Skipping re-registration of LibvirtStorageAdaptor"); + logger.debug("Skipping re-registration of LibvirtStorageAdaptor"); continue; } try { @@ -131,9 +132,9 @@ public 
KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) { StoragePoolType storagePoolType = adaptor.getStoragePoolType(); if (storagePoolType != null) { if (this._storageMapper.containsKey(storagePoolType.toString())) { - s_logger.warn(String.format("Duplicate StorageAdaptor type %s, not loading %s", storagePoolType, storageAdaptorClass.getName())); + logger.warn(String.format("Duplicate StorageAdaptor type %s, not loading %s", storagePoolType, storageAdaptorClass.getName())); } else { - s_logger.info(String.format("Adding storage adaptor for %s", storageAdaptorClass.getName())); + logger.info(String.format("Adding storage adaptor for %s", storageAdaptorClass.getName())); this._storageMapper.put(storagePoolType.toString(), adaptor); } } @@ -143,7 +144,7 @@ public KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) { } for (Map.Entry adaptors : this._storageMapper.entrySet()) { - s_logger.debug("Registered a StorageAdaptor for " + adaptors.getKey()); + logger.debug("Registered a StorageAdaptor for " + adaptors.getKey()); } } @@ -187,7 +188,7 @@ public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails()); if (!result) { - s_logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString()); + logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString()); return result; } } @@ -196,7 +197,7 @@ public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { } public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { - s_logger.debug(String.format("Disconnect physical disks using volume map: %s", volumeToDisconnect.toString())); + logger.debug(String.format("Disconnect physical disks using volume map: %s", volumeToDisconnect.toString())); if (MapUtils.isEmpty(volumeToDisconnect)) { return false; } @@ -205,18 +206,18 @@ public boolean disconnectPhysicalDisk(Map 
volumeToDisconnect) { String poolType = volumeToDisconnect.get(DiskTO.PROTOCOL_TYPE); StorageAdaptor adaptor = _storageMapper.get(poolType); if (adaptor != null) { - s_logger.info(String.format("Disconnecting physical disk using the storage adaptor found for pool type: %s", poolType)); + logger.info(String.format("Disconnecting physical disk using the storage adaptor found for pool type: %s", poolType)); return adaptor.disconnectPhysicalDisk(volumeToDisconnect); } - s_logger.debug(String.format("Couldn't find the storage adaptor for pool type: %s to disconnect the physical disk, trying with others", poolType)); + logger.debug(String.format("Couldn't find the storage adaptor for pool type: %s to disconnect the physical disk, trying with others", poolType)); } for (Map.Entry set : _storageMapper.entrySet()) { StorageAdaptor adaptor = set.getValue(); if (adaptor.disconnectPhysicalDisk(volumeToDisconnect)) { - s_logger.debug(String.format("Disconnected physical disk using the storage adaptor for pool type: %s", set.getKey())); + logger.debug(String.format("Disconnected physical disk using the storage adaptor for pool type: %s", set.getKey())); return true; } } @@ -225,12 +226,12 @@ public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { } public boolean disconnectPhysicalDiskByPath(String path) { - s_logger.debug(String.format("Disconnect physical disk by path: %s", path)); + logger.debug(String.format("Disconnect physical disk by path: %s", path)); for (Map.Entry set : _storageMapper.entrySet()) { StorageAdaptor adaptor = set.getValue(); if (adaptor.disconnectPhysicalDiskByPath(path)) { - s_logger.debug(String.format("Disconnected physical disk by local path: %s, using the storage adaptor for pool type: %s", path, set.getKey())); + logger.debug(String.format("Disconnected physical disk by local path: %s, using the storage adaptor for pool type: %s", path, set.getKey())); return true; } } @@ -245,7 +246,7 @@ public boolean 
disconnectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { We may not know about these yet. This might mean that we can't use the vmspec map, because when we restart the agent we lose all of the info about running VMs. */ - s_logger.debug("disconnectPhysicalDiskViaVmSpec: Attempted to stop a VM that is not yet in our hash map"); + logger.debug("disconnectPhysicalDiskViaVmSpec: Attempted to stop a VM that is not yet in our hash map"); return true; } @@ -258,7 +259,7 @@ public boolean disconnectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { for (DiskTO disk : disks) { if (disk.getType() != Volume.Type.ISO) { - s_logger.debug("Disconnecting disk " + disk.getPath()); + logger.debug("Disconnecting disk " + disk.getPath()); VolumeObjectTO vol = (VolumeObjectTO)disk.getData(); PrimaryDataStoreTO store = (PrimaryDataStoreTO)vol.getDataStore(); @@ -266,7 +267,7 @@ public boolean disconnectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid()); if (pool == null) { - s_logger.error("Pool " + store.getUuid() + " of type " + store.getPoolType() + " was not found, skipping disconnect logic"); + logger.error("Pool " + store.getUuid() + " of type " + store.getPoolType() + " was not found, skipping disconnect logic"); continue; } @@ -277,7 +278,7 @@ public boolean disconnectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { boolean subResult = adaptor.disconnectPhysicalDisk(vol.getPath(), pool); if (!subResult) { - s_logger.error("Failed to disconnect disks via vm spec for vm: " + vmName + " volume:" + vol.toString()); + logger.error("Failed to disconnect disks via vm spec for vm: " + vmName + " volume:" + vol.toString()); result = false; } @@ -350,14 +351,14 @@ public KVMPhysicalDisk getPhysicalDisk(StoragePoolType type, String poolUuid, St return vol; } } catch (Exception e) { - s_logger.debug("Failed to find volume:" + volName + " due to " + e.toString() + ", retry:" + cnt); + logger.debug("Failed to find 
volume:" + volName + " due to " + e.toString() + ", retry:" + cnt); errMsg = e.toString(); } try { Thread.sleep(3000); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while trying to get storage pool."); + logger.debug("[ignored] interrupted while trying to get storage pool."); } cnt++; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index f958a1b181d4..c7261f30e8ae 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -78,7 +78,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo; @@ -135,7 +136,7 @@ import com.cloud.vm.VmDetailConstants; public class KVMStorageProcessor implements StorageProcessor { - private static final Logger s_logger = Logger.getLogger(KVMStorageProcessor.class); + protected Logger logger = LogManager.getLogger(getClass()); private final KVMStoragePoolManager storagePoolMgr; private final LibvirtComputingResource resource; private StorageLayer storageLayer; @@ -185,14 +186,14 @@ public boolean configure(final String name, final Map params) th @Override public SnapshotAndCopyAnswer snapshotAndCopy(final SnapshotAndCopyCommand cmd) { - s_logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for KVMStorageProcessor"); + logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for KVMStorageProcessor"); return new 
SnapshotAndCopyAnswer(); } @Override public ResignatureAnswer resignature(final ResignatureCommand cmd) { - s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for KVMStorageProcessor"); + logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for KVMStorageProcessor"); return new ResignatureAnswer(); } @@ -246,7 +247,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { /* Copy volume to primary storage */ tmplVol.setUseAsTemplate(); - s_logger.debug("Copying template to primary storage, template format is " + tmplVol.getFormat() ); + logger.debug("Copying template to primary storage, template format is " + tmplVol.getFormat() ); final KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); KVMPhysicalDisk primaryVol = null; @@ -254,11 +255,11 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { final VolumeObjectTO volume = (VolumeObjectTO)destData; // pass along volume's target size if it's bigger than template's size, for storage types that copy template rather than cloning on deploy if (volume.getSize() != null && volume.getSize() > tmplVol.getVirtualSize()) { - s_logger.debug("Using configured size of " + toHumanReadableSize(volume.getSize())); + logger.debug("Using configured size of " + toHumanReadableSize(volume.getSize())); tmplVol.setSize(volume.getSize()); tmplVol.setVirtualSize(volume.getSize()); } else { - s_logger.debug("Using template's size of " + toHumanReadableSize(tmplVol.getVirtualSize())); + logger.debug("Using template's size of " + toHumanReadableSize(tmplVol.getVirtualSize())); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); } else if (destData instanceof TemplateObjectTO) { @@ -269,13 +270,13 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { String path = details != null ? 
details.get("managedStoreTarget") : null; if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) { - s_logger.warn("Failed to disconnect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to disconnect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } } else { primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds()); @@ -321,7 +322,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { secondaryPool.delete(); } } catch(final Exception e) { - s_logger.debug("Failed to clean up secondary storage", e); + logger.debug("Failed to clean up secondary storage", e); } } } @@ -344,7 +345,7 @@ private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, fina secondaryPool.refresh(); final List disks = secondaryPool.listPhysicalDisks(); if (disks == null || disks.isEmpty()) { - s_logger.error("Failed to get volumes from pool: " + secondaryPool.getUuid()); + logger.error("Failed to get volumes from pool: " + secondaryPool.getUuid()); return null; } for (final KVMPhysicalDisk disk : disks) { @@ -354,7 +355,7 @@ private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, fina } } if (templateVol == null) { - s_logger.error("Failed to get template from pool: " + secondaryPool.getUuid()); + logger.error("Failed to get template from pool: " + 
secondaryPool.getUuid()); return null; } } else { @@ -364,17 +365,17 @@ private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, fina /* Copy volume to primary storage */ if (size > templateVol.getSize()) { - s_logger.debug("Overriding provided template's size with new size " + toHumanReadableSize(size)); + logger.debug("Overriding provided template's size with new size " + toHumanReadableSize(size)); templateVol.setSize(size); templateVol.setVirtualSize(size); } else { - s_logger.debug("Using templates disk size of " + toHumanReadableSize(templateVol.getVirtualSize()) + "since size passed was " + toHumanReadableSize(size)); + logger.debug("Using templates disk size of " + toHumanReadableSize(templateVol.getVirtualSize()) + "since size passed was " + toHumanReadableSize(size)); } final KVMPhysicalDisk primaryVol = storagePoolMgr.copyPhysicalDisk(templateVol, volUuid, primaryPool, timeout); return primaryVol; } catch (final CloudRuntimeException e) { - s_logger.error("Failed to download template to primary storage", e); + logger.error("Failed to download template to primary storage", e); return null; } finally { if (secondaryPool != null) { @@ -408,17 +409,17 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { String path = details != null ? 
details.get("managedStoreTarget") : null; if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { - s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); } BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); if (BaseVol == null) { - s_logger.debug("Failed to get the physical disk for base template volume at path: " + templatePath); + logger.debug("Failed to get the physical disk for base template volume at path: " + templatePath); throw new CloudRuntimeException("Failed to get the physical disk for base template volume at path: " + templatePath); } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - s_logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } vol = storagePoolMgr.copyPhysicalDisk(BaseVol, path != null ? 
path : volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds(), null, volume.getPassphrase(), volume.getProvisioningType()); @@ -454,7 +455,7 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to create volume: ", e); + logger.debug("Failed to create volume: ", e); return new CopyCmdAnswer(e.toString()); } finally { volume.clearPassphrase(); @@ -524,7 +525,7 @@ public Answer copyVolumeFromImageCacheToPrimary(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to copyVolumeFromImageCacheToPrimary: ", e); + logger.debug("Failed to copyVolumeFromImageCacheToPrimary: ", e); return new CopyCmdAnswer(e.toString()); } finally { @@ -572,7 +573,7 @@ public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) { newVol.setFormat(destFormat); return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to copyVolumeFromPrimaryToSecondary: ", e); + logger.debug("Failed to copyVolumeFromPrimaryToSecondary: ", e); return new CopyCmdAnswer(e.toString()); } finally { srcVol.clearPassphrase(); @@ -623,7 +624,7 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { final String templateName = UUID.randomUUID().toString(); if (primary.getType() != StoragePoolType.RBD) { - final Script command = new Script(_createTmplPath, wait, s_logger); + final Script command = new Script(_createTmplPath, wait, logger); command.add("-f", disk.getPath()); command.add("-t", tmpltPath); command.add(NAME_OPTION, templateName + ".qcow2"); @@ -631,11 +632,11 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { final String result = command.execute(); if (result != null) { - s_logger.debug("failed to create template: " + result); + logger.debug("failed to create template: " + result); return new CopyCmdAnswer(result); } } else { - 
s_logger.debug("Converting RBD disk " + disk.getPath() + " into template " + templateName); + logger.debug("Converting RBD disk " + disk.getPath() + " into template " + templateName); final QemuImgFile srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(), primary.getSourcePort(), primary.getAuthUserName(), @@ -697,13 +698,13 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { return new CopyCmdAnswer(newTemplate); } catch (final QemuImgException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); return new CopyCmdAnswer(e.toString()); } catch (final IOException e) { - s_logger.debug("Failed to createTemplateFromVolume: ", e); + logger.debug("Failed to createTemplateFromVolume: ", e); return new CopyCmdAnswer(e.toString()); } catch (final Exception e) { - s_logger.debug("Failed to createTemplateFromVolume: ", e); + logger.debug("Failed to createTemplateFromVolume: ", e); return new CopyCmdAnswer(e.toString()); } finally { volume.clearPassphrase(); @@ -779,7 +780,7 @@ else if (srcData instanceof SnapshotObjectTO) { String templateName = UUID.randomUUID().toString(); - s_logger.debug("Converting " + srcDisk.getFormat().toString() + " disk " + srcDisk.getPath() + " into template " + templateName); + logger.debug("Converting " + srcDisk.getFormat().toString() + " disk " + srcDisk.getPath() + " into template " + templateName); String destName = templateFolder + "/" + templateName + ".qcow2"; @@ -838,10 +839,10 @@ else if (srcData instanceof SnapshotObjectTO) { return new CopyCmdAnswer(newTemplate); } catch (Exception ex) { if (isVolume) { - s_logger.debug("Failed to create template from volume: ", ex); + logger.debug("Failed to create template from volume: ", ex); } else { - s_logger.debug("Failed to create template from snapshot: ", ex); + logger.debug("Failed to create template from snapshot: ", ex); } return new CopyCmdAnswer(ex.toString()); @@ -890,7 +891,7 @@ protected Answer copyToObjectStore(final 
CopyCommand cmd) { newSnapshot.setPath(destPath); return new CopyCmdAnswer(newSnapshot); } catch (final Exception e) { - s_logger.error("failed to upload" + srcPath, e); + logger.error("failed to upload" + srcPath, e); return new CopyCmdAnswer("failed to upload" + srcPath + e.toString()); } finally { try { @@ -901,7 +902,7 @@ protected Answer copyToObjectStore(final CopyCommand cmd) { srcStorePool.delete(); } } catch (final Exception e) { - s_logger.debug("Failed to clean up:", e); + logger.debug("Failed to clean up:", e); } } } @@ -980,10 +981,10 @@ public Answer backupSnapshot(final CopyCommand cmd) { final String rbdSnapshot = snapshotDisk.getPath() + "@" + snapshotName; final String snapshotFile = snapshotDestPath + "/" + snapshotName; try { - s_logger.debug("Attempting to backup RBD snapshot " + rbdSnapshot); + logger.debug("Attempting to backup RBD snapshot " + rbdSnapshot); final File snapDir = new File(snapshotDestPath); - s_logger.debug("Attempting to create " + snapDir.getAbsolutePath() + " recursively for snapshot storage"); + logger.debug("Attempting to create " + snapDir.getAbsolutePath() + " recursively for snapshot storage"); FileUtils.forceMkdir(snapDir); final QemuImgFile srcFile = @@ -994,7 +995,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { final QemuImgFile destFile = new QemuImgFile(snapshotFile); destFile.setFormat(PhysicalDiskFormat.QCOW2); - s_logger.debug("Backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile); + logger.debug("Backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile); final QemuImg q = new QemuImg(cmd.getWaitInMillSeconds()); q.convert(srcFile, destFile); @@ -1003,20 +1004,20 @@ public Answer backupSnapshot(final CopyCommand cmd) { size = snapFile.length(); } - s_logger.debug("Finished backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile + " Snapshot size: " + toHumanReadableSize(size)); + logger.debug("Finished backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile + " 
Snapshot size: " + toHumanReadableSize(size)); } catch (final FileNotFoundException e) { - s_logger.error("Failed to open " + snapshotDestPath + ". The error was: " + e.getMessage()); + logger.error("Failed to open " + snapshotDestPath + ". The error was: " + e.getMessage()); return new CopyCmdAnswer(e.toString()); } catch (final IOException e) { - s_logger.error("Failed to create " + snapshotDestPath + ". The error was: " + e.getMessage()); + logger.error("Failed to create " + snapshotDestPath + ". The error was: " + e.getMessage()); return new CopyCmdAnswer(e.toString()); } catch (final QemuImgException | LibvirtException e) { - s_logger.error("Failed to backup the RBD snapshot from " + rbdSnapshot + + logger.error("Failed to backup the RBD snapshot from " + rbdSnapshot + " to " + snapshotFile + " the error was: " + e.getMessage()); return new CopyCmdAnswer(e.toString()); } } else { - final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger); + final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), logger); command.add("-b", isCreatedFromVmSnapshot ? 
snapshotDisk.getPath() : snapshot.getPath()); command.add(NAME_OPTION, snapshotName); command.add("-p", snapshotDestPath); @@ -1026,7 +1027,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { command.add("-t", descName); final String result = command.execute(); if (result != null) { - s_logger.debug("Failed to backup snaptshot: " + result); + logger.debug("Failed to backup snaptshot: " + result); return new CopyCmdAnswer(result); } final File snapFile = new File(snapshotDestPath + "/" + descName); @@ -1040,12 +1041,12 @@ public Answer backupSnapshot(final CopyCommand cmd) { newSnapshot.setPhysicalSize(size); return new CopyCmdAnswer(newSnapshot); } catch (final LibvirtException | CloudRuntimeException e) { - s_logger.debug("Failed to backup snapshot: ", e); + logger.debug("Failed to backup snapshot: ", e); return new CopyCmdAnswer(e.toString()); } finally { srcVolume.clearPassphrase(); if (isCreatedFromVmSnapshot) { - s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); + logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); } else if (primaryPool.getType() != StoragePoolType.RBD) { deleteSnapshotOnPrimary(cmd, snapshot, primaryPool); } @@ -1055,7 +1056,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { secondaryStoragePool.delete(); } } catch (final Exception ex) { - s_logger.debug("Failed to delete secondary storage", ex); + logger.debug("Failed to delete secondary storage", ex); } } } @@ -1074,10 +1075,10 @@ private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObject try { Files.deleteIfExists(Paths.get(snapshotPath)); } catch (IOException ex) { - s_logger.error(String.format("Failed to delete snapshot [%s] on primary storage [%s].", snapshotPath, primaryPool.getUuid()), ex); + logger.error(String.format("Failed to delete snapshot [%s] on primary storage [%s].", snapshotPath, primaryPool.getUuid()), ex); } } else { - 
s_logger.debug(String.format("This backup is temporary, not deleting snapshot [%s] on primary storage [%s]", snapshotPath, primaryPool.getUuid())); + logger.debug(String.format("This backup is temporary, not deleting snapshot [%s] on primary storage [%s]", snapshotPath, primaryPool.getUuid())); } } @@ -1164,7 +1165,7 @@ public Answer dettachIso(final DettachCommand cmd) { private String getDataStoreUrlFromStore(DataStoreTO store) { List supportedPoolType = List.of(StoragePoolType.NetworkFilesystem, StoragePoolType.Filesystem); if (!(store instanceof NfsTO) && (!(store instanceof PrimaryDataStoreTO) || !supportedPoolType.contains(((PrimaryDataStoreTO) store).getPoolType()))) { - s_logger.error(String.format("Unsupported protocol, store: %s", store.getUuid())); + logger.error(String.format("Unsupported protocol, store: %s", store.getUuid())); throw new InvalidParameterValueException("unsupported protocol"); } @@ -1208,11 +1209,11 @@ protected synchronized void attachOrDetachDevice(final Connect conn, final boole dm = conn.domainLookupByName(vmName); if (attach) { - s_logger.debug("Attaching device: " + diskXml); + logger.debug("Attaching device: " + diskXml); dm.attachDevice(diskXml); return; } - s_logger.debug(String.format("Detaching device: [%s].", diskXml)); + logger.debug(String.format("Detaching device: [%s].", diskXml)); dm.detachDevice(diskXml); long wait = waitDetachDevice; while (!checkDetachSuccess(diskPath, dm) && wait > 0) { @@ -1223,13 +1224,13 @@ protected synchronized void attachOrDetachDevice(final Connect conn, final boole "not support the sent detach command or the device is busy at the moment. Try again in a couple of minutes.", waitDetachDevice)); } - s_logger.debug(String.format("The detach command was executed successfully. The device [%s] was removed from the VM instance with UUID [%s].", + logger.debug(String.format("The detach command was executed successfully. 
The device [%s] was removed from the VM instance with UUID [%s].", diskPath, dm.getUUIDString())); } catch (final LibvirtException e) { if (attach) { - s_logger.warn("Failed to attach device to " + vmName + ": " + e.getMessage()); + logger.warn("Failed to attach device to " + vmName + ": " + e.getMessage()); } else { - s_logger.warn("Failed to detach device from " + vmName + ": " + e.getMessage()); + logger.warn("Failed to detach device from " + vmName + ": " + e.getMessage()); } throw e; } finally { @@ -1237,7 +1238,7 @@ protected synchronized void attachOrDetachDevice(final Connect conn, final boole try { dm.free(); } catch (final LibvirtException l) { - s_logger.trace("Ignoring libvirt error.", l); + logger.trace("Ignoring libvirt error.", l); } } } @@ -1252,7 +1253,7 @@ private long getWaitAfterSleep(Domain dm, String diskPath, long wait) throws Lib try { wait -= waitDelayForVirshCommands; Thread.sleep(waitDelayForVirshCommands); - s_logger.trace(String.format("Trying to detach device [%s] from VM instance with UUID [%s]. " + + logger.trace(String.format("Trying to detach device [%s] from VM instance with UUID [%s]. 
" + "Waiting [%s] milliseconds before assuming the VM was unable to detach the volume.", diskPath, dm.getUUIDString(), wait)); } catch (InterruptedException e) { throw new CloudRuntimeException(e); @@ -1272,7 +1273,7 @@ protected boolean checkDetachSuccess(String diskPath, Domain dm) throws LibvirtE List disks = parser.getDisks(); for (DiskDef diskDef : disks) { if (StringUtils.equals(diskPath, diskDef.getDiskPath())) { - s_logger.debug(String.format("The hypervisor sent the detach command, but it is still possible to identify the device [%s] in the instance with UUID [%s].", + logger.debug(String.format("The hypervisor sent the detach command, but it is still possible to identify the device [%s] in the instance with UUID [%s].", diskPath, dm.getUUIDString())); return false; } @@ -1366,7 +1367,7 @@ protected synchronized void attachOrDetachDisk(final Connect conn, final boolean if (resource.getHypervisorType() == Hypervisor.HypervisorType.LXC) { final String device = resource.mapRbdDevice(attachingDisk); if (device != null) { - s_logger.debug("RBD device on host is: "+device); + logger.debug("RBD device on host is: "+device); attachingDisk.setPath(device); } } @@ -1380,7 +1381,7 @@ protected synchronized void attachOrDetachDisk(final Connect conn, final boolean } } if (diskdef == null) { - s_logger.warn(String.format("Could not find disk [%s] attached to VM instance with UUID [%s]. We will set it as detached in the database to ensure consistency.", + logger.warn(String.format("Could not find disk [%s] attached to VM instance with UUID [%s]. 
We will set it as detached in the database to ensure consistency.", attachingDisk.getPath(), dm.getUUIDString())); return; } @@ -1405,7 +1406,7 @@ protected synchronized void attachOrDetachDisk(final Connect conn, final boolean // For LXC, map image to host and then attach to Vm final String device = resource.mapRbdDevice(attachingDisk); if (device != null) { - s_logger.debug("RBD device on host is: "+device); + logger.debug("RBD device on host is: "+device); diskdef.defBlockBasedDisk(device, devId, busT); } else { throw new InternalErrorException("Error while mapping disk "+attachingDisk.getPath()+" on host"); @@ -1512,7 +1513,7 @@ public Answer attachVolume(final AttachCommand cmd) { final KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); final String volCacheMode = vol.getCacheMode() == null ? null : vol.getCacheMode().toString(); - s_logger.debug(String.format("Attaching physical disk %s with format %s", phyDisk.getPath(), phyDisk.getFormat())); + logger.debug(String.format("Attaching physical disk %s with format %s", phyDisk.getPath(), phyDisk.getFormat())); attachOrDetachDisk(conn, true, vmName, phyDisk, disk.getDiskSeq().intValue(), serial, vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(), @@ -1522,14 +1523,14 @@ public Answer attachVolume(final AttachCommand cmd) { return new AttachAnswer(disk); } catch (final LibvirtException e) { - s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); return new AttachAnswer(e.toString()); } catch (final InternalErrorException e) { - s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); return new 
AttachAnswer(e.toString()); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); return new AttachAnswer(e.toString()); } finally { vol.clearPassphrase(); @@ -1560,13 +1561,13 @@ public Answer dettachVolume(final DettachCommand cmd) { return new DettachAnswer(disk); } catch (final LibvirtException e) { - s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); return new DettachAnswer(e.toString()); } catch (final InternalErrorException e) { - s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); return new DettachAnswer(e.toString()); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); return new DettachAnswer(e.toString()); } finally { vol.clearPassphrase(); @@ -1595,7 +1596,7 @@ protected KVMPhysicalDisk createLinkedCloneVolume(MigrationOptions migrationOpti * Create full clone volume from VM snapshot */ protected KVMPhysicalDisk createFullCloneVolume(MigrationOptions migrationOptions, VolumeObjectTO volume, KVMStoragePool primaryPool, PhysicalDiskFormat format) { - s_logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + toHumanReadableSize(volume.getSize()) + " and format: " + format); + logger.debug("For VM migration with full-clone volume: Creating empty stub disk for source disk " + migrationOptions.getSrcVolumeUuid() + " and size: " + toHumanReadableSize(volume.getSize()) + " and format: " + format); return primaryPool.createPhysicalDisk(volume.getUuid(), format, 
volume.getProvisioningType(), volume.getSize(), volume.getPassphrase()); } @@ -1647,7 +1648,7 @@ public Answer createVolume(final CreateObjectCommand cmd) { return new CreateObjectAnswer(newVol); } catch (final Exception e) { - s_logger.debug("Failed to create volume: ", e); + logger.debug("Failed to create volume: ", e); return new CreateObjectAnswer(e.toString()); } finally { volume.clearPassphrase(); @@ -1722,7 +1723,7 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { vm = resource.getDomain(conn, vmName); state = vm.getInfo().state; } catch (final LibvirtException e) { - s_logger.trace("Ignoring libvirt error.", e); + logger.trace("Ignoring libvirt error.", e); } } @@ -1754,7 +1755,7 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { throw e; } - s_logger.info(String.format("It was not possible to take live disk snapshot for volume [%s], in VM [%s], due to [%s]. We will take full snapshot of the VM" + logger.info(String.format("It was not possible to take live disk snapshot for volume [%s], in VM [%s], due to [%s]. We will take full snapshot of the VM" + " and extract the disk instead. Consider upgrading your QEMU binary.", volume, vmName, e.getMessage())); takeFullVmSnapshotForBinariesThatDoesNotSupportLiveDiskSnapshot(vm, snapshotName, vmName); @@ -1793,22 +1794,22 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { final Rbd rbd = new Rbd(io); final RbdImage image = rbd.open(disk.getName()); - s_logger.debug("Attempting to create RBD snapshot " + disk.getName() + "@" + snapshotName); + logger.debug("Attempting to create RBD snapshot " + disk.getName() + "@" + snapshotName); image.snapCreate(snapshotName); rbd.close(image); r.ioCtxDestroy(io); } catch (final Exception e) { - s_logger.error("A RBD snapshot operation on " + disk.getName() + " failed. The error was: " + e.getMessage()); + logger.error("A RBD snapshot operation on " + disk.getName() + " failed. 
The error was: " + e.getMessage()); } } else if (primaryPool.getType() == StoragePoolType.CLVM) { /* VM is not running, create a snapshot by ourself */ - final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, s_logger); + final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, logger); command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath()); command.add(NAME_OPTION, snapshotName); final String result = command.execute(); if (result != null) { - s_logger.debug("Failed to manage snapshot: " + result); + logger.debug("Failed to manage snapshot: " + result); return new CreateObjectAnswer("Failed to manage snapshot: " + result); } } else { @@ -1824,7 +1825,7 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { return new CreateObjectAnswer(newSnapshot); } catch (CloudRuntimeException | LibvirtException | IOException ex) { String errorMsg = String.format("Failed take snapshot for volume [%s], in VM [%s], due to [%s].", volume, vmName, ex.getMessage()); - s_logger.error(errorMsg, ex); + logger.error(errorMsg, ex); return new CreateObjectAnswer(errorMsg); } finally { volume.clearPassphrase(); @@ -1832,7 +1833,7 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { } protected void deleteFullVmSnapshotAfterConvertingItToExternalDiskSnapshot(Domain vm, String snapshotName, VolumeObjectTO volume, String vmName) throws LibvirtException { - s_logger.debug(String.format("Deleting full VM snapshot [%s] of VM [%s] as we already converted it to an external disk snapshot of the volume [%s].", snapshotName, vmName, + logger.debug(String.format("Deleting full VM snapshot [%s] of VM [%s] as we already converted it to an external disk snapshot of the volume [%s].", snapshotName, vmName, volume)); DomainSnapshot domainSnapshot = vm.snapshotLookupByName(snapshotName); @@ -1846,13 +1847,13 @@ protected void extractDiskFromFullVmSnapshot(KVMPhysicalDisk disk, VolumeObjectT try { QemuImg qemuImg = new QemuImg(_cmdsTimeout); - 
s_logger.debug(String.format("Converting full VM snapshot [%s] of VM [%s] to external disk snapshot of the volume [%s].", snapshotName, vmName, volume)); + logger.debug(String.format("Converting full VM snapshot [%s] of VM [%s] to external disk snapshot of the volume [%s].", snapshotName, vmName, volume)); qemuImg.convert(srcFile, destFile, null, snapshotName, true); } catch (QemuImgException qemuException) { String message = String.format("Could not convert full VM snapshot [%s] of VM [%s] to external disk snapshot of volume [%s] due to [%s].", snapshotName, vmName, volume, qemuException.getMessage()); - s_logger.error(message, qemuException); + logger.error(message, qemuException); throw new CloudRuntimeException(message, qemuException); } finally { deleteFullVmSnapshotAfterConvertingItToExternalDiskSnapshot(vm, snapshotName, volume, vmName); @@ -1864,7 +1865,7 @@ protected void takeFullVmSnapshotForBinariesThatDoesNotSupportLiveDiskSnapshot(D long start = System.currentTimeMillis(); vm.snapshotCreateXML(String.format(XML_CREATE_FULL_VM_SNAPSHOT, snapshotName, vmUuid)); - s_logger.debug(String.format("Full VM Snapshot [%s] of VM [%s] took [%s] seconds to finish.", snapshotName, vmName, (System.currentTimeMillis() - start)/1000)); + logger.debug(String.format("Full VM Snapshot [%s] of VM [%s] took [%s] seconds to finish.", snapshotName, vmName, (System.currentTimeMillis() - start)/1000)); } protected void validateConvertResult(String convertResult, String snapshotPath) throws CloudRuntimeException, IOException { @@ -1892,7 +1893,7 @@ protected void mergeSnapshotIntoBaseFile(Domain vm, String diskLabel, String bas String mergeResult = Script.runSimpleBashScript(mergeCommand); if (mergeResult == null) { - s_logger.debug(String.format("Successfully merged snapshot [%s] into VM [%s] %s base file.", snapshotName, vmName, volume)); + logger.debug(String.format("Successfully merged snapshot [%s] into VM [%s] %s base file.", snapshotName, vmName, volume)); 
manuallyDeleteUnusedSnapshotFile(isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit, getSnapshotTemporaryPath(baseFilePath, snapshotName)); return; } @@ -1901,7 +1902,7 @@ protected void mergeSnapshotIntoBaseFile(Domain vm, String diskLabel, String bas + " will start to write in the base file again. All changes made between the snapshot and the VM stop will be in the snapshot. If the VM is stopped, the snapshot must be" + " merged into the base file manually.", snapshotName, vmName, volume, mergeCommand, mergeResult); - s_logger.warn(String.format("%s VM XML: [%s].", errorMsg, vm.getXMLDesc(0))); + logger.warn(String.format("%s VM XML: [%s].", errorMsg, vm.getXMLDesc(0))); throw new CloudRuntimeException(errorMsg); } @@ -1913,17 +1914,17 @@ protected void mergeSnapshotIntoBaseFile(Domain vm, String diskLabel, String bas */ protected void manuallyDeleteUnusedSnapshotFile(boolean isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit, String snapshotPath) { if (isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit) { - s_logger.debug(String.format("The current Libvirt's version supports the flag '--delete' on command 'virsh blockcommit', we will skip the manually deletion of the" + logger.debug(String.format("The current Libvirt's version supports the flag '--delete' on command 'virsh blockcommit', we will skip the manually deletion of the" + " unused snapshot file [%s] as it already was automatically deleted.", snapshotPath)); return; } - s_logger.debug(String.format("The current Libvirt's version does not supports the flag '--delete' on command 'virsh blockcommit', therefore we will manually delete the" + logger.debug(String.format("The current Libvirt's version does not supports the flag '--delete' on command 'virsh blockcommit', therefore we will manually delete the" + " unused snapshot file [%s].", snapshotPath)); try { Files.deleteIfExists(Paths.get(snapshotPath)); - s_logger.debug(String.format("Manually deleted unused snapshot file [%s].", 
snapshotPath)); + logger.debug(String.format("Manually deleted unused snapshot file [%s].", snapshotPath)); } catch (IOException ex) { throw new CloudRuntimeException(String.format("Unable to manually delete unused snapshot file [%s] due to [%s].", snapshotPath, ex.getMessage())); } @@ -1938,7 +1939,7 @@ protected void manuallyDeleteUnusedSnapshotFile(boolean isLibvirtSupportingFlagD */ protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, String baseFile, String snapshotPath, VolumeObjectTO volume, int wait) { try { - s_logger.debug(String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath)); + logger.debug(String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath)); primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR); @@ -1951,7 +1952,7 @@ protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool QemuImg q = new QemuImg(wait); q.convert(srcFile, destFile); - s_logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, snapshotPath)); + logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, snapshotPath)); return null; } catch (QemuImgException | LibvirtException ex) { return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, snapshotPath, ex.getMessage()); @@ -1988,7 +1989,7 @@ protected String takeVolumeSnapshot(List disks, String snapshotName, St long start = System.currentTimeMillis(); vm.snapshotCreateXML(createSnapshotXmlFormated, VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY); - s_logger.debug(String.format("Snapshot [%s] took [%s] seconds to finish.", snapshotName, (System.currentTimeMillis() - start)/1000)); + logger.debug(String.format("Snapshot [%s] took [%s] seconds to finish.", snapshotName, (System.currentTimeMillis() - start)/1000)); return 
diskLabelToSnapshot; } @@ -2062,7 +2063,7 @@ protected void validateAvailableSizeOnPoolToTakeVolumeSnapshot(KVMStoragePool pr diskDescription)); } - s_logger.debug(String.format("Pool [%s] has enough available size [%s] to take volume [%s] snapshot.", poolDescription, availablePoolSize, diskDescription)); + logger.debug(String.format("Pool [%s] has enough available size [%s] to take volume [%s] snapshot.", poolDescription, availablePoolSize, diskDescription)); } protected boolean isAvailablePoolSizeDividedByDiskSizeLesserThanMinRate(long availablePoolSize, long diskSize) { @@ -2075,7 +2076,7 @@ private Rados radosConnect(final KVMStoragePool primaryPool) throws RadosExcepti r.confSet(CEPH_AUTH_KEY, primaryPool.getAuthSecret()); r.confSet(CEPH_CLIENT_MOUNT_TIMEOUT, CEPH_DEFAULT_MOUNT_TIMEOUT); r.connect(); - s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet(CEPH_MON_HOST)); + logger.debug("Successfully connected to Ceph cluster at " + r.confGet(CEPH_MON_HOST)); return r; } @@ -2088,13 +2089,13 @@ public Answer deleteVolume(final DeleteCommand cmd) { try { pool.getPhysicalDisk(vol.getPath()); } catch (final Exception e) { - s_logger.debug("can't find volume: " + vol.getPath() + ", return true"); + logger.debug("can't find volume: " + vol.getPath() + ", return true"); return new Answer(null); } pool.deletePhysicalDisk(vol.getPath(), vol.getFormat()); return new Answer(null); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to delete volume: ", e); + logger.debug("Failed to delete volume: ", e); return new Answer(null, false, e.toString()); } finally { vol.clearPassphrase(); @@ -2136,7 +2137,7 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to createVolumeFromSnapshot: ", e); + logger.debug("Failed to createVolumeFromSnapshot: ", e); return new CopyCmdAnswer(e.toString()); } finally { volume.clearPassphrase(); 
@@ -2155,11 +2156,11 @@ private KVMPhysicalDisk createVolumeFromRBDSnapshot(CopyCommand cmd, DataTO dest VolumeObjectTO newVol = (VolumeObjectTO) destData; if (StoragePoolType.RBD.equals(primaryStore.getPoolType())) { - s_logger.debug(String.format("Attempting to create volume from RBD snapshot %s", snapshotName)); + logger.debug(String.format("Attempting to create volume from RBD snapshot %s", snapshotName)); if (StoragePoolType.RBD.equals(pool.getPoolType())) { disk = createRBDvolumeFromRBDSnapshot(snapshotDisk, snapshotName, newVol.getUuid(), PhysicalDiskFormat.RAW, newVol.getSize(), destPool, cmd.getWaitInMillSeconds()); - s_logger.debug(String.format("Created RBD volume %s from snapshot %s", disk, snapshotDisk)); + logger.debug(String.format("Created RBD volume %s from snapshot %s", disk, snapshotDisk)); } else { Map details = cmd.getOptions2(); @@ -2172,7 +2173,7 @@ private KVMPhysicalDisk createVolumeFromRBDSnapshot(CopyCommand cmd, DataTO dest destPool, cmd.getWaitInMillSeconds()); storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), path); - s_logger.debug(String.format("Created RBD volume %s from snapshot %s", disk, snapshotDisk)); + logger.debug(String.format("Created RBD volume %s from snapshot %s", disk, snapshotDisk)); } } @@ -2249,12 +2250,12 @@ private KVMPhysicalDisk createRBDvolumeFromRBDSnapshot(KVMPhysicalDisk volume, S } if (!snapFound) { - s_logger.debug(String.format("Could not find snapshot %s on RBD", snapshotName)); + logger.debug(String.format("Could not find snapshot %s on RBD", snapshotName)); return null; } srcImage.snapProtect(snapshotName); - s_logger.debug(String.format("Try to clone snapshot %s on RBD", snapshotName)); + logger.debug(String.format("Try to clone snapshot %s on RBD", snapshotName)); rbd.clone(volume.getName(), snapshotName, io, disk.getName(), LibvirtStorageAdaptor.RBD_FEATURES, 0); RbdImage diskImage = rbd.open(disk.getName()); if (disk.getVirtualSize() > volume.getVirtualSize()) { @@ -2268,7 
+2269,7 @@ private KVMPhysicalDisk createRBDvolumeFromRBDSnapshot(KVMPhysicalDisk volume, S rbd.close(srcImage); r.ioCtxDestroy(io); } catch (RadosException | RbdException e) { - s_logger.error(String.format("Failed due to %s", e.getMessage()), e); + logger.error(String.format("Failed due to %s", e.getMessage()), e); disk = null; } @@ -2294,16 +2295,16 @@ public Answer deleteSnapshot(final DeleteCommand cmd) { Rbd rbd = new Rbd(io); RbdImage image = rbd.open(disk.getName()); try { - s_logger.info("Attempting to remove RBD snapshot " + snapshotFullName); + logger.info("Attempting to remove RBD snapshot " + snapshotFullName); if (image.snapIsProtected(snapshotName)) { - s_logger.debug("Unprotecting RBD snapshot " + snapshotFullName); + logger.debug("Unprotecting RBD snapshot " + snapshotFullName); image.snapUnprotect(snapshotName); } image.snapRemove(snapshotName); - s_logger.info("Snapshot " + snapshotFullName + " successfully removed from " + + logger.info("Snapshot " + snapshotFullName + " successfully removed from " + primaryPool.getType().toString() + " pool."); } catch (RbdException e) { - s_logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString() + + logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString() + ", RBD error: " + ErrorCode.getErrorMessage(e.getReturnValue())); } finally { rbd.close(image); @@ -2311,24 +2312,24 @@ public Answer deleteSnapshot(final DeleteCommand cmd) { } } else if (storagePoolTypesToDeleteSnapshotFile.contains(primaryPool.getType())) { - s_logger.info(String.format("Deleting snapshot (id=%s, name=%s, path=%s, storage type=%s) on primary storage", snapshotTO.getId(), snapshotTO.getName(), + logger.info(String.format("Deleting snapshot (id=%s, name=%s, path=%s, storage type=%s) on primary storage", snapshotTO.getId(), snapshotTO.getName(), snapshotTO.getPath(), primaryPool.getType())); deleteSnapshotFile(snapshotTO); } else { - 
s_logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString()); + logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString()); throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString()); } return new Answer(cmd, true, "Snapshot " + snapshotFullName + " removed successfully."); } catch (RadosException e) { - s_logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString() + + logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString() + ", RBD error: " + ErrorCode.getErrorMessage(e.getReturnValue())); return new Answer(cmd, false, "Failed to remove snapshot " + snapshotFullName); } catch (RbdException e) { - s_logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString() + + logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString() + ", RBD error: " + ErrorCode.getErrorMessage(e.getReturnValue())); return new Answer(cmd, false, "Failed to remove snapshot " + snapshotFullName); } catch (Exception e) { - s_logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString()); + logger.error("Failed to remove snapshot " + snapshotFullName + ", with exception: " + e.toString()); return new Answer(cmd, false, "Failed to remove snapshot " + snapshotFullName); } finally { volume.clearPassphrase(); @@ -2342,7 +2343,7 @@ public Answer deleteSnapshot(final DeleteCommand cmd) { protected void deleteSnapshotFile(SnapshotObjectTO snapshotObjectTo) throws CloudRuntimeException { try { Files.deleteIfExists(Paths.get(snapshotObjectTo.getPath())); - s_logger.debug(String.format("Deleted snapshot [%s].", snapshotObjectTo)); + logger.debug(String.format("Deleted snapshot [%s].", snapshotObjectTo)); } catch (IOException ex) { throw new 
CloudRuntimeException(String.format("Unable to delete snapshot [%s] due to [%s].", snapshotObjectTo, ex.getMessage())); } @@ -2366,12 +2367,12 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) KVMStoragePool destPool = null; try { - s_logger.debug("Verifying temporary location for downloading the template exists on the host"); + logger.debug("Verifying temporary location for downloading the template exists on the host"); String temporaryDownloadPath = resource.getDirectDownloadTemporaryDownloadPath(); if (!isLocationAccessible(temporaryDownloadPath)) { String msg = "The temporary location path for downloading templates does not exist: " + temporaryDownloadPath + " on this host"; - s_logger.error(msg); + logger.error(msg); return new DirectDownloadAnswer(false, msg, true); } @@ -2381,24 +2382,24 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) templateSize = UriUtils.getRemoteSize(url); } - s_logger.debug("Checking for free space on the host for downloading the template with physical size: " + templateSize + " and virtual size: " + cmd.getTemplateSize()); + logger.debug("Checking for free space on the host for downloading the template with physical size: " + templateSize + " and virtual size: " + cmd.getTemplateSize()); if (!isEnoughSpaceForDownloadTemplateOnTemporaryLocation(templateSize)) { String msg = "Not enough space on the defined temporary location to download the template " + cmd.getTemplateId(); - s_logger.error(msg); + logger.error(msg); return new DirectDownloadAnswer(false, msg, true); } destPool = storagePoolMgr.getStoragePool(pool.getPoolType(), pool.getUuid()); downloader = DirectDownloadHelper.getDirectTemplateDownloaderFromCommand(cmd, destPool.getLocalPath(), temporaryDownloadPath); - s_logger.debug("Trying to download template"); + logger.debug("Trying to download template"); Pair result = downloader.downloadTemplate(); if (!result.first()) { - s_logger.warn("Couldn't download 
template"); + logger.warn("Couldn't download template"); return new DirectDownloadAnswer(false, "Unable to download template", true); } String tempFilePath = result.second(); if (!downloader.validateChecksum()) { - s_logger.warn("Couldn't validate template checksum"); + logger.warn("Couldn't validate template checksum"); return new DirectDownloadAnswer(false, "Checksum validation failed", false); } @@ -2406,16 +2407,16 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) String destTemplatePath = (destTemplate != null) ? destTemplate.getPath() : null; if (!storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath, null)) { - s_logger.warn("Unable to connect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + logger.warn("Unable to connect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); } template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destTemplatePath, destPool, cmd.getFormat(), cmd.getWaitInMillSeconds()); if (!storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath)) { - s_logger.warn("Unable to disconnect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + logger.warn("Unable to disconnect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); } } catch (CloudRuntimeException e) { - s_logger.warn("Error downloading template " + cmd.getTemplateId() + " due to: " + e.getMessage()); + logger.warn("Error downloading template " + cmd.getTemplateId() + " due to: " + e.getMessage()); return new DirectDownloadAnswer(false, "Unable to download template: " + e.getMessage(), true); } catch (IllegalArgumentException e) { return new DirectDownloadAnswer(false, "Unable to create direct downloader: " + e.getMessage(), true); @@ -2441,18 +2442,18 @@ public Answer 
copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { KVMStoragePool destPool = null; try { - s_logger.debug("Copying src volume (id: " + srcVol.getId() + ", format: " + srcFormat + ", path: " + srcVolumePath + ", primary storage: [id: " + srcPrimaryStore.getId() + ", type: " + srcPrimaryStore.getPoolType() + "]) to dest volume (id: " + + logger.debug("Copying src volume (id: " + srcVol.getId() + ", format: " + srcFormat + ", path: " + srcVolumePath + ", primary storage: [id: " + srcPrimaryStore.getId() + ", type: " + srcPrimaryStore.getPoolType() + "]) to dest volume (id: " + destVol.getId() + ", format: " + destFormat + ", path: " + destVolumePath + ", primary storage: [id: " + destPrimaryStore.getId() + ", type: " + destPrimaryStore.getPoolType() + "])."); if (srcPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath, srcPrimaryStore.getDetails())) { - s_logger.warn("Failed to connect src volume at path: " + srcVolumePath + ", in storage pool id: " + srcPrimaryStore.getUuid()); + logger.warn("Failed to connect src volume at path: " + srcVolumePath + ", in storage pool id: " + srcPrimaryStore.getUuid()); } } final KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath); if (volume == null) { - s_logger.debug("Failed to get physical disk for volume: " + srcVolumePath); + logger.debug("Failed to get physical disk for volume: " + srcVolumePath); throw new CloudRuntimeException("Failed to get physical disk for volume at path: " + srcVolumePath); } @@ -2461,7 +2462,7 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { String destVolumeName = null; if (destPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { - s_logger.warn("Failed to connect dest volume at path: " + 
destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); + logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); } String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; @@ -2480,7 +2481,7 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { } } catch (Exception e) { // Any exceptions while copying the disk, should send failed answer with the error message String errMsg = String.format("Failed to copy volume: %s to dest storage: %s, due to %s", srcVol.getName(), destPrimaryStore.getName(), e.toString()); - s_logger.debug(errMsg, e); + logger.debug(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { if (srcPrimaryStore.isManaged()) { @@ -2499,7 +2500,7 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { newVol.setEncryptFormat(destVol.getEncryptFormat()); return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { - s_logger.debug("Failed to copyVolumeFromPrimaryToPrimary: ", e); + logger.debug("Failed to copyVolumeFromPrimaryToPrimary: ", e); return new CopyCmdAnswer(e.toString()); } finally { srcVol.clearPassphrase(); @@ -2521,7 +2522,7 @@ private boolean isLocationAccessible(String temporaryDownloadPath) { */ protected boolean isEnoughSpaceForDownloadTemplateOnTemporaryLocation(Long templateSize) { if (templateSize == null || templateSize == 0L) { - s_logger.info("The server did not provide the template size, assuming there is enough space to download it"); + logger.info("The server did not provide the template size, assuming there is enough space to download it"); return true; } String cmd = String.format("df --output=avail %s -B 1 | tail -1", resource.getDirectDownloadTemporaryDownloadPath()); @@ -2531,7 +2532,7 @@ protected boolean 
isEnoughSpaceForDownloadTemplateOnTemporaryLocation(Long templ availableBytes = Long.parseLong(resultInBytes); } catch (NumberFormatException e) { String msg = "Could not parse the output " + resultInBytes + " as a number, therefore not able to check for free space"; - s_logger.error(msg, e); + logger.error(msg, e); return false; } return availableBytes >= templateSize; @@ -2539,13 +2540,13 @@ protected boolean isEnoughSpaceForDownloadTemplateOnTemporaryLocation(Long templ @Override public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) { - s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not currently applicable for KVMStorageProcessor"); + logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not currently applicable for KVMStorageProcessor"); return new Answer(cmd,false,"Not currently applicable for KVMStorageProcessor"); } @Override public Answer syncVolumePath(SyncVolumePathCommand cmd) { - s_logger.info("SyncVolumePathCommand not currently applicable for KVMStorageProcessor"); + logger.info("SyncVolumePathCommand not currently applicable for KVMStorageProcessor"); return new Answer(cmd, false, "Not currently applicable for KVMStorageProcessor"); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index 83a5c06cb5b7..3002fea41d3d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -32,7 +32,8 @@ import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import 
org.libvirt.Connect; import org.libvirt.LibvirtException; import org.libvirt.Secret; @@ -74,7 +75,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { - private static final Logger s_logger = Logger.getLogger(LibvirtStorageAdaptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private StorageLayer _storageLayer; private String _mountPoint = "/mnt"; private String _manageSnapshotPath; @@ -106,7 +107,7 @@ public boolean createFolder(String uuid, String path, String localPath) { String mountPoint = _mountPoint + File.separator + uuid; if (localPath != null) { - s_logger.debug(String.format("Pool [%s] is of type local or shared mount point; therefore, we will use the local path [%s] to create the folder [%s] (if it does not" + logger.debug(String.format("Pool [%s] is of type local or shared mount point; therefore, we will use the local path [%s] to create the folder [%s] (if it does not" + " exist).", uuid, localPath, path)); mountPoint = localPath; @@ -126,18 +127,18 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S destPool.getType(), size, passphrase != null && passphrase.length > 0); if (!poolTypesThatEnableCreateDiskFromTemplateBacking.contains(destPool.getType())) { - s_logger.info(String.format("Skipping creation of %s due to pool type is none of the following types %s.", volumeDesc, poolTypesThatEnableCreateDiskFromTemplateBacking.stream() + logger.info(String.format("Skipping creation of %s due to pool type is none of the following types %s.", volumeDesc, poolTypesThatEnableCreateDiskFromTemplateBacking.stream() .map(type -> type.toString()).collect(Collectors.joining(", ")))); return null; } if (format != PhysicalDiskFormat.QCOW2) { - s_logger.info(String.format("Skipping creation of %s due to format [%s] is not [%s].", volumeDesc, format, PhysicalDiskFormat.QCOW2)); + logger.info(String.format("Skipping creation of %s due to format [%s] is not [%s].", volumeDesc, format, 
PhysicalDiskFormat.QCOW2)); return null; } - s_logger.info(String.format("Creating %s.", volumeDesc)); + logger.info(String.format("Creating %s.", volumeDesc)); String destPoolLocalPath = destPool.getLocalPath(); String destPath = String.format("%s%s%s", destPoolLocalPath, destPoolLocalPath.endsWith("/") ? "" : "/", name); @@ -152,13 +153,13 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S if (keyFile.isSet()) { passphraseObjects.add(QemuObject.prepareSecretForQemuImg(format, QemuObject.EncryptFormat.LUKS, keyFile.toString(), "sec0", options)); } - s_logger.debug(String.format("Passphrase is staged to keyFile: %s", keyFile.isSet())); + logger.debug(String.format("Passphrase is staged to keyFile: %s", keyFile.isSet())); QemuImg qemu = new QemuImg(timeout); qemu.create(destFile, backingFile, options, passphraseObjects); } catch (QemuImgException | LibvirtException | IOException e) { // why don't we throw an exception here? I guess we fail to find the volume later and that results in a failure returned? 
- s_logger.error(String.format("Failed to create %s in [%s] due to [%s].", volumeDesc, destPath, e.getMessage()), e); + logger.error(String.format("Failed to create %s in [%s] due to [%s].", volumeDesc, destPath, e.getMessage()), e); } return null; @@ -228,7 +229,7 @@ public StorageVol getVolume(StoragePool pool, String volName) { try { vol = pool.storageVolLookupByName(volName); } catch (LibvirtException e) { - s_logger.debug("Could not find volume " + volName + ": " + e.getMessage()); + logger.debug("Could not find volume " + volName + ": " + e.getMessage()); } /** @@ -238,15 +239,15 @@ public StorageVol getVolume(StoragePool pool, String volName) { */ if (vol == null) { try { - s_logger.debug("Refreshing storage pool " + pool.getName()); + logger.debug("Refreshing storage pool " + pool.getName()); refreshPool(pool); } catch (LibvirtException e) { - s_logger.debug("Failed to refresh storage pool: " + e.getMessage()); + logger.debug("Failed to refresh storage pool: " + e.getMessage()); } try { vol = pool.storageVolLookupByName(volName); - s_logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool"); + logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool"); } catch (LibvirtException e) { throw new CloudRuntimeException("Could not find volume " + volName + ": " + e.getMessage()); } @@ -257,7 +258,7 @@ public StorageVol getVolume(StoragePool pool, String volName) { public StorageVol createVolume(Connect conn, StoragePool pool, String uuid, long size, VolumeFormat format) throws LibvirtException { LibvirtStorageVolumeDef volDef = new LibvirtStorageVolumeDef(UUID.randomUUID().toString(), size, format, null, null); - s_logger.debug(volDef.toString()); + logger.debug(volDef.toString()); return pool.storageVolCreateXML(volDef.toString(), 0); } @@ -268,7 +269,7 @@ public void storagePoolRefresh(StoragePool pool) { refreshPool(pool); } } catch (LibvirtException e) { - 
s_logger.debug("refresh storage pool failed: " + e.toString()); + logger.debug("refresh storage pool failed: " + e.toString()); } } @@ -278,24 +279,24 @@ private StoragePool createNetfsStoragePool(PoolType fsType, Connect conn, String _storageLayer.mkdir(targetPath); StoragePool sp = null; try { - s_logger.debug(spd.toString()); + logger.debug(spd.toString()); // check whether the pool is already mounted int mountpointResult = Script.runSimpleBashScriptForExitValue("mountpoint -q " + targetPath); // if the pool is mounted, try to unmount it if(mountpointResult == 0) { - s_logger.info("Attempting to unmount old mount at " + targetPath); + logger.info("Attempting to unmount old mount at " + targetPath); String result = Script.runSimpleBashScript("umount -l " + targetPath); if (result == null) { - s_logger.info("Succeeded in unmounting " + targetPath); + logger.info("Succeeded in unmounting " + targetPath); } else { - s_logger.error("Failed in unmounting storage"); + logger.error("Failed in unmounting storage"); } } sp = conn.storagePoolCreateXML(spd.toString(), 0); return sp; } catch (LibvirtException e) { - s_logger.error(e.toString()); + logger.error(e.toString()); throw e; } } @@ -303,17 +304,17 @@ private StoragePool createNetfsStoragePool(PoolType fsType, Connect conn, String private StoragePool createSharedStoragePool(Connect conn, String uuid, String host, String path) { String mountPoint = path; if (!_storageLayer.exists(mountPoint)) { - s_logger.error(mountPoint + " does not exists. Check local.storage.path in agent.properties."); + logger.error(mountPoint + " does not exists. 
Check local.storage.path in agent.properties."); return null; } LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(PoolType.DIR, uuid, uuid, host, path, path); StoragePool sp = null; try { - s_logger.debug(spd.toString()); + logger.debug(spd.toString()); sp = conn.storagePoolCreateXML(spd.toString(), 0); return sp; } catch (LibvirtException e) { - s_logger.error(e.toString()); + logger.error(e.toString()); if (sp != null) { try { if (sp.isPersistent() == 1) { @@ -324,7 +325,7 @@ private StoragePool createSharedStoragePool(Connect conn, String uuid, String ho } sp.free(); } catch (LibvirtException l) { - s_logger.debug("Failed to define shared mount point storage pool with: " + l.toString()); + logger.debug("Failed to define shared mount point storage pool with: " + l.toString()); } } return null; @@ -337,14 +338,14 @@ private StoragePool createCLVMStoragePool(Connect conn, String uuid, String host String volgroupName = path; volgroupName = volgroupName.replaceFirst("/", ""); - LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(PoolType.LOGICAL, volgroupName, uuid, host, volgroupPath, volgroupPath); + LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(PoolType.LOGICAL, volgroupName, uuid, host, volgroupPath, volgroupPath); StoragePool sp = null; try { - s_logger.debug(spd.toString()); + logger.debug(spd.toString()); sp = conn.storagePoolCreateXML(spd.toString(), 0); return sp; } catch (LibvirtException e) { - s_logger.error(e.toString()); + logger.error(e.toString()); if (sp != null) { try { if (sp.isPersistent() == 1) { @@ -355,7 +356,7 @@ private StoragePool createCLVMStoragePool(Connect conn, String uuid, String host } sp.free(); } catch (LibvirtException l) { - s_logger.debug("Failed to define clvm storage pool with: " + l.toString()); + logger.debug("Failed to define clvm storage pool with: " + l.toString()); } } return null; @@ -376,17 +377,17 @@ private StoragePool createRBDStoragePool(Connect conn, String uuid, String host,
sd.setCephName(userInfoTemp[0] + "@" + host + ":" + port + "/" + path); try { - s_logger.debug(sd.toString()); + logger.debug(sd.toString()); s = conn.secretDefineXML(sd.toString()); s.setValue(Base64.decodeBase64(userInfoTemp[1])); } catch (LibvirtException e) { - s_logger.error("Failed to define the libvirt secret: " + e.toString()); + logger.error("Failed to define the libvirt secret: " + e.toString()); if (s != null) { try { s.undefine(); s.free(); } catch (LibvirtException l) { - s_logger.error("Failed to undefine the libvirt secret: " + l.toString()); + logger.error("Failed to undefine the libvirt secret: " + l.toString()); } } return null; @@ -397,11 +398,11 @@ private StoragePool createRBDStoragePool(Connect conn, String uuid, String host, } try { - s_logger.debug(spd.toString()); + logger.debug(spd.toString()); sp = conn.storagePoolCreateXML(spd.toString(), 0); return sp; } catch (LibvirtException e) { - s_logger.error("Failed to create RBD storage pool: " + e.toString()); + logger.error("Failed to create RBD storage pool: " + e.toString()); if (sp != null) { try { if (sp.isPersistent() == 1) { @@ -412,17 +413,17 @@ private StoragePool createRBDStoragePool(Connect conn, String uuid, String host, } sp.free(); } catch (LibvirtException l) { - s_logger.error("Failed to undefine RBD storage pool: " + l.toString()); + logger.error("Failed to undefine RBD storage pool: " + l.toString()); } } if (s != null) { try { - s_logger.error("Failed to create the RBD storage pool, cleaning up the libvirt secret"); + logger.error("Failed to create the RBD storage pool, cleaning up the libvirt secret"); s.undefine(); s.free(); } catch (LibvirtException se) { - s_logger.error("Failed to remove the libvirt secret: " + se.toString()); + logger.error("Failed to remove the libvirt secret: " + se.toString()); } } @@ -472,14 +473,14 @@ public KVMStoragePool getStoragePool(String uuid) { @Override public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { - 
s_logger.info("Trying to fetch storage pool " + uuid + " from libvirt"); + logger.info("Trying to fetch storage pool " + uuid + " from libvirt"); StoragePool storage = null; try { Connect conn = LibvirtConnection.getConnection(); storage = conn.storagePoolLookupByUUIDString(uuid); if (storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) { - s_logger.warn("Storage pool " + uuid + " is not in running state. Attempting to start it."); + logger.warn("Storage pool " + uuid + " is not in running state. Attempting to start it."); storage.create(0); } LibvirtStoragePoolDef spd = getStoragePoolDef(conn, storage); @@ -493,7 +494,7 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { type = StoragePoolType.Filesystem; } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.RBD) { type = StoragePoolType.RBD; - } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.LOGICAL) { + } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.LOGICAL) { type = StoragePoolType.CLVM; } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.GLUSTERFS) { type = StoragePoolType.Gluster; @@ -535,21 +536,21 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { * refresh the pool */ if (refreshInfo) { - s_logger.info("Asking libvirt to refresh storage pool " + uuid); + logger.info("Asking libvirt to refresh storage pool " + uuid); pool.refresh(); } pool.setCapacity(storage.getInfo().capacity); pool.setUsed(storage.getInfo().allocation); pool.setAvailable(storage.getInfo().available); - s_logger.debug("Successfully refreshed pool " + uuid + + logger.debug("Successfully refreshed pool " + uuid + " Capacity: " + toHumanReadableSize(storage.getInfo().capacity) + " Used: " + toHumanReadableSize(storage.getInfo().allocation) + " Available: " + toHumanReadableSize(storage.getInfo().available)); return pool; } catch (LibvirtException e) { - s_logger.debug("Could not find storage pool " + uuid + " in libvirt"); +
logger.debug("Could not find storage pool " + uuid + " in libvirt"); throw new CloudRuntimeException(e.toString(), e); } } @@ -590,14 +591,14 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { } return disk; } catch (LibvirtException e) { - s_logger.debug("Failed to get physical disk:", e); + logger.debug("Failed to get physical disk:", e); throw new CloudRuntimeException(e.toString()); } } @Override public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map details) { - s_logger.info("Attempting to create storage pool " + name + " (" + type.toString() + ") in libvirt"); + logger.info("Attempting to create storage pool " + name + " (" + type.toString() + ") in libvirt"); StoragePool sp = null; Connect conn = null; @@ -612,14 +613,14 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri if (sp != null && sp.isActive() == 0) { sp.undefine(); sp = null; - s_logger.info("Found existing defined storage pool " + name + ". It wasn't running, so we undefined it."); + logger.info("Found existing defined storage pool " + name + ". It wasn't running, so we undefined it."); } if (sp != null) { - s_logger.info("Found existing defined storage pool " + name + ", using it."); + logger.info("Found existing defined storage pool " + name + ", using it."); } } catch (LibvirtException e) { sp = null; - s_logger.warn("Storage pool " + name + " was not found running in libvirt. Need to create it."); + logger.warn("Storage pool " + name + " was not found running in libvirt. Need to create it."); } // libvirt strips trailing slashes off of path, we will too in order to match @@ -633,12 +634,12 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri // if anyone is, undefine the pool so we can define it as requested. 
// This should be safe since a pool in use can't be removed, and no // volumes are affected by unregistering the pool with libvirt. - s_logger.info("Didn't find an existing storage pool " + name + " by UUID, checking for pools with duplicate paths"); + logger.info("Didn't find an existing storage pool " + name + " by UUID, checking for pools with duplicate paths"); try { String[] poolnames = conn.listStoragePools(); for (String poolname : poolnames) { - s_logger.debug("Checking path of existing pool " + poolname + " against pool we want to create"); + logger.debug("Checking path of existing pool " + poolname + " against pool we want to create"); StoragePool p = conn.storagePoolLookupByName(poolname); LibvirtStoragePoolDef pdef = getStoragePoolDef(conn, p); if (pdef == null) { @@ -647,7 +648,7 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri String targetPath = pdef.getTargetPath(); if (targetPath != null && targetPath.equals(path)) { - s_logger.debug("Storage pool utilizing path '" + path + "' already exists as pool " + poolname + + logger.debug("Storage pool utilizing path '" + path + "' already exists as pool " + poolname + ", undefining so we can re-define with correct name " + name); if (p.isPersistent() == 1) { p.destroy(); @@ -658,25 +659,25 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri } } } catch (LibvirtException e) { - s_logger.error("Failure in attempting to see if an existing storage pool might be using the path of the pool to be created:" + e); + logger.error("Failure in attempting to see if an existing storage pool might be using the path of the pool to be created:" + e); } - s_logger.debug("Attempting to create storage pool " + name); + logger.debug("Attempting to create storage pool " + name); if (type == StoragePoolType.NetworkFilesystem) { try { sp = createNetfsStoragePool(PoolType.NETFS, conn, name, host, path); } catch (LibvirtException e) { - s_logger.error("Failed to 
create netfs mount: " + host + ":" + path , e); - s_logger.error(e.getStackTrace()); + logger.error("Failed to create netfs mount: " + host + ":" + path , e); + logger.error(e.getStackTrace()); throw new CloudRuntimeException(e.toString()); } } else if (type == StoragePoolType.Gluster) { try { sp = createNetfsStoragePool(PoolType.GLUSTERFS, conn, name, host, path); } catch (LibvirtException e) { - s_logger.error("Failed to create glusterfs mount: " + host + ":" + path , e); - s_logger.error(e.getStackTrace()); + logger.error("Failed to create glusterfs mount: " + host + ":" + path , e); + logger.error(e.getStackTrace()); throw new CloudRuntimeException(e.toString()); } } else if (type == StoragePoolType.SharedMountPoint || type == StoragePoolType.Filesystem) { @@ -694,7 +695,7 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri try { if (sp.isActive() == 0) { - s_logger.debug("Attempting to activate pool " + name); + logger.debug("Attempting to activate pool " + name); sp.create(0); } @@ -713,7 +714,7 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri @Override public boolean deleteStoragePool(String uuid) { - s_logger.info("Attempting to remove storage pool " + uuid + " from libvirt"); + logger.info("Attempting to remove storage pool " + uuid + " from libvirt"); Connect conn = null; try { conn = LibvirtConnection.getConnection(); @@ -727,7 +728,7 @@ public boolean deleteStoragePool(String uuid) { try { sp = conn.storagePoolLookupByUUIDString(uuid); } catch (LibvirtException e) { - s_logger.warn("Storage pool " + uuid + " doesn't exist in libvirt. Assuming it is already removed"); + logger.warn("Storage pool " + uuid + " doesn't exist in libvirt. 
Assuming it is already removed"); return true; } @@ -738,7 +739,7 @@ public boolean deleteStoragePool(String uuid) { try { s = conn.secretLookupByUUIDString(uuid); } catch (LibvirtException e) { - s_logger.info("Storage pool " + uuid + " has no corresponding secret. Not removing any secret."); + logger.info("Storage pool " + uuid + " has no corresponding secret. Not removing any secret."); } try { @@ -754,21 +755,21 @@ public boolean deleteStoragePool(String uuid) { s.free(); } - s_logger.info("Storage pool " + uuid + " was successfully removed from libvirt."); + logger.info("Storage pool " + uuid + " was successfully removed from libvirt."); return true; } catch (LibvirtException e) { // handle ebusy error when pool is quickly destroyed if (e.toString().contains("exit status 16")) { String targetPath = _mountPoint + File.separator + uuid; - s_logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. Trying umount location " + targetPath + + logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. 
Trying umount location " + targetPath + "again in a few seconds"); String result = Script.runSimpleBashScript("sleep 5 && umount " + targetPath); if (result == null) { - s_logger.error("Succeeded in unmounting " + targetPath); + logger.error("Succeeded in unmounting " + targetPath); return true; } - s_logger.error("Failed to unmount " + targetPath); + logger.error("Failed to unmount " + targetPath); } throw new CloudRuntimeException(e.toString(), e); } @@ -778,7 +779,7 @@ public boolean deleteStoragePool(String uuid) { public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { - s_logger.info("Attempting to create volume " + name + " (" + pool.getType().toString() + ") in pool " + logger.info("Attempting to create volume " + name + " (" + pool.getType().toString() + ") in pool " + pool.getUuid() + " with size " + toHumanReadableSize(size)); StoragePoolType poolType = pool.getType(); @@ -813,7 +814,7 @@ private KVMPhysicalDisk createPhysicalDiskByLibVirt(String name, KVMStoragePool LibvirtStorageVolumeDef volDef = new LibvirtStorageVolumeDef(name, size, libvirtformat, null, null); - s_logger.debug(volDef.toString()); + logger.debug(volDef.toString()); try { StorageVol vol = virtPool.storageVolCreateXML(volDef.toString(), 0); volPath = vol.getPath(); @@ -934,7 +935,7 @@ public boolean disconnectPhysicalDiskByPath(String localPath) { @Override public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { - s_logger.info("Attempting to remove volume " + uuid + " from pool " + pool.getUuid()); + logger.info("Attempting to remove volume " + uuid + " from pool " + pool.getUuid()); /** * RBD volume can have snapshots and while they exist libvirt @@ -944,48 +945,48 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag */ if (pool.getType() == StoragePoolType.RBD) { try { - 
s_logger.info("Unprotecting and Removing RBD snapshots of image " + pool.getSourceDir() + "/" + uuid + " prior to removing the image"); + logger.info("Unprotecting and Removing RBD snapshots of image " + pool.getSourceDir() + "/" + uuid + " prior to removing the image"); Rados r = new Rados(pool.getAuthUserName()); r.confSet("mon_host", pool.getSourceHost() + ":" + pool.getSourcePort()); r.confSet("key", pool.getAuthSecret()); r.confSet("client_mount_timeout", "30"); r.connect(); - s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); + logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); IoCTX io = r.ioCtxCreate(pool.getSourceDir()); Rbd rbd = new Rbd(io); RbdImage image = rbd.open(uuid); - s_logger.debug("Fetching list of snapshots of RBD image " + pool.getSourceDir() + "/" + uuid); + logger.debug("Fetching list of snapshots of RBD image " + pool.getSourceDir() + "/" + uuid); List snaps = image.snapList(); try { for (RbdSnapInfo snap : snaps) { if (image.snapIsProtected(snap.name)) { - s_logger.debug("Unprotecting snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name); + logger.debug("Unprotecting snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name); image.snapUnprotect(snap.name); } else { - s_logger.debug("Snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name + " is not protected."); + logger.debug("Snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name + " is not protected."); } - s_logger.debug("Removing snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name); + logger.debug("Removing snapshot " + pool.getSourceDir() + "/" + uuid + "@" + snap.name); image.snapRemove(snap.name); } - s_logger.info("Successfully unprotected and removed any remaining snapshots (" + snaps.size() + ") of " + logger.info("Successfully unprotected and removed any remaining snapshots (" + snaps.size() + ") of " + pool.getSourceDir() + "/" + uuid + " Continuing to remove the 
RBD image"); } catch (RbdException e) { - s_logger.error("Failed to remove snapshot with exception: " + e.toString() + + logger.error("Failed to remove snapshot with exception: " + e.toString() + ", RBD error: " + ErrorCode.getErrorMessage(e.getReturnValue())); throw new CloudRuntimeException(e.toString() + " - " + ErrorCode.getErrorMessage(e.getReturnValue())); } finally { - s_logger.debug("Closing image and destroying context"); + logger.debug("Closing image and destroying context"); rbd.close(image); r.ioCtxDestroy(io); } } catch (RadosException e) { - s_logger.error("Failed to remove snapshot with exception: " + e.toString() + + logger.error("Failed to remove snapshot with exception: " + e.toString() + ", RBD error: " + ErrorCode.getErrorMessage(e.getReturnValue())); throw new CloudRuntimeException(e.toString() + " - " + ErrorCode.getErrorMessage(e.getReturnValue())); } catch (RbdException e) { - s_logger.error("Failed to remove snapshot with exception: " + e.toString() + + logger.error("Failed to remove snapshot with exception: " + e.toString() + ", RBD error: " + ErrorCode.getErrorMessage(e.getReturnValue())); throw new CloudRuntimeException(e.toString() + " - " + ErrorCode.getErrorMessage(e.getReturnValue())); } @@ -994,7 +995,7 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag LibvirtStoragePool libvirtPool = (LibvirtStoragePool)pool; try { StorageVol vol = getVolume(libvirtPool.getPool(), uuid); - s_logger.debug("Instructing libvirt to remove volume " + uuid + " from pool " + pool.getUuid()); + logger.debug("Instructing libvirt to remove volume " + uuid + " from pool " + pool.getUuid()); if(Storage.ImageFormat.DIR.equals(format)){ deleteDirVol(libvirtPool, vol); } else { @@ -1020,7 +1021,7 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, Storage.ProvisioningType 
provisioningType, long size, KVMStoragePool destPool, int timeout, byte[] passphrase) { - s_logger.info("Creating volume " + name + " from template " + template.getName() + " in pool " + destPool.getUuid() + + logger.info("Creating volume " + name + " from template " + template.getName() + " in pool " + destPool.getUuid() + " (" + destPool.getType().toString() + ") with size " + toHumanReadableSize(size)); KVMPhysicalDisk disk = null; @@ -1139,7 +1140,7 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, QemuImg qemu = new QemuImg(timeout); qemu.convert(srcFile, destFile); } catch (QemuImgException | LibvirtException e) { - s_logger.error("Failed to create " + disk.getPath() + + logger.error("Failed to create " + disk.getPath() + " due to a failed executing of qemu-img: " + e.getMessage()); } } else { @@ -1155,14 +1156,14 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, try { if ((srcPool.getSourceHost().equals(destPool.getSourceHost())) && (srcPool.getSourceDir().equals(destPool.getSourceDir()))) { /* We are on the same Ceph cluster, but we require RBD format 2 on the source image */ - s_logger.debug("Trying to perform a RBD clone (layering) since we are operating in the same storage pool"); + logger.debug("Trying to perform a RBD clone (layering) since we are operating in the same storage pool"); Rados r = new Rados(srcPool.getAuthUserName()); r.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort()); r.confSet("key", srcPool.getAuthSecret()); r.confSet("client_mount_timeout", "30"); r.connect(); - s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); + logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); IoCTX io = r.ioCtxCreate(srcPool.getSourceDir()); Rbd rbd = new Rbd(io); @@ -1170,33 +1171,33 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, if (srcImage.isOldFormat()) { /* The source 
image is RBD format 1, we have to do a regular copy */ - s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() + + logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() + " is RBD format 1. We have to perform a regular copy (" + toHumanReadableSize(disk.getVirtualSize()) + " bytes)"); rbd.create(disk.getName(), disk.getVirtualSize(), RBD_FEATURES, rbdOrder); RbdImage destImage = rbd.open(disk.getName()); - s_logger.debug("Starting to copy " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir()); + logger.debug("Starting to copy " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir()); rbd.copy(srcImage, destImage); - s_logger.debug("Finished copying " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir()); + logger.debug("Finished copying " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir()); rbd.close(destImage); } else { - s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() + logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() + " is RBD format 2. 
We will perform a RBD clone using snapshot " + rbdTemplateSnapName); /* The source image is format 2, we can do a RBD snapshot+clone (layering) */ - s_logger.debug("Checking if RBD snapshot " + srcPool.getSourceDir() + "/" + template.getName() + logger.debug("Checking if RBD snapshot " + srcPool.getSourceDir() + "/" + template.getName() + "@" + rbdTemplateSnapName + " exists prior to attempting a clone operation."); List snaps = srcImage.snapList(); - s_logger.debug("Found " + snaps.size() + " snapshots on RBD image " + srcPool.getSourceDir() + "/" + template.getName()); + logger.debug("Found " + snaps.size() + " snapshots on RBD image " + srcPool.getSourceDir() + "/" + template.getName()); boolean snapFound = false; for (RbdSnapInfo snap : snaps) { if (rbdTemplateSnapName.equals(snap.name)) { - s_logger.debug("RBD snapshot " + srcPool.getSourceDir() + "/" + template.getName() + logger.debug("RBD snapshot " + srcPool.getSourceDir() + "/" + template.getName() + "@" + rbdTemplateSnapName + " already exists."); snapFound = true; break; @@ -1204,20 +1205,20 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, } if (!snapFound) { - s_logger.debug("Creating RBD snapshot " + rbdTemplateSnapName + " on image " + name); + logger.debug("Creating RBD snapshot " + rbdTemplateSnapName + " on image " + name); srcImage.snapCreate(rbdTemplateSnapName); - s_logger.debug("Protecting RBD snapshot " + rbdTemplateSnapName + " on image " + name); + logger.debug("Protecting RBD snapshot " + rbdTemplateSnapName + " on image " + name); srcImage.snapProtect(rbdTemplateSnapName); } rbd.clone(template.getName(), rbdTemplateSnapName, io, disk.getName(), RBD_FEATURES, rbdOrder); - s_logger.debug("Successfully cloned " + template.getName() + "@" + rbdTemplateSnapName + " to " + disk.getName()); + logger.debug("Successfully cloned " + template.getName() + "@" + rbdTemplateSnapName + " to " + disk.getName()); /* We also need to resize the image if the VM was deployed 
with a larger root disk size */ if (disk.getVirtualSize() > template.getVirtualSize()) { RbdImage diskImage = rbd.open(disk.getName()); diskImage.resize(disk.getVirtualSize()); rbd.close(diskImage); - s_logger.debug("Resized " + disk.getName() + " to " + toHumanReadableSize(disk.getVirtualSize())); + logger.debug("Resized " + disk.getName() + " to " + toHumanReadableSize(disk.getVirtualSize())); } } @@ -1226,21 +1227,21 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, r.ioCtxDestroy(io); } else { /* The source pool or host is not the same Ceph cluster, we do a simple copy with Qemu-Img */ - s_logger.debug("Both the source and destination are RBD, but not the same Ceph cluster. Performing a copy"); + logger.debug("Both the source and destination are RBD, but not the same Ceph cluster. Performing a copy"); Rados rSrc = new Rados(srcPool.getAuthUserName()); rSrc.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort()); rSrc.confSet("key", srcPool.getAuthSecret()); rSrc.confSet("client_mount_timeout", "30"); rSrc.connect(); - s_logger.debug("Successfully connected to source Ceph cluster at " + rSrc.confGet("mon_host")); + logger.debug("Successfully connected to source Ceph cluster at " + rSrc.confGet("mon_host")); Rados rDest = new Rados(destPool.getAuthUserName()); rDest.confSet("mon_host", destPool.getSourceHost() + ":" + destPool.getSourcePort()); rDest.confSet("key", destPool.getAuthSecret()); rDest.confSet("client_mount_timeout", "30"); rDest.connect(); - s_logger.debug("Successfully connected to source Ceph cluster at " + rDest.confGet("mon_host")); + logger.debug("Successfully connected to source Ceph cluster at " + rDest.confGet("mon_host")); IoCTX sIO = rSrc.ioCtxCreate(srcPool.getSourceDir()); Rbd sRbd = new Rbd(sIO); @@ -1248,14 +1249,14 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, IoCTX dIO = rDest.ioCtxCreate(destPool.getSourceDir()); Rbd dRbd = new Rbd(dIO); - 
s_logger.debug("Creating " + disk.getName() + " on the destination cluster " + rDest.confGet("mon_host") + " in pool " + + logger.debug("Creating " + disk.getName() + " on the destination cluster " + rDest.confGet("mon_host") + " in pool " + destPool.getSourceDir()); dRbd.create(disk.getName(), disk.getVirtualSize(), RBD_FEATURES, rbdOrder); RbdImage srcImage = sRbd.open(template.getName()); RbdImage destImage = dRbd.open(disk.getName()); - s_logger.debug("Copying " + template.getName() + " from Ceph cluster " + rSrc.confGet("mon_host") + " to " + disk.getName() + logger.debug("Copying " + template.getName() + " from Ceph cluster " + rSrc.confGet("mon_host") + " to " + disk.getName() + " on cluster " + rDest.confGet("mon_host")); sRbd.copy(srcImage, destImage); @@ -1266,10 +1267,10 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template, rDest.ioCtxDestroy(dIO); } } catch (RadosException e) { - s_logger.error("Failed to perform a RADOS action on the Ceph cluster, the error was: " + e.getMessage()); + logger.error("Failed to perform a RADOS action on the Ceph cluster, the error was: " + e.getMessage()); disk = null; } catch (RbdException e) { - s_logger.error("Failed to perform a RBD action on the Ceph cluster, the error was: " + e.getMessage()); + logger.error("Failed to perform a RBD action on the Ceph cluster, the error was: " + e.getMessage()); disk = null; } } @@ -1328,7 +1329,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt String sourcePath = disk.getPath(); KVMPhysicalDisk newDisk; - s_logger.debug("copyPhysicalDisk: disk size:" + toHumanReadableSize(disk.getSize()) + ", virtualsize:" + toHumanReadableSize(disk.getVirtualSize())+" format:"+disk.getFormat()); + logger.debug("copyPhysicalDisk: disk size:" + toHumanReadableSize(disk.getSize()) + ", virtualsize:" + toHumanReadableSize(disk.getVirtualSize())+" format:"+disk.getFormat()); if (destPool.getType() != StoragePoolType.RBD) { if 
(disk.getFormat() == PhysicalDiskFormat.TAR) { newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, Storage.ProvisioningType.THIN, disk.getVirtualSize(), null); @@ -1384,12 +1385,12 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt newDisk.setVirtualSize(virtualSize); newDisk.setSize(virtualSize); } catch (QemuImgException | LibvirtException e) { - s_logger.error("Failed to convert " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + logger.error("Failed to convert " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); newDisk = null; } } } catch (QemuImgException e) { - s_logger.error("Failed to fetch the information of file " + srcFile.getFileName() + " the error was: " + e.getMessage()); + logger.error("Failed to fetch the information of file " + srcFile.getFileName() + " the error was: " + e.getMessage()); newDisk = null; } } @@ -1398,7 +1399,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt * Using qemu-img we copy the QCOW2 disk to RAW (on RBD) directly. * To do so it's mandatory that librbd on the system is at least 0.67.7 (Ceph Dumpling) */ - s_logger.debug("The source image is not RBD, but the destination is. We will convert into RBD format 2"); + logger.debug("The source image is not RBD, but the destination is. 
We will convert into RBD format 2"); try { srcFile = new QemuImgFile(sourcePath, sourceFormat); String rbdDestPath = destPool.getSourceDir() + "/" + name; @@ -1409,9 +1410,9 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt rbdDestPath); destFile = new QemuImgFile(rbdDestFile, destFormat); - s_logger.debug("Starting copy from source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath); + logger.debug("Starting copy from source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath); qemu.convert(srcFile, destFile); - s_logger.debug("Successfully converted source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath); + logger.debug("Successfully converted source image " + srcFile.getFileName() + " to RBD image " + rbdDestPath); /* We have to stat the RBD image to see how big it became afterwards */ Rados r = new Rados(destPool.getAuthUserName()); @@ -1419,7 +1420,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt r.confSet("key", destPool.getAuthSecret()); r.confSet("client_mount_timeout", "30"); r.connect(); - s_logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); + logger.debug("Successfully connected to Ceph cluster at " + r.confGet("mon_host")); IoCTX io = r.ioCtxCreate(destPool.getSourceDir()); Rbd rbd = new Rbd(io); @@ -1428,20 +1429,20 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt RbdImageInfo rbdInfo = image.stat(); newDisk.setSize(rbdInfo.size); newDisk.setVirtualSize(rbdInfo.size); - s_logger.debug("After copy the resulting RBD image " + rbdDestPath + " is " + toHumanReadableSize(rbdInfo.size) + " bytes long"); + logger.debug("After copy the resulting RBD image " + rbdDestPath + " is " + toHumanReadableSize(rbdInfo.size) + " bytes long"); rbd.close(image); r.ioCtxDestroy(io); } catch (QemuImgException | LibvirtException e) { String srcFilename = srcFile != null ? 
srcFile.getFileName() : null; String destFilename = destFile != null ? destFile.getFileName() : null; - s_logger.error(String.format("Failed to convert from %s to %s the error was: %s", srcFilename, destFilename, e.getMessage())); + logger.error(String.format("Failed to convert from %s to %s the error was: %s", srcFilename, destFilename, e.getMessage())); newDisk = null; } catch (RadosException e) { - s_logger.error("A Ceph RADOS operation failed (" + e.getReturnValue() + "). The error was: " + e.getMessage()); + logger.error("A Ceph RADOS operation failed (" + e.getReturnValue() + "). The error was: " + e.getMessage()); newDisk = null; } catch (RbdException e) { - s_logger.error("A Ceph RBD operation failed (" + e.getReturnValue() + "). The error was: " + e.getMessage()); + logger.error("A Ceph RBD operation failed (" + e.getReturnValue() + "). The error was: " + e.getMessage()); newDisk = null; } } else { @@ -1459,7 +1460,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt try { qemu.convert(srcFile, destFile); } catch (QemuImgException | LibvirtException e) { - s_logger.error("Failed to convert " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + logger.error("Failed to convert " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); newDisk = null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index d81b40391d09..52adc59cbe7b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -20,7 +20,8 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; import org.joda.time.Duration; import org.libvirt.StoragePool; @@ -39,7 +40,7 @@ import com.cloud.utils.script.Script; public class LibvirtStoragePool implements KVMStoragePool { - private static final Logger s_logger = Logger.getLogger(LibvirtStoragePool.class); + protected Logger logger = LogManager.getLogger(getClass()); protected String uuid; protected long capacity; protected long used; @@ -149,19 +150,19 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUid) { if (disk != null) { return disk; } - s_logger.debug("find volume bypass libvirt volumeUid " + volumeUid); + logger.debug("find volume bypass libvirt volumeUid " + volumeUid); //For network file system or file system, try to use java file to find the volume, instead of through libvirt. BUG:CLOUDSTACK-4459 String localPoolPath = this.getLocalPath(); File f = new File(localPoolPath + File.separator + volumeUuid); if (!f.exists()) { - s_logger.debug("volume: " + volumeUuid + " not exist on storage pool"); + logger.debug("volume: " + volumeUuid + " not exist on storage pool"); throw new CloudRuntimeException("Can't find volume:" + volumeUuid); } disk = new KVMPhysicalDisk(f.getPath(), volumeUuid, this); disk.setFormat(PhysicalDiskFormat.QCOW2); disk.setSize(f.length()); disk.setVirtualSize(f.length()); - s_logger.debug("find volume bypass libvirt disk " + disk.toString()); + logger.debug("find volume bypass libvirt disk " + disk.toString()); return disk; } @@ -271,7 +272,7 @@ public boolean delete() { try { return this._storageAdaptor.deleteStoragePool(this); } catch (Exception e) { - s_logger.debug("Failed to delete storage pool", e); + logger.debug("Failed to delete storage pool", e); } return false; } @@ -309,7 +310,7 @@ public String getHearthBeatPath() { public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String hostPrivateIp, boolean hostValidation) { - Script cmd = new Script(primaryStoragePool.getPool().getHearthBeatPath(), 
HeartBeatUpdateTimeout, s_logger); + Script cmd = new Script(primaryStoragePool.getPool().getHearthBeatPath(), HeartBeatUpdateTimeout, logger); cmd.add("-i", primaryStoragePool.getPoolIp()); cmd.add("-p", primaryStoragePool.getPoolMountSourcePath()); cmd.add("-m", primaryStoragePool.getMountDestPath()); @@ -339,7 +340,7 @@ public String getStorageNodeId() { public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) { boolean validResult = false; String hostIp = host.getPrivateNetwork().getIp(); - Script cmd = new Script(getHearthBeatPath(), HeartBeatCheckerTimeout, s_logger); + Script cmd = new Script(getHearthBeatPath(), HeartBeatCheckerTimeout, logger); cmd.add("-i", pool.getPoolIp()); cmd.add("-p", pool.getPoolMountSourcePath()); cmd.add("-m", pool.getMountDestPath()); @@ -350,11 +351,11 @@ public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) { String result = cmd.execute(parser); String parsedLine = parser.getLine(); - s_logger.debug(String.format("Checking heart beat with KVMHAChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, + logger.debug(String.format("Checking heart beat with KVMHAChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, pool.getPoolIp())); if (result == null && parsedLine.contains("DEAD")) { - s_logger.warn(String.format("Checking heart beat with KVMHAChecker command [%s] returned [%s]. [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(), + logger.warn(String.format("Checking heart beat with KVMHAChecker command [%s] returned [%s]. [%s]. 
It may cause a shutdown of host IP [%s].", cmd.toString(), result, parsedLine, hostIp)); } else { validResult = true; @@ -364,7 +365,7 @@ public Boolean checkingHeartBeat(HAStoragePool pool, HostTO host) { @Override public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activityScriptTimeout, String volumeUUIDListString, String vmActivityCheckPath, long duration) { - Script cmd = new Script(vmActivityCheckPath, activityScriptTimeout.getStandardSeconds(), s_logger); + Script cmd = new Script(vmActivityCheckPath, activityScriptTimeout.getStandardSeconds(), logger); cmd.add("-i", pool.getPoolIp()); cmd.add("-p", pool.getPoolMountSourcePath()); cmd.add("-m", pool.getMountDestPath()); @@ -377,10 +378,10 @@ public Boolean vmActivityCheck(HAStoragePool pool, HostTO host, Duration activit String result = cmd.execute(parser); String parsedLine = parser.getLine(); - s_logger.debug(String.format("Checking heart beat with KVMHAVMActivityChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, pool.getPoolIp())); + logger.debug(String.format("Checking heart beat with KVMHAVMActivityChecker [{command=\"%s\", result: \"%s\", log: \"%s\", pool: \"%s\"}].", cmd.toString(), result, parsedLine, pool.getPoolIp())); if (result == null && parsedLine.contains("DEAD")) { - s_logger.warn(String.format("Checking heart beat with KVMHAVMActivityChecker command [%s] returned [%s]. It is [%s]. It may cause a shutdown of host IP [%s].", cmd.toString(), result, parsedLine, host.getPrivateNetwork().getIp())); + logger.warn(String.format("Checking heart beat with KVMHAVMActivityChecker command [%s] returned [%s]. It is [%s]. 
It may cause a shutdown of host IP [%s].", cmd.toString(), result, parsedLine, host.getPrivateNetwork().getIp())); return false; } else { return true; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java index 62029fa1e340..8ec56b885f23 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java @@ -22,7 +22,8 @@ import java.util.Map; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.Connect; import org.libvirt.LibvirtException; import org.libvirt.StoragePool; @@ -44,7 +45,7 @@ import com.cloud.utils.script.Script; public class ManagedNfsStorageAdaptor implements StorageAdaptor { - private static final Logger s_logger = Logger.getLogger(ManagedNfsStorageAdaptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private String _mountPoint = "/mnt"; private StorageLayer _storageLayer; @@ -112,7 +113,7 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map MapStorageUuidToStoragePool = new HashMap<>(); /** @@ -651,7 +652,7 @@ static void cleanupStaleMaps() { synchronized(CLEANUP_LOCK) { long start = System.currentTimeMillis(); ScriptResult result = runScript(cleanupScript, cleanupTimeoutSecs * 1000); - LOGGER.debug("Multipath Cleanup Job elapsed time (ms): "+ (System.currentTimeMillis() - start) + "; result: " + result.getExitCode(), null); + LOGGER.debug("Multipath Cleanup Job elapsed time (ms): "+ (System.currentTimeMillis() - start) + "; result: " + result.getExitCode()); } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java index 2cd68753e014..9190510c7a72 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -37,7 +37,8 @@ import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.io.filefilter.WildcardFileFilter; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.LibvirtException; import com.cloud.storage.Storage; @@ -48,7 +49,7 @@ import org.apache.commons.lang3.StringUtils; public class ScaleIOStorageAdaptor implements StorageAdaptor { - private static final Logger LOGGER = Logger.getLogger(ScaleIOStorageAdaptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final Map MapStorageUuidToStoragePool = new HashMap<>(); private static final int DEFAULT_DISK_WAIT_TIME_IN_SECS = 60; @@ -60,7 +61,7 @@ public ScaleIOStorageAdaptor() { public KVMStoragePool getStoragePool(String uuid) { KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid); if (pool == null) { - LOGGER.error("Pool: " + uuid + " not found, probably sdc not connected on agent start"); + logger.error("Pool: " + uuid + " not found, probably sdc not connected on agent start"); throw new CloudRuntimeException("Pool: " + uuid + " not found, reconnect sdc and restart agent if sdc not connected on agent start"); } @@ -80,7 +81,7 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { @Override public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { if (StringUtils.isEmpty(volumePath) || pool == null) { - LOGGER.error("Unable to get 
physical disk, volume path or pool not specified"); + logger.error("Unable to get physical disk, volume path or pool not specified"); return null; } @@ -95,18 +96,18 @@ public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + diskFileName; final File diskFile = new File(diskFilePath); if (!diskFile.exists()) { - LOGGER.debug("Physical disk file: " + diskFilePath + " doesn't exists on the storage pool: " + pool.getUuid()); + logger.debug("Physical disk file: " + diskFilePath + " doesn't exists on the storage pool: " + pool.getUuid()); return null; } } else { - LOGGER.debug("Try with wildcard filter to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + logger.debug("Try with wildcard filter to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); final File dir = new File(ScaleIOUtil.DISK_PATH); final FileFilter fileFilter = new WildcardFileFilter(ScaleIOUtil.DISK_NAME_PREFIX_FILTER + volumeId); final File[] files = dir.listFiles(fileFilter); if (files != null && files.length == 1) { diskFilePath = files[0].getAbsolutePath(); } else { - LOGGER.debug("Unable to find the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + logger.debug("Unable to find the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); return null; } } @@ -136,7 +137,7 @@ public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { return disk; } catch (Exception e) { - LOGGER.error("Failed to get the physical disk: " + volumePath + " on the storage pool: " + pool.getUuid() + " due to " + e.getMessage()); + logger.error("Failed to get the physical disk: " + volumePath + " on the storage pool: " + pool.getUuid() + " due to " + e.getMessage()); throw new CloudRuntimeException("Failed to get the physical disk: " + volumePath + " on the storage pool: " + pool.getUuid()); } } @@ -190,7 +191,7 @@ 
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, Qemu qemuObjects.add(QemuObject.prepareSecretForQemuImg(disk.getFormat(), disk.getQemuEncryptFormat(), keyFile.toString(), "sec0", options)); QemuImgFile file = new QemuImgFile(disk.getPath(), formattedSize, disk.getFormat()); qemuImg.create(file, null, options, qemuObjects); - LOGGER.debug(String.format("Successfully formatted %s as encrypted QCOW2", file.getFileName())); + logger.debug(String.format("Successfully formatted %s as encrypted QCOW2", file.getFileName())); } catch (QemuImgException | LibvirtException | IOException ex) { throw new CloudRuntimeException("Failed to set up encrypted QCOW on block device " + disk.getPath(), ex); } @@ -211,7 +212,7 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, Qemu @Override public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { if (StringUtils.isEmpty(volumePath) || pool == null) { - LOGGER.error("Unable to connect physical disk due to insufficient data"); + logger.error("Unable to connect physical disk due to insufficient data"); throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data"); } @@ -228,7 +229,7 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map 0) { physicalDisk = getPhysicalDisk(volumePath, pool); if (physicalDisk != null && physicalDisk.getSize() > 0) { - LOGGER.debug("Found the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); + logger.debug("Found the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); return true; } @@ -253,11 +254,11 @@ private boolean waitForDiskToBecomeAvailable(String volumePath, KVMStoragePool p physicalDisk = getPhysicalDisk(volumePath, pool); if (physicalDisk != null && physicalDisk.getSize() > 0) { - LOGGER.debug("Found the volume using id: " + volumePath + " of the storage pool: " + pool.getUuid()); + logger.debug("Found 
the volume using id: " + volumePath + " of the storage pool: " + pool.getUuid()); return true; } - LOGGER.debug("Unable to find the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); + logger.debug("Unable to find the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); return false; } @@ -266,17 +267,17 @@ private long getPhysicalDiskSize(String diskPath) { return 0; } - Script diskCmd = new Script("blockdev", LOGGER); + Script diskCmd = new Script("blockdev", logger); diskCmd.add("--getsize64", diskPath); OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); String result = diskCmd.execute(parser); if (result != null) { - LOGGER.warn("Unable to get the disk size at path: " + diskPath); + logger.warn("Unable to get the disk size at path: " + diskPath); return 0; } else { - LOGGER.info("Able to retrieve the disk size at path:" + diskPath); + logger.info("Able to retrieve the disk size at path:" + diskPath); } return Long.parseLong(parser.getLine()); @@ -325,7 +326,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[]dstPassphrase, Storage.ProvisioningType provisioningType) { if (StringUtils.isEmpty(name) || disk == null || destPool == null) { - LOGGER.error("Unable to copy physical disk due to insufficient data"); + logger.error("Unable to copy physical disk due to insufficient data"); throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); } @@ -333,11 +334,11 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt provisioningType = Storage.ProvisioningType.THIN; } - LOGGER.debug("Copy physical disk with size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); + logger.debug("Copy physical disk with 
size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); if (destDisk == null) { - LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + logger.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); } @@ -388,33 +389,33 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt } boolean forceSourceFormat = srcQemuFile.getFormat() == QemuImg.PhysicalDiskFormat.RAW; - LOGGER.debug(String.format("Starting copy from source disk %s(%s) to PowerFlex volume %s(%s), forcing source format is %b", srcQemuFile.getFileName(), srcQemuFile.getFormat(), destQemuFile.getFileName(), destQemuFile.getFormat(), forceSourceFormat)); + logger.debug(String.format("Starting copy from source disk %s(%s) to PowerFlex volume %s(%s), forcing source format is %b", srcQemuFile.getFileName(), srcQemuFile.getFormat(), destQemuFile.getFileName(), destQemuFile.getFormat(), forceSourceFormat)); qemuImageOpts.setImageOptsFlag(true); qemu.convert(srcQemuFile, destQemuFile, options, qemuObjects, qemuImageOpts,null, forceSourceFormat); - LOGGER.debug("Successfully converted source disk image " + srcQemuFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); + logger.debug("Successfully converted source disk image " + srcQemuFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); if (destQemuFile.getFormat() == QemuImg.PhysicalDiskFormat.QCOW2 && !disk.useAsTemplate()) { QemuImageOptions resizeOptions = new QemuImageOptions(destQemuFile.getFormat(), destPath, destKeyName); resizeQcow2ToVolume(destPath, resizeOptions, qemuObjects, timeout); - LOGGER.debug("Resized volume at " + destPath); + logger.debug("Resized volume at " + destPath); } } catch 
(QemuImgException | LibvirtException | IOException e) { try { Map srcInfo = qemu.info(srcQemuFile); - LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); + logger.debug("Source disk info: " + Arrays.asList(srcInfo)); } catch (Exception ignored) { - LOGGER.warn("Unable to get info from source disk: " + disk.getName()); + logger.warn("Unable to get info from source disk: " + disk.getName()); } String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage())); - LOGGER.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg, e); } finally { if (cryptSetup != null) { try { cryptSetup.close(name); } catch (CryptSetupException ex) { - LOGGER.warn("Failed to clean up LUKS disk after copying disk", ex); + logger.warn("Failed to clean up LUKS disk after copying disk", ex); } } } @@ -452,11 +453,11 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S @Override public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { - LOGGER.error("Unable to create template from direct download template file due to insufficient data"); + logger.error("Unable to create template from direct download template file due to insufficient data"); throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data"); } - LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + logger.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); File sourceFile = 
new File(templateFilePath); if (!sourceFile.exists()) { @@ -464,7 +465,7 @@ public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFileP } if (destTemplatePath == null || destTemplatePath.isEmpty()) { - LOGGER.error("Failed to create template, target template disk path not provided"); + logger.error("Failed to create template, target template disk path not provided"); throw new CloudRuntimeException("Target template disk path not provided"); } @@ -473,7 +474,7 @@ public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFileP } if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { - LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); + logger.error("Failed to create template, unsupported template format: " + format.toString()); throw new CloudRuntimeException("Unsupported template format: " + format.toString()); } @@ -485,13 +486,13 @@ public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFileP QemuImg qemu = new QemuImg(timeout, true, false); destDisk = destPool.getPhysicalDisk(destTemplatePath); if (destDisk == null) { - LOGGER.error("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); + logger.error("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); throw new CloudRuntimeException("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); } if (isTemplateExtractable(templateFilePath)) { srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); - LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); + logger.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); 
Script.runSimpleBashScript(extractCommand); Script.runSimpleBashScript("rm -f " + templateFilePath); @@ -514,12 +515,12 @@ public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFileP destFile = new QemuImgFile(destDisk.getPath(), QemuImg.PhysicalDiskFormat.QCOW2); destFile.setSize(srcFile.getSize()); - LOGGER.debug("Starting copy from source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + logger.debug("Starting copy from source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); qemu.create(destFile); qemu.convert(srcFile, destFile); - LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + logger.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); } catch (QemuImgException | LibvirtException e) { - LOGGER.error("Failed to convert. The error was: " + e.getMessage(), e); + logger.error("Failed to convert. 
The error was: " + e.getMessage(), e); destDisk = null; } finally { Script.runSimpleBashScript("rm -f " + srcTemplateFilePath); diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java index 14922727d34d..e061f1e8952d 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHAProvider.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.ha.provider.host.HAAbstractHostProvider; import org.apache.cloudstack.outofbandmanagement.OutOfBandManagement.PowerOperation; import org.apache.cloudstack.outofbandmanagement.OutOfBandManagementService; -import org.apache.log4j.Logger; import org.joda.time.DateTime; import javax.inject.Inject; import java.security.InvalidParameterException; public final class KVMHAProvider extends HAAbstractHostProvider implements HAProvider, Configurable { - private final static Logger LOG = Logger.getLogger(KVMHAProvider.class); @Inject protected KVMHostActivityChecker hostActivityChecker; @@ -75,11 +73,11 @@ public boolean recover(Host r) throws HARecoveryException { final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.RESET, null); return resp.getSuccess(); } else { - LOG.warn("OOBM recover operation failed for the host " + r.getName()); + logger.warn("OOBM recover operation failed for the host " + r.getName()); return false; } } catch (Exception e){ - LOG.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); + logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); throw new HARecoveryException(" OOBM service is not configured or enabled for this host " + r.getName(), e); } } @@ -92,11 +90,11 @@ public boolean fence(Host r) 
throws HAFenceException { final OutOfBandManagementResponse resp = outOfBandManagementService.executePowerOperation(r, PowerOperation.OFF, null); return resp.getSuccess(); } else { - LOG.warn("OOBM fence operation failed for this host " + r.getName()); + logger.warn("OOBM fence operation failed for this host " + r.getName()); return false; } } catch (Exception e){ - LOG.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); + logger.warn("OOBM service is not configured or enabled for this host " + r.getName() + " error is " + e.getMessage()); throw new HAFenceException("OBM service is not configured or enabled for this host " + r.getName() , e); } } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java index 0866d668a439..10d684bbdd32 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/kvm/ha/KVMHostActivityChecker.java @@ -42,7 +42,6 @@ import org.apache.cloudstack.ha.provider.HealthCheckerInterface; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.commons.lang.ArrayUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -51,7 +50,6 @@ import java.util.List; public class KVMHostActivityChecker extends AdapterBase implements ActivityCheckerInterface, HealthCheckerInterface { - private final static Logger LOG = Logger.getLogger(KVMHostActivityChecker.class); @Inject private VolumeDao volumeDao; @@ -75,7 +73,7 @@ public boolean isActive(Host r, DateTime suspectTime) throws HACheckerException throw e; } catch (Exception e){ String message = String.format("Operation timed out, probably the %s is not reachable.", r.toString()); - LOG.warn(message, e); + 
logger.warn(message, e); throw new HACheckerException(message, e); } } @@ -93,22 +91,22 @@ private boolean isAgentActive(Host agent) { Status neighbourStatus = Status.Unknown; final CheckOnHostCommand cmd = new CheckOnHostCommand(agent, HighAvailabilityManager.KvmHAFenceHostIfHeartbeatFailsOnStorage.value()); try { - LOG.debug(String.format("Checking %s status...", agent.toString())); + logger.debug(String.format("Checking %s status...", agent.toString())); Answer answer = agentMgr.easySend(agent.getId(), cmd); if (answer != null) { hostStatus = answer.getResult() ? Status.Down : Status.Up; - LOG.debug(String.format("%s has the status [%s].", agent.toString(), hostStatus)); + logger.debug(String.format("%s has the status [%s].", agent.toString(), hostStatus)); if ( hostStatus == Status.Up ){ return true; } } else { - LOG.debug(String.format("Setting %s to \"Disconnected\" status.", agent.toString())); + logger.debug(String.format("Setting %s to \"Disconnected\" status.", agent.toString())); hostStatus = Status.Disconnected; } } catch (Exception e) { - LOG.warn(String.format("Failed to send command CheckOnHostCommand to %s.", agent.toString()), e); + logger.warn(String.format("Failed to send command CheckOnHostCommand to %s.", agent.toString()), e); } List neighbors = resourceManager.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); @@ -118,22 +116,22 @@ private boolean isAgentActive(Host agent) { } try { - LOG.debug(String.format("Investigating %s via neighbouring %s.", agent.toString(), neighbor.toString())); + logger.debug(String.format("Investigating %s via neighbouring %s.", agent.toString(), neighbor.toString())); Answer answer = agentMgr.easySend(neighbor.getId(), cmd); if (answer != null) { neighbourStatus = answer.getResult() ? 
Status.Down : Status.Up; - LOG.debug(String.format("Neighbouring %s returned status [%s] for the investigated %s.", neighbor.toString(), neighbourStatus, agent.toString())); + logger.debug(String.format("Neighbouring %s returned status [%s] for the investigated %s.", neighbor.toString(), neighbourStatus, agent.toString())); if (neighbourStatus == Status.Up) { break; } } else { - LOG.debug(String.format("Neighbouring %s is Disconnected.", neighbor.toString())); + logger.debug(String.format("Neighbouring %s is Disconnected.", neighbor.toString())); } } catch (Exception e) { - LOG.warn(String.format("Failed to send command CheckOnHostCommand to %s.", neighbor.toString()), e); + logger.warn(String.format("Failed to send command CheckOnHostCommand to %s.", neighbor.toString()), e); } } if (neighbourStatus == Status.Up && (hostStatus == Status.Disconnected || hostStatus == Status.Down)) { @@ -143,7 +141,7 @@ private boolean isAgentActive(Host agent) { hostStatus = Status.Down; } - LOG.debug(String.format("%s has the status [%s].", agent.toString(), hostStatus)); + logger.debug(String.format("%s has the status [%s].", agent.toString(), hostStatus)); return hostStatus == Status.Up; } @@ -157,7 +155,7 @@ private boolean isVMActivtyOnHost(Host agent, DateTime suspectTime) throws HAChe for (StoragePool pool : poolVolMap.keySet()) { activityStatus = verifyActivityOfStorageOnHost(poolVolMap, pool, agent, suspectTime, activityStatus); if (!activityStatus) { - LOG.warn(String.format("It seems that the storage pool [%s] does not have activity on %s.", pool.getId(), agent.toString())); + logger.warn(String.format("It seems that the storage pool [%s] does not have activity on %s.", pool.getId(), agent.toString())); break; } } @@ -169,21 +167,21 @@ protected boolean verifyActivityOfStorageOnHost(HashMap volume_list = poolVolMap.get(pool); final CheckVMActivityOnStoragePoolCommand cmd = new CheckVMActivityOnStoragePoolCommand(agent, pool, volume_list, suspectTime); - 
LOG.debug(String.format("Checking VM activity for %s on storage pool [%s].", agent.toString(), pool.getId())); + logger.debug(String.format("Checking VM activity for %s on storage pool [%s].", agent.toString(), pool.getId())); try { Answer answer = storageManager.sendToPool(pool, getNeighbors(agent), cmd); if (answer != null) { activityStatus = !answer.getResult(); - LOG.debug(String.format("%s %s activity on storage pool [%s]", agent.toString(), activityStatus ? "has" : "does not have", pool.getId())); + logger.debug(String.format("%s %s activity on storage pool [%s]", agent.toString(), activityStatus ? "has" : "does not have", pool.getId())); } else { String message = String.format("Did not get a valid response for VM activity check for %s on storage pool [%s].", agent.toString(), pool.getId()); - LOG.debug(message); + logger.debug(message); throw new IllegalStateException(message); } } catch (StorageUnavailableException e){ String message = String.format("Storage [%s] is unavailable to do the check, probably the %s is not reachable.", pool.getId(), agent.toString()); - LOG.warn(message, e); + logger.warn(message, e); throw new HACheckerException(message, e); } return activityStatus; @@ -193,14 +191,14 @@ private HashMap> getVolumeUuidOnHost(Host agent) { List vm_list = vmInstanceDao.listByHostId(agent.getId()); List volume_list = new ArrayList(); for (VirtualMachine vm : vm_list) { - LOG.debug(String.format("Retrieving volumes of VM [%s]...", vm.getId())); + logger.debug(String.format("Retrieving volumes of VM [%s]...", vm.getId())); List vm_volume_list = volumeDao.findByInstance(vm.getId()); volume_list.addAll(vm_volume_list); } HashMap> poolVolMap = new HashMap>(); for (Volume vol : volume_list) { - LOG.debug(String.format("Retrieving storage pool [%s] of volume [%s]...", vol.getPoolId(), vol.getId())); + logger.debug(String.format("Retrieving storage pool [%s] of volume [%s]...", vol.getPoolId(), vol.getId())); StoragePool sp = 
storagePool.findById(vol.getPoolId()); if (!poolVolMap.containsKey(sp)) { List list = new ArrayList(); @@ -217,7 +215,7 @@ private HashMap> getVolumeUuidOnHost(Host agent) { public long[] getNeighbors(Host agent) { List neighbors = new ArrayList(); List cluster_hosts = resourceManager.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); - LOG.debug(String.format("Retrieving all \"Up\" hosts from cluster [%s]...", agent.getClusterId())); + logger.debug(String.format("Retrieving all \"Up\" hosts from cluster [%s]...", agent.getClusterId())); for (HostVO host : cluster_hosts) { if (host.getId() == agent.getId() || (host.getHypervisorType() != Hypervisor.HypervisorType.KVM && host.getHypervisorType() != Hypervisor.HypervisorType.LXC)) { continue; diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/CPUStat.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/CPUStat.java index d180d013a242..9dd14b1b510d 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/CPUStat.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/CPUStat.java @@ -21,10 +21,11 @@ import java.io.FileNotFoundException; import java.util.Scanner; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class CPUStat { - private static final Logger s_logger = Logger.getLogger(CPUStat.class); + protected Logger logger = LogManager.getLogger(getClass()); private Integer _cores; private UptimeStats _lastStats; @@ -58,7 +59,7 @@ private UptimeStats getUptimeAndCpuIdleTime() { String[] stats = scanner.useDelimiter("\\Z").next().split("\\s+"); uptime = new UptimeStats(Double.parseDouble(stats[0]), Double.parseDouble(stats[1])); } catch (FileNotFoundException ex) { - s_logger.warn("File " + _uptimeFile + " not found:" + ex.toString()); + logger.warn("File " + _uptimeFile + " not found:" + ex.toString()); } return 
uptime; } @@ -87,7 +88,7 @@ public Double getCpuLoadAverage() { try (Scanner scanner = new Scanner(f,"UTF-8");) { load = scanner.useDelimiter("\\Z").next().split("\\s+"); } catch (FileNotFoundException ex) { - s_logger.warn("File " + _uptimeFile + " not found:" + ex.toString()); + logger.warn("File " + _uptimeFile + " not found:" + ex.toString()); } return Double.parseDouble(load[0]); } diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java index d160cbfac3bb..420cb1732bd1 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/linux/KVMHostInfo.java @@ -29,7 +29,8 @@ import org.apache.cloudstack.utils.security.ParserUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.Connect; import org.libvirt.LibvirtException; import org.libvirt.NodeInfo; @@ -46,7 +47,7 @@ public class KVMHostInfo { - private static final Logger LOGGER = Logger.getLogger(KVMHostInfo.class); + protected static Logger LOGGER = LogManager.getLogger(KVMHostInfo.class); private int totalCpus; private int allocatableCpus; diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java index 1cd63b9b5661..d0736019469f 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java @@ -32,12 +32,13 @@ import com.cloud.storage.Storage; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import static java.util.regex.Pattern.CASE_INSENSITIVE; public class QemuImg { - private Logger logger = Logger.getLogger(this.getClass()); + private Logger logger = LogManager.getLogger(this.getClass()); public static final String BACKING_FILE = "backing_file"; public static final String BACKING_FILE_FORMAT = "backing_file_format"; diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index aac7f7343afe..19515ac83613 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -68,7 +68,7 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.SystemUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; import org.joda.time.Duration; import org.junit.Assert; import org.junit.Before; @@ -271,7 +271,7 @@ public class LibvirtComputingResourceTest { public void setup() throws Exception { libvirtComputingResourceSpy.qemuSocketsPath = new File("/var/run/qemu"); libvirtComputingResourceSpy.parser = parserMock; - LibvirtComputingResource.s_logger = loggerMock; + LibvirtComputingResource.LOGGER = loggerMock; } /** @@ -6050,7 +6050,7 @@ public void getVmsToSetMemoryBalloonStatsPeriodTestLibvirtError() throws Libvirt List result = libvirtComputingResourceSpy.getVmsToSetMemoryBalloonStatsPeriod(connMock); - Mockito.verify(loggerMock).error(Mockito.anyString(), Mockito.any()); + Mockito.verify(loggerMock).error(Mockito.anyString(), (Throwable) Mockito.any()); Assert.assertTrue(result.isEmpty()); } diff --git 
a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java index 14c63b548588..d70f5f088845 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConvertInstanceCommandWrapperTest.java @@ -182,7 +182,6 @@ public void testSanitizeDisksPath() { @Test public void testMoveTemporaryDisksToDestination() { KVMPhysicalDisk sourceDisk = Mockito.mock(KVMPhysicalDisk.class); - Mockito.when(sourceDisk.getPool()).thenReturn(temporaryPool); List disks = List.of(sourceDisk); String destinationPoolUuid = UUID.randomUUID().toString(); List destinationPools = List.of(destinationPoolUuid); diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmDiscoverer.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmDiscoverer.java index 46e152838114..82cb61d7a1fd 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmDiscoverer.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmDiscoverer.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.StartupCommand; @@ -53,7 +52,6 @@ import com.cloud.utils.ssh.SSHCmdHelper; public class OvmDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(OvmDiscoverer.class); protected String _publicNetworkDevice; protected String _privateNetworkDevice; protected String _guestNetworkDevice; @@ -97,25 +95,25 @@ private boolean checkIfExisted(String guid) { if 
(!url.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; - s_logger.debug(msg); + logger.debug(msg); return null; } if (clusterId == null) { String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg); } ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || (cluster.getHypervisorType() != HypervisorType.Ovm)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Ovm hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for Ovm hypervisors"); return null; } @@ -139,7 +137,7 @@ private boolean checkIfExisted(String guid) { throw new CloudRuntimeException("The host " + hostIp + " has been added before"); } - s_logger.debug("Ovm discover is going to disover host having guid " + guid); + logger.debug("Ovm discover is going to disover host having guid " + guid); ClusterVO clu = _clusterDao.findById(clusterId); if (clu.getGuid() == null) { @@ -196,16 +194,16 @@ private boolean checkIfExisted(String guid) { resources.put(ovmResource, details); return resources; } catch (XmlRpcException e) { - s_logger.debug("XmlRpc exception, Unable to discover OVM: " + url, e); + logger.debug("XmlRpc exception, Unable to discover OVM: " + url, e); return null; } catch (UnknownHostException e) { - s_logger.debug("Host name resolve failed exception, Unable to discover OVM: " + url, e); + logger.debug("Host name resolve failed exception, Unable to discover OVM: " + url, e); return null; } catch (ConfigurationException e) { - s_logger.debug("Configure resource failed, Unable to discover OVM: " + url, e); + logger.debug("Configure resource failed, Unable to discover OVM: " + url, e); return 
null; } catch (Exception e) { - s_logger.debug("Unable to discover OVM: " + url, e); + logger.debug("Unable to discover OVM: " + url, e); return null; } } diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmFencer.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmFencer.java index 6a247d914398..60792f0ddb7d 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmFencer.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmFencer.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.FenceAnswer; @@ -39,7 +38,6 @@ import com.cloud.vm.VirtualMachine; public class OvmFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(OvmFencer.class); @Inject AgentManager _agentMgr; @Inject @@ -69,7 +67,7 @@ public OvmFencer() { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.Ovm) { - s_logger.debug("Don't know how to fence non Ovm hosts " + host.getHypervisorType()); + logger.debug("Don't know how to fence non Ovm hosts " + host.getHypervisorType()); return null; } @@ -93,13 +91,13 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { try { answer = (FenceAnswer)_agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to 
the next host because " + h.toString() + " is unavailable", e); } continue; } @@ -109,8 +107,8 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } return false; diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java index cf2f1fbed64d..9d958a9894a4 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java @@ -29,7 +29,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import com.trilead.ssh2.SCPClient; @@ -134,7 +135,7 @@ import com.cloud.vm.VirtualMachine.PowerState; public class OvmResourceBase implements ServerResource, HypervisorResource { - private static final Logger s_logger = Logger.getLogger(OvmResourceBase.class); + protected Logger logger = LogManager.getLogger(getClass()); String _name; Long _zoneId; Long _podId; @@ -183,7 +184,7 @@ public boolean configure(String name, Map params) throws Configu _agentUserName = (String)params.get("agentusername"); _agentPassword = (String)params.get("agentpassword"); } catch (Exception e) { - s_logger.debug("Configure " + _name + " failed", e); + logger.debug("Configure " + _name + " failed", e); throw new ConfigurationException("Configure " + _name + " failed, " + e.toString()); } @@ -218,7 +219,7 @@ public boolean configure(String name, Map params) throws Configu try { setupServer(); } catch (Exception e) { - s_logger.debug("Setup server failed, ip " + _ip, e); + 
logger.debug("Setup server failed, ip " + _ip, e); throw new ConfigurationException("Unable to setup server"); } @@ -228,7 +229,7 @@ public boolean configure(String name, Map params) throws Configu OvmHost.registerAsVmServer(_conn); _bridges = OvmBridge.getAllBridges(_conn); } catch (XmlRpcException e) { - s_logger.debug("Get bridges failed", e); + logger.debug("Get bridges failed", e); throw new ConfigurationException("Cannot get bridges on host " + _ip + "," + e.getMessage()); } @@ -251,14 +252,14 @@ public boolean configure(String name, Map params) throws Configu try { _canBridgeFirewall = canBridgeFirewall(); } catch (XmlRpcException e) { - s_logger.error("Failed to detect whether the host supports security groups.", e); + logger.error("Failed to detect whether the host supports security groups.", e); _canBridgeFirewall = false; } */ _canBridgeFirewall = false; - s_logger.debug("OVM host doesn't support security groups."); + logger.debug("OVM host doesn't support security groups."); return true; } @@ -318,9 +319,9 @@ protected void fillHostInfo(StartupRoutingCommand cmd) { d.put("guest.network.device", _guestNetworkName); cmd.setHostDetails(d); - s_logger.debug(String.format("Add a OVM host(%s)", hostDetails.toJson())); + logger.debug(String.format("Add a OVM host(%s)", hostDetails.toJson())); } catch (XmlRpcException e) { - s_logger.debug("XML RPC Exception" + e.getMessage(), e); + logger.debug("XML RPC Exception" + e.getMessage(), e); throw new CloudRuntimeException("XML RPC Exception" + e.getMessage(), e); } } @@ -353,8 +354,8 @@ protected void setupServer() throws IOException { continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Copying " + script.getPath() + " to " + s_ovsAgentPath + " on " + _ip + " with permission 0644"); + if (logger.isDebugEnabled()) { + logger.debug("Copying " + script.getPath() + " to " + s_ovsAgentPath + " on " + _ip + " with permission 0644"); } scp.put(script.getPath(), s_ovsAgentPath, "0644"); } @@ -378,7 +379,7 @@ 
public StartupCommand[] initialize() { cmd.setCaps("hvm"); return new StartupCommand[] {cmd}; } catch (Exception e) { - s_logger.debug("Ovm resource initializes failed", e); + logger.debug("Ovm resource initializes failed", e); return null; } } @@ -389,7 +390,7 @@ public PingCommand getCurrentStatus(long id) { OvmHost.ping(_conn); return new PingRoutingCommand(getType(), id, getHostVmStateReport()); } catch (XmlRpcException e) { - s_logger.debug("Check agent status failed", e); + logger.debug("Check agent status failed", e); return null; } } @@ -401,11 +402,11 @@ protected ReadyAnswer execute(ReadyCommand cmd) { if (d.primaryIp.equalsIgnoreCase(_ip)) { return new ReadyAnswer(cmd); } else { - s_logger.debug("Primary IP changes to " + d.primaryIp + ", it should be " + _ip); + logger.debug("Primary IP changes to " + d.primaryIp + ", it should be " + _ip); return new ReadyAnswer(cmd, "I am not the primary server"); } } catch (XmlRpcException e) { - s_logger.debug("XML RPC Exception" + e.getMessage(), e); + logger.debug("XML RPC Exception" + e.getMessage(), e); throw new CloudRuntimeException("XML RPC Exception" + e.getMessage(), e); } @@ -418,7 +419,7 @@ protected void createNfsSr(StorageFilerTO pool) throws XmlRpcException { d.type = OvmStoragePool.NFS; d.uuid = pool.getUuid(); OvmStoragePool.create(_conn, d); - s_logger.debug(String.format("Created SR (mount point:%1$s)", mountPoint)); + logger.debug(String.format("Created SR (mount point:%1$s)", mountPoint)); } protected void createOCFS2Sr(StorageFilerTO pool) throws XmlRpcException { @@ -427,7 +428,7 @@ protected void createOCFS2Sr(StorageFilerTO pool) throws XmlRpcException { d.type = OvmStoragePool.OCFS2; d.uuid = pool.getUuid(); OvmStoragePool.create(_conn, d); - s_logger.debug(String.format("Created SR (mount point:%1$s)", d.path)); + logger.debug(String.format("Created SR (mount point:%1$s)", d.path)); } private void setupHeartBeat(String poolUuid) { @@ -437,7 +438,7 @@ private void setupHeartBeat(String 
poolUuid) { s_isHeartBeat = true; } } catch (Exception e) { - s_logger.debug("setup heart beat for " + _ip + " failed", e); + logger.debug("setup heart beat for " + _ip + " failed", e); s_isHeartBeat = false; } } @@ -459,7 +460,7 @@ protected Answer execute(ModifyStoragePoolCommand cmd) { ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd, d.totalSpace, d.freeSpace, tInfo); return answer; } catch (Exception e) { - s_logger.debug("ModifyStoragePoolCommand failed", e); + logger.debug("ModifyStoragePoolCommand failed", e); return new Answer(cmd, false, e.getMessage()); } } @@ -475,7 +476,7 @@ protected PrimaryStorageDownloadAnswer execute(final PrimaryStorageDownloadComma Pair res = OvmStoragePool.downloadTemplate(_conn, cmd.getPoolUuid(), secondaryStoragePath); return new PrimaryStorageDownloadAnswer(res.first(), res.second()); } catch (Exception e) { - s_logger.debug("PrimaryStorageDownloadCommand failed", e); + logger.debug("PrimaryStorageDownloadCommand failed", e); return new PrimaryStorageDownloadAnswer(e.getMessage()); } } @@ -497,7 +498,7 @@ protected CreateAnswer execute(CreateCommand cmd) { vol.size, null); return new CreateAnswer(cmd, volume); } catch (Exception e) { - s_logger.debug("CreateCommand failed", e); + logger.debug("CreateCommand failed", e); return new CreateAnswer(cmd, e.getMessage()); } } @@ -637,7 +638,7 @@ protected void cleanup(OvmVm.Details vm) { try { cleanupNetwork(vm.vifs); } catch (XmlRpcException e) { - s_logger.debug("Clean up network for " + vm.name + " failed", e); + logger.debug("Clean up network for " + vm.name + " failed", e); } _vmNetworkStats.remove(vm.name); } @@ -666,7 +667,7 @@ public synchronized StartAnswer execute(StartCommand cmd) { return new StartAnswer(cmd); } catch (Exception e) { - s_logger.debug("Start vm " + vmName + " failed", e); + logger.debug("Start vm " + vmName + " failed", e); cleanup(vmDetails); return new StartAnswer(cmd, e.getMessage()); } @@ -683,7 +684,7 @@ protected Answer 
execute(GetHostStatsCommand cmd) { HostStatsEntry hostStats = new HostStatsEntry(cmd.getHostId(), cpuUtil, rxBytes, txBytes, "host", totalMemory, freeMemory, 0, 0); return new GetHostStatsAnswer(cmd, hostStats); } catch (Exception e) { - s_logger.debug("Get host stats of " + cmd.getHostName() + " failed", e); + logger.debug("Get host stats of " + cmd.getHostName() + " failed", e); return new Answer(cmd, false, e.getMessage()); } @@ -697,7 +698,7 @@ public StopAnswer execute(StopCommand cmd) { try { vm = OvmVm.getDetails(_conn, vmName); } catch (XmlRpcException e) { - s_logger.debug("Unable to get details of vm: " + vmName + ", treating it as stopped", e); + logger.debug("Unable to get details of vm: " + vmName + ", treating it as stopped", e); return new StopAnswer(cmd, "success", true); } @@ -706,7 +707,7 @@ public StopAnswer execute(StopCommand cmd) { cleanup(vm); return new StopAnswer(cmd, "success", true); } catch (Exception e) { - s_logger.debug("Stop " + vmName + "failed", e); + logger.debug("Stop " + vmName + "failed", e); return new StopAnswer(cmd, e.getMessage(), false); } } @@ -720,7 +721,7 @@ public RebootAnswer execute(RebootCommand cmd) { Integer vncPort = Integer.parseInt(res.get("vncPort")); return new RebootAnswer(cmd, null, vncPort); } catch (Exception e) { - s_logger.debug("Reboot " + vmName + " failed", e); + logger.debug("Reboot " + vmName + " failed", e); return new RebootAnswer(cmd, e.getMessage(), false); } } @@ -728,7 +729,7 @@ public RebootAnswer execute(RebootCommand cmd) { private PowerState toPowerState(String vmName, String s) { PowerState state = s_powerStateMaps.get(s); if (state == null) { - s_logger.debug("Unkown state " + s + " for " + vmName); + logger.debug("Unkown state " + s + " for " + vmName); state = PowerState.PowerUnknown; } return state; @@ -760,7 +761,7 @@ protected GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { OvmStoragePool.Details d = OvmStoragePool.getDetailsByUuid(_conn, cmd.getStorageId()); 
return new GetStorageStatsAnswer(cmd, d.totalSpace, d.usedSpace); } catch (Exception e) { - s_logger.debug("GetStorageStatsCommand on pool " + cmd.getStorageId() + " failed", e); + logger.debug("GetStorageStatsCommand on pool " + cmd.getStorageId() + " failed", e); return new GetStorageStatsAnswer(cmd, e.getMessage()); } } @@ -801,7 +802,7 @@ protected GetVmStatsAnswer execute(GetVmStatsCommand cmd) { VmStatsEntry e = getVmStat(vmName); vmStatsNameMap.put(vmName, e); } catch (XmlRpcException e) { - s_logger.debug("Get vm stat for " + vmName + " failed", e); + logger.debug("Get vm stat for " + vmName + " failed", e); continue; } } @@ -813,15 +814,15 @@ public Answer execute(DestroyCommand cmd) { OvmVolume.destroy(_conn, cmd.getVolume().getPoolUuid(), cmd.getVolume().getPath()); return new Answer(cmd, true, "Success"); } catch (Exception e) { - s_logger.debug("Destroy volume " + cmd.getVolume().getName() + " failed", e); + logger.debug("Destroy volume " + cmd.getVolume().getName() + " failed", e); return new Answer(cmd, false, e.getMessage()); } } protected PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) { VirtualMachineTO vm = cmd.getVirtualMachine(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preparing host for migrating " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Preparing host for migrating " + vm); } NicTO[] nics = vm.getNics(); @@ -832,7 +833,7 @@ protected PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) { return new PrepareForMigrationAnswer(cmd); } catch (Exception e) { - s_logger.warn("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to " + e.toString(), e); + logger.warn("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to " + e.toString(), e); return new PrepareForMigrationAnswer(cmd, e); } } @@ -847,7 +848,7 @@ protected MigrateAnswer execute(final MigrateCommand cmd) { return new MigrateAnswer(cmd, true, "migration succeeded", null); 
} catch (Exception e) { String msg = "Catch Exception " + e.getClass().getName() + ": Migration failed due to " + e.toString(); - s_logger.debug(msg, e); + logger.debug(msg, e); return new MigrateAnswer(cmd, false, msg, null); } } @@ -860,13 +861,13 @@ protected CheckVirtualMachineAnswer execute(final CheckVirtualMachineCommand cmd HashMap states = getAllVms(); PowerState vmPowerState = states.get(vmName); if (vmPowerState == null) { - s_logger.warn("Check state of " + vmName + " return null in CheckVirtualMachineCommand"); + logger.warn("Check state of " + vmName + " return null in CheckVirtualMachineCommand"); vmPowerState = PowerState.PowerOff; } return new CheckVirtualMachineAnswer(cmd, vmPowerState, vncPort); } catch (Exception e) { - s_logger.debug("Check migration for " + vmName + " failed", e); + logger.debug("Check migration for " + vmName + " failed", e); return new CheckVirtualMachineAnswer(cmd, PowerState.PowerOff, null); } } @@ -880,7 +881,7 @@ protected GetVncPortAnswer execute(GetVncPortCommand cmd) { Integer vncPort = OvmVm.getVncPort(_conn, cmd.getName()); return new GetVncPortAnswer(cmd, _ip, vncPort); } catch (Exception e) { - s_logger.debug("get vnc port for " + cmd.getName() + " failed", e); + logger.debug("get vnc port for " + cmd.getName() + " failed", e); return new GetVncPortAnswer(cmd, e.getMessage()); } } @@ -895,7 +896,7 @@ protected Answer execute(PingTestCommand cmd) { return new Answer(cmd, true, "success"); } catch (Exception e) { - s_logger.debug("Ping " + cmd.getComputingHostIp() + " failed", e); + logger.debug("Ping " + cmd.getComputingHostIp() + " failed", e); return new Answer(cmd, false, e.getMessage()); } } @@ -905,7 +906,7 @@ protected FenceAnswer execute(FenceCommand cmd) { Boolean res = OvmHost.fence(_conn, cmd.getHostIp()); return new FenceAnswer(cmd, res, res.toString()); } catch (Exception e) { - s_logger.debug("fence " + cmd.getHostIp() + " failed", e); + logger.debug("fence " + cmd.getHostIp() + " failed", e); return 
new FenceAnswer(cmd, false, e.getMessage()); } } @@ -917,7 +918,7 @@ protected Answer execute(AttachIsoCommand cmd) { OvmVm.detachOrAttachIso(_conn, cmd.getVmName(), isoPath, cmd.isAttach()); return new Answer(cmd); } catch (Exception e) { - s_logger.debug("Attach or detach ISO " + cmd.getIsoPath() + " for " + cmd.getVmName() + " attach:" + cmd.isAttach() + " failed", e); + logger.debug("Attach or detach ISO " + cmd.getIsoPath() + " for " + cmd.getVmName() + " attach:" + cmd.isAttach() + " failed", e); return new Answer(cmd, false, e.getMessage()); } } @@ -932,15 +933,15 @@ private Answer execute(SecurityGroupRulesCmd cmd) { addNetworkRules(cmd.getVmName(), Long.toString(cmd.getVmId()), cmd.getGuestIp(), cmd.getSignature(), String.valueOf(cmd.getSeqNum()), cmd.getGuestMac(), cmd.stringifyRules(), vifDeviceName, bridgeName); } catch (XmlRpcException e) { - s_logger.error(e); + logger.error(e); result = false; } if (!result) { - s_logger.warn("Failed to program network rules for vm " + cmd.getVmName()); + logger.warn("Failed to program network rules for vm " + cmd.getVmName()); return new SecurityGroupRuleAnswer(cmd, false, "programming network rules failed"); } else { - s_logger.info("Programmed network rules for vm " + cmd.getVmName() + " guestIp=" + cmd.getGuestIp() + ":ingress num rules=" + cmd.getIngressRuleSet().size() + + logger.info("Programmed network rules for vm " + cmd.getVmName() + " guestIp=" + cmd.getGuestIp() + ":ingress num rules=" + cmd.getIngressRuleSet().size() + ":egress num rules=" + cmd.getEgressRuleSet().size()); return new SecurityGroupRuleAnswer(cmd); } @@ -951,7 +952,7 @@ private Answer execute(CleanupNetworkRulesCmd cmd) { try { result = cleanupNetworkRules(); } catch (XmlRpcException e) { - s_logger.error(e); + logger.error(e); result = false; } @@ -1013,7 +1014,7 @@ protected OvmVif.Details getVifFromVm(String vmName, Integer deviceId) throws Xm try { vifs = getInterfaces(vmName); } catch (XmlRpcException e) { - s_logger.error("Failed to 
get VIFs for VM " + vmName, e); + logger.error("Failed to get VIFs for VM " + vmName, e); throw e; } @@ -1044,7 +1045,7 @@ protected Answer execute(PrepareOCFS2NodesCommand cmd) { OvmStoragePool.prepareOCFS2Nodes(_conn, cmd.getClusterName(), params.toString()); return new Answer(cmd, true, "Success"); } catch (XmlRpcException e) { - s_logger.debug("OCFS2 prepare nodes failed", e); + logger.debug("OCFS2 prepare nodes failed", e); return new Answer(cmd, false, e.getMessage()); } } @@ -1069,7 +1070,7 @@ protected CreatePrivateTemplateAnswer execute(final CreatePrivateTemplateFromVol return new CreatePrivateTemplateAnswer(cmd, true, null, res.get("installPath"), Long.parseLong(res.get("virtualSize")), Long.parseLong(res.get("physicalSize")), res.get("templateFileName"), ImageFormat.RAW); } catch (Exception e) { - s_logger.debug("Create template failed", e); + logger.debug("Create template failed", e); return new CreatePrivateTemplateAnswer(cmd, false, e.getMessage()); } } @@ -1091,7 +1092,7 @@ protected CopyVolumeAnswer execute(CopyVolumeCommand cmd) { String res = OvmStoragePool.copyVolume(_conn, secStorageMountPath, volumeFolderOnSecStorage, volumePath, storagePoolUuid, toSec, wait); return new CopyVolumeAnswer(cmd, true, null, null, res); } catch (Exception e) { - s_logger.debug("Copy volume failed", e); + logger.debug("Copy volume failed", e); return new CopyVolumeAnswer(cmd, false, e.getMessage(), null, null); } } @@ -1100,15 +1101,15 @@ protected Answer execute(DeleteStoragePoolCommand cmd) { try { OvmStoragePool.delete(_conn, cmd.getPool().getUuid()); } catch (Exception e) { - s_logger.debug("Delete storage pool on host " + _ip + " failed, however, we leave to user for cleanup and tell management server it succeeded", e); + logger.debug("Delete storage pool on host " + _ip + " failed, however, we leave to user for cleanup and tell management server it succeeded", e); } return new Answer(cmd); } protected CheckNetworkAnswer execute(CheckNetworkCommand cmd) { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Checking if network name setup is done on the resource"); + if (logger.isDebugEnabled()) { + logger.debug("Checking if network name setup is done on the resource"); } List infoList = cmd.getPhysicalNetworkInfoList(); @@ -1137,7 +1138,7 @@ protected CheckNetworkAnswer execute(CheckNetworkCommand cmd) { } if (errorout) { - s_logger.error(msg); + logger.error(msg); return new CheckNetworkAnswer(cmd, false, msg); } else { return new CheckNetworkAnswer(cmd, true, "Network Setup check by names is done"); @@ -1146,8 +1147,8 @@ protected CheckNetworkAnswer execute(CheckNetworkCommand cmd) { private boolean isNetworkSetupByName(String nameTag) { if (nameTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for network setup by name " + nameTag); + if (logger.isDebugEnabled()) { + logger.debug("Looking for network setup by name " + nameTag); } return _bridges.contains(nameTag); } diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Connection.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Connection.java index 8d2edac68fa4..0c6a5ebd943c 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Connection.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Connection.java @@ -20,7 +20,8 @@ import java.net.URL; import java.util.TimeZone; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import org.apache.xmlrpc.client.TimingOutCallback; import org.apache.xmlrpc.client.XmlRpcClient; @@ -29,7 +30,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class Connection { - private static final Logger s_logger = Logger.getLogger(Connection.class); + protected Logger logger = LogManager.getLogger(getClass()); private XmlRpcClientConfigImpl _config = new XmlRpcClientConfigImpl(); XmlRpcClient _client; String _username; @@ 
-95,7 +96,7 @@ public Object callTimeoutInSec(String method, Object[] params, int timeout, bool /* * some parameters including user password should not be printed in log */ - s_logger.debug("Call Ovm agent: " + Coder.toJson(mParams)); + logger.debug("Call Ovm agent: " + Coder.toJson(mParams)); } long startTime = System.currentTimeMillis(); @@ -109,7 +110,7 @@ public Object callTimeoutInSec(String method, Object[] params, int timeout, bool } finally { long endTime = System.currentTimeMillis(); long during = (endTime - startTime) / 1000; // in secs - s_logger.debug("Ovm call " + method + " finished in " + String.valueOf(during) + " secs"); + logger.debug("Ovm call " + method + " finished in " + String.valueOf(during) + " secs"); } } diff --git a/plugins/hypervisors/ovm3/pom.xml b/plugins/hypervisors/ovm3/pom.xml index 0b96021de8f2..31f761bde78a 100644 --- a/plugins/hypervisors/ovm3/pom.xml +++ b/plugins/hypervisors/ovm3/pom.xml @@ -44,8 +44,12 @@ ${cs.commons-lang3.version} - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java index b7feb1ab23b0..a24ff3b1d364 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/ha/Ovm3Investigator.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -35,7 +34,6 @@ import com.cloud.utils.component.AdapterBase; public class Ovm3Investigator extends AdapterBase implements Investigator { - private static final Logger LOGGER = Logger.getLogger(Ovm3Investigator.class); @Inject HostDao hostDao; @Inject @@ -45,7 +43,7 @@ public class Ovm3Investigator extends AdapterBase implements Investigator { @Override public boolean 
isVmAlive(com.cloud.vm.VirtualMachine vm, Host host) throws UnknownVM { - LOGGER.debug("isVmAlive: " + vm.getHostName() + " on " + host.getName()); + logger.debug("isVmAlive: " + vm.getHostName() + " on " + host.getName()); if (host.getHypervisorType() != Hypervisor.HypervisorType.Ovm3) { throw new UnknownVM(); } @@ -58,7 +56,7 @@ public boolean isVmAlive(com.cloud.vm.VirtualMachine vm, Host host) throws Unkno @Override public Status isAgentAlive(Host agent) { - LOGGER.debug("isAgentAlive: " + agent.getName()); + logger.debug("isAgentAlive: " + agent.getName()); if (agent.getHypervisorType() != Hypervisor.HypervisorType.Ovm3) { return null; } @@ -74,7 +72,7 @@ public Status isAgentAlive(Host agent) { return answer.getResult() ? Status.Down : Status.Up; } } catch (Exception e) { - LOGGER.error("Failed to send command to host: " + neighbor.getId(), e); + logger.error("Failed to send command to host: " + neighbor.getId(), e); } } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/CloudstackPlugin.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/CloudstackPlugin.java index 3871787f60ed..298420a04b2a 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/CloudstackPlugin.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/CloudstackPlugin.java @@ -20,11 +20,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; public class CloudstackPlugin extends OvmObject { - private static final Logger LOGGER = Logger - .getLogger(CloudstackPlugin.class); private boolean checkstoragestarted = false; public CloudstackPlugin(Connection c) { setClient(c); @@ -48,7 +45,7 @@ public boolean ovsDomrUploadFile(String domr, String path, String file, content); } - public static class ReturnCode { + public class ReturnCode { private Map returnCode = new HashMap() { { put("rc", null); @@ -73,7 +70,7 @@ public Boolean getRc() throws 
Ovm3ResourceException { } else if (rc instanceof Long) { c = (Long) rc; } else { - LOGGER.debug("Incorrect return code: " + rc); + logger.debug("Incorrect return code: " + rc); return false; } returnCode.put("exit", c); @@ -126,7 +123,7 @@ public boolean dom0CheckPort(String ip, Integer port, Integer retries, Thread.sleep(sleep * 1000); } } catch (Exception e) { - LOGGER.error("Dom0 port check failed: " + e); + logger.error("Dom0 port check failed: " + e); } return x; } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Connection.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Connection.java index a873be420e1d..c43d36c934b1 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Connection.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Connection.java @@ -22,7 +22,8 @@ import java.util.List; import java.util.TimeZone; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import org.apache.xmlrpc.client.TimingOutCallback; import org.apache.xmlrpc.client.XmlRpcClient; @@ -30,7 +31,7 @@ import org.apache.xmlrpc.client.XmlRpcClientRequestImpl; public class Connection extends XmlRpcClient { - private static final Logger LOGGER = Logger.getLogger(Connection.class); + protected Logger logger = LogManager.getLogger(getClass()); private final XmlRpcClientConfigImpl xmlClientConfig = new XmlRpcClientConfigImpl(); private XmlRpcClient xmlClient; private String hostUser = null; @@ -83,7 +84,7 @@ private XmlRpcClient setupXmlClient() { /* reply time is 5 mins */ xmlClientConfig.setReplyTimeout(60 * 15000); if (hostUser != null && hostPass != null) { - LOGGER.debug("Setting username " + hostUser); + logger.debug("Setting username " + hostUser); xmlClientConfig.setBasicUserName(hostUser); xmlClientConfig.setBasicPassword(hostPass); } @@ 
-91,7 +92,7 @@ private XmlRpcClient setupXmlClient() { client.setConfig(xmlClientConfig); client.setTypeFactory(new RpcTypeFactory(client)); } catch (MalformedURLException e) { - LOGGER.info("Incorrect URL: ", e); + logger.info("Incorrect URL: ", e); } return client; } @@ -109,7 +110,7 @@ public Object callTimeoutInSec(String method, List params, int timeout, boolean debug) throws XmlRpcException { TimingOutCallback callback = new TimingOutCallback(timeout * 1000); if (debug) { - LOGGER.debug("Call Ovm3 agent " + hostName + "(" + hostIp +"): " + method + logger.debug("Call Ovm3 agent " + hostName + "(" + hostIp +"): " + method + " with " + params); } long startTime = System.currentTimeMillis(); @@ -120,22 +121,22 @@ public Object callTimeoutInSec(String method, List params, int timeout, xmlClient.executeAsync(req, callback); return callback.waitForResponse(); } catch (TimingOutCallback.TimeoutException e) { - LOGGER.info("Timeout: ", e); + logger.info("Timeout: ", e); throw new XmlRpcException(e.getMessage()); } catch (XmlRpcException e) { - LOGGER.info("XML RPC Exception occurred: ", e); + logger.info("XML RPC Exception occurred: ", e); throw e; } catch (RuntimeException e) { - LOGGER.info("Runtime Exception: ", e); + logger.info("Runtime Exception: ", e); throw new XmlRpcException(e.getMessage()); } catch (Throwable e) { - LOGGER.error("Holy crap batman!: ", e); + logger.error("Holy crap batman!: ", e); throw new XmlRpcException(e.getMessage(), e); } finally { long endTime = System.currentTimeMillis(); /* in seconds */ float during = (endTime - startTime) / (float) 1000; - LOGGER.debug("Ovm3 call " + method + " finished in " + during + logger.debug("Ovm3 call " + method + " finished in " + during + " secs, on " + hostIp + ":" + hostPort); } } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java index c0c0f3fa6821..50e2574eb54b 100644 
--- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java @@ -21,12 +21,9 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.w3c.dom.Document; public class Linux extends OvmObject { - private static final Logger LOGGER = Logger - .getLogger(Linux.class); private static final String DEVICE = "Device"; private static final String REMOTEDIR = "Remote_Dir"; private static final String MOUNTPOINT = "Mount_Point"; @@ -210,7 +207,7 @@ public String get(String element) throws Ovm3ResourceException { try { initMaps(); } catch (Ovm3ResourceException e) { - LOGGER.info("Unable to discover host: " + e.getMessage(), e); + logger.info("Unable to discover host: " + e.getMessage(), e); throw e; } if (ovmGeneric.containsKey(element)) { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Network.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Network.java index 008eb430b0ef..20f2f1eff703 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Network.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Network.java @@ -23,11 +23,9 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.log4j.Logger; import org.w3c.dom.Document; public class Network extends OvmObject { - private static final Logger LOGGER = Logger.getLogger(Network.class); private static final String START = "start"; private static final String BRIDGE = "Bridge"; private static final String ADDRESS = "Address"; @@ -123,7 +121,7 @@ private Network.Interface getNetIface(String key, String val) return iface.getValue(); } } - LOGGER.debug("Unable to find " + key + " Interface by value: " + val); + logger.debug("Unable to find " + key + " Interface by value: " + val); setSuccess(false); return null; } @@ -150,7 +148,7 @@ 
public Network.Interface getBridgeByName(String name) && getNetIface("Name", name).getIfType().contentEquals(BRIDGE)) { return getNetIface("Name", name); } - LOGGER.debug("Unable to find bridge by name: " + name); + logger.debug("Unable to find bridge by name: " + name); setSuccess(false); return null; } @@ -161,7 +159,7 @@ public Network.Interface getBridgeByIp(String ip) && getNetIface(ADDRESS, ip).getIfType().contentEquals(BRIDGE)) { return getNetIface(ADDRESS, ip); } - LOGGER.debug("Unable to find bridge by ip: " + ip); + logger.debug("Unable to find bridge by ip: " + ip); setSuccess(false); return null; } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/OvmObject.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/OvmObject.java index 102478c22ff1..3b7354c34c50 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/OvmObject.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/OvmObject.java @@ -35,7 +35,8 @@ import javax.xml.xpath.XPathExpressionException; import javax.xml.xpath.XPathFactory; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import org.w3c.dom.Document; import org.w3c.dom.NodeList; @@ -45,8 +46,7 @@ public class OvmObject { private volatile Connection client; private static List emptyParams = new ArrayList(); - private static final Logger LOGGER = Logger - .getLogger(OvmObject.class); + protected Logger logger = LogManager.getLogger(getClass()); private boolean success = false; public OvmObject() { @@ -215,7 +215,7 @@ public String xmlToString(String path, Document xmlDocument) XPathConstants.NODESET); return nodeList.item(0).getNodeValue(); } catch (NullPointerException e) { - LOGGER.info("Got no items back from parsing, returning null: " + e); + logger.info("Got no items back from parsing, returning null: " 
+ e); return null; } catch (XPathExpressionException e) { throw new Ovm3ResourceException("Problem parsing XML to String: ", e); @@ -239,7 +239,7 @@ public Document prepParse(String input) xmlDocument = builder.parse(new InputSource(new StringReader( input))); } catch (SAXException | IOException e) { - LOGGER.info(e.getClass() + ": ", e); + logger.info(e.getClass() + ": ", e); throw new Ovm3ResourceException("Unable to parse XML: ", e); } return xmlDocument; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java index 6306754185e8..a95664499bbb 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java @@ -23,15 +23,12 @@ import java.util.Map; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.w3c.dom.Document; /* * synonym to the pool python lib in the ovs-agent */ public class Pool extends OvmObject { - private static final Logger LOGGER = Logger - .getLogger(Pool.class); private final List validRoles = new ArrayList() { { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/PoolOCFS2.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/PoolOCFS2.java index baf1de930e7c..256f08d13d76 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/PoolOCFS2.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/PoolOCFS2.java @@ -20,12 +20,9 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import org.w3c.dom.Document; public class PoolOCFS2 extends OvmObject { - private static final Logger LOGGER = Logger - .getLogger(PoolOCFS2.class); private Map poolFileSystem = new HashMap(); private String poolFsTarget; private String poolFsType; @@ 
-104,7 +101,7 @@ public Boolean createPoolFs(String type, String target, String clustername, return nullIsTrueCallWrapper("create_pool_filesystem", type, target, clustername, fsid, nfsbaseid, managerid, fsid); } else if (hasPoolFs(fsid)) { - LOGGER.debug("PoolFs already exists on this host: " + fsid); + logger.debug("PoolFs already exists on this host: " + fsid); return true; } else { throw new Ovm3ResourceException("Unable to add pool filesystem to host, "+ diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Repository.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Repository.java index 7cbf0e747548..ba4d62e3b795 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Repository.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Repository.java @@ -22,11 +22,9 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.w3c.dom.Document; public class Repository extends OvmObject { - private static final Logger LOGGER = Logger.getLogger(Repository.class); private static final String VERSION = "Version"; private static final String NAMETAG = "[@Name='"; private Object postDiscovery = null; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Xen.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Xen.java index ddf6a56db630..adb5d6020895 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Xen.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Xen.java @@ -24,10 +24,8 @@ import java.util.Map; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; public class Xen extends OvmObject { - private static final Logger LOGGER = Logger.getLogger(Xen.class); private static final String VNCLISTEN = "vnclisten"; private static final String MEMORY = "memory"; private 
static final String MAXVCPUS = "maxvcpus"; @@ -294,7 +292,7 @@ public Integer getVifIdByMac(String mac) { } c += 1; } - LOGGER.debug("No vif matched mac: " + mac + " in " + vmVifs); + logger.debug("No vif matched mac: " + mac + " in " + vmVifs); return -1; } public Integer getVifIdByIp(String ip) { @@ -308,13 +306,13 @@ public Integer getVifIdByIp(String ip) { } c += 1; } - LOGGER.debug("No vif matched ip: " + ip + " in " + vmVifs); + logger.debug("No vif matched ip: " + ip + " in " + vmVifs); return -1; } public Boolean addVif(Integer id, String bridge, String mac) { if (getVifIdByMac(mac) > 0) { - LOGGER.debug("Already nic with mac present: " + mac); + logger.debug("Already nic with mac present: " + mac); return false; } String vif = "mac=" + mac + ",bridge=" + bridge; @@ -338,15 +336,15 @@ public Boolean removeVif(String bridge, String mac) { String remove = "mac=" + mac + ",bridge=" + bridge; for (String vif : getVmVifs()) { if (vif.equals(remove)) { - LOGGER.debug("leaving out vif: " + remove); + logger.debug("leaving out vif: " + remove); } else { - LOGGER.debug("keeping vif: " + vif); + logger.debug("keeping vif: " + vif); newVifs.add(vif); } } vmParams.put("vif", newVifs); } catch (Exception e) { - LOGGER.debug(e); + logger.debug(e); } return true; } @@ -400,7 +398,7 @@ private Boolean addDisk(String image, String mode) { private Boolean addDiskToDisks(String image, String devName, String mode) { for (String disk : vmDisks) { if (disk.contains(image)) { - LOGGER.debug(vmName + " already has disk " +image+ ":" + devName + ":" + mode); + logger.debug(vmName + " already has disk " +image+ ":" + devName + ":" + mode); return true; } } @@ -417,7 +415,7 @@ public Boolean removeDisk(String image) { return true; } } - LOGGER.debug("No disk found corresponding to image: " + image); + logger.debug("No disk found corresponding to image: " + image); return false; } @@ -445,7 +443,7 @@ private String getVmDiskDetailFromMap(int disk, String dest) { Map o = (Map) vmParams 
.get("device"); if (o == null) { - LOGGER.info("No devices found" + vmName); + logger.info("No devices found" + vmName); return null; } vmDisk = (Map) o.get("vbd")[disk]; @@ -557,7 +555,7 @@ private Object get(String key) { public Map listVms() throws Ovm3ResourceException { Object[] result = (Object[]) callWrapper("list_vms"); if (result == null) { - LOGGER.debug("no vm results on list_vms"); + logger.debug("no vm results on list_vms"); return null; } @@ -634,7 +632,7 @@ public Boolean listVm(String repoId, String vmId) defVm.setVmParams((Map) callWrapper("list_vm", repoId, vmId)); if (defVm.getVmParams() == null) { - LOGGER.debug("no vm results on list_vm"); + logger.debug("no vm results on list_vm"); return false; } return true; @@ -898,7 +896,7 @@ public Boolean rebootVm(String repoId, String vmId) public Vm getVmConfig(String vmName) throws Ovm3ResourceException { defVm = getRunningVmConfig(vmName); if (defVm == null) { - LOGGER.debug("Unable to retrieve running config for " + vmName); + logger.debug("Unable to retrieve running config for " + vmName); return defVm; } return getVmConfig(defVm.getVmRootDiskPoolId(), defVm.getVmUuid()); @@ -919,7 +917,7 @@ public Vm getVmConfig(String repoId, String vmId) Map x = (Map) callWrapper( "get_vm_config", repoId, vmId); if (x == null) { - LOGGER.debug("Unable to find vm with id:" + vmId + " on repoId:" + repoId); + logger.debug("Unable to find vm with id:" + vmId + " on repoId:" + repoId); return nVm; } nVm.setVmVifs(Arrays.asList(Arrays.copyOf(x.get("vif"), diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java index 3f245273a272..2305dbb6c26e 100755 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3Discoverer.java @@ -29,7 +29,6 @@ 
import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -66,7 +65,6 @@ public class Ovm3Discoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger LOGGER = Logger.getLogger(Ovm3Discoverer.class); protected String publicNetworkDevice; protected String privateNetworkDevice; protected String guestNetworkDevice; @@ -123,11 +121,11 @@ private boolean checkIfExisted(String guid) { private boolean CheckUrl(URI url) throws DiscoveryException { if ("http".equals(url.getScheme()) || "https".equals(url.getScheme())) { String msg = "Discovering " + url + ": " + _params; - LOGGER.debug(msg); + logger.debug(msg); } else { String msg = "urlString is not http(s) so we're not taking care of the discovery for this: " + url; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } return true; @@ -142,13 +140,13 @@ public Map> find(long dcId, CheckUrl(url); if (clusterId == null) { String msg = "must specify cluster Id when add host"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } @@ -156,30 +154,30 @@ public Map> find(long dcId, if (cluster == null || (cluster.getHypervisorType() != HypervisorType.Ovm3)) { String msg = "invalid cluster id or cluster is not for Ovm3 hypervisors"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } else { - LOGGER.debug("cluster: " + cluster); + logger.debug("cluster: " + cluster); } String agentUsername = _params.get("agentusername"); if (agentUsername == null) { String msg = "Agent user name must be specified"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } String agentPassword = _params.get("agentpassword"); if 
(agentPassword == null) { String msg = "Agent password must be specified"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } String agentPort = _params.get("agentport"); if (agentPort == null) { String msg = "Agent port must be specified"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } @@ -193,11 +191,11 @@ public Map> find(long dcId, if (checkIfExisted(guid)) { String msg = "The host " + hostIp + " has been added before"; - LOGGER.info(msg); + logger.info(msg); throw new DiscoveryException(msg); } - LOGGER.debug("Ovm3 discover is going to disover host having guid " + logger.debug("Ovm3 discover is going to disover host having guid " + guid); ClusterVO clu = clusterDao.findById(clusterId); @@ -224,7 +222,7 @@ public Map> find(long dcId, String msg = "Cannot Ssh to Ovm3 host(IP=" + hostIp + ", username=" + username + ", password=*******), discovery failed"; - LOGGER.warn(msg); + logger.warn(msg); throw new DiscoveryException(msg); } @@ -281,17 +279,17 @@ public Map> find(long dcId, resources.put(ovmResource, details); return resources; } catch (UnknownHostException e) { - LOGGER.error( + logger.error( "Host name resolve failed exception, Unable to discover Ovm3 host: " + url.getHost(), e); return null; } catch (ConfigurationException e) { - LOGGER.error( + logger.error( "Configure resource failed, Unable to discover Ovm3 host: " + url.getHost(), e); return null; } catch (IOException | Ovm3ResourceException e) { - LOGGER.error("Unable to discover Ovm3 host: " + url.getHost(), e); + logger.error("Unable to discover Ovm3 host: " + url.getHost(), e); return null; } } @@ -299,7 +297,7 @@ public Map> find(long dcId, @Override public void postDiscovery(List hosts, long msId) throws CloudRuntimeException { - LOGGER.debug("postDiscovery: " + hosts); + logger.debug("postDiscovery: " + hosts); } @Override @@ -315,26 +313,26 @@ public HypervisorType getHypervisorType() { @Override public HostVO 
createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { - LOGGER.debug("createHostVOForConnectedAgent: " + host); + logger.debug("createHostVOForConnectedAgent: " + host); return null; } @Override public boolean processAnswers(long agentId, long seq, Answer[] answers) { - LOGGER.debug("processAnswers: " + agentId); + logger.debug("processAnswers: " + agentId); return false; } @Override public boolean processCommands(long agentId, long seq, Command[] commands) { - LOGGER.debug("processCommands: " + agentId); + logger.debug("processCommands: " + agentId); return false; } @Override public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { - LOGGER.debug("processControlCommand: " + agentId); + logger.debug("processControlCommand: " + agentId); return null; } @@ -346,12 +344,12 @@ public void processHostAdded(long hostId) { @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { - LOGGER.debug("processConnect"); + logger.debug("processConnect"); } @Override public boolean processDisconnect(long agentId, Status state) { - LOGGER.debug("processDisconnect"); + logger.debug("processDisconnect"); return false; } @@ -370,13 +368,13 @@ public boolean isRecurring() { @Override public int getTimeout() { - LOGGER.debug("getTimeout"); + logger.debug("getTimeout"); return 0; } @Override public boolean processTimeout(long agentId, long seq) { - LOGGER.debug("processTimeout: " + agentId); + logger.debug("processTimeout: " + agentId); return false; } @@ -384,7 +382,7 @@ public boolean processTimeout(long agentId, long seq) { public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, List hostTags) { - LOGGER.debug("createHostVOForDirectConnectAgent: " + host); + logger.debug("createHostVOForDirectConnectAgent: " + host); StartupCommand firstCmd = startup[0]; if (!(firstCmd instanceof StartupRoutingCommand)) { return null; @@ -402,7 +400,7 @@ 
public HostVO createHostVOForDirectConnectAgent(HostVO host, @Override public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { - LOGGER.debug("deleteHost: " + host); + logger.debug("deleteHost: " + host); if (host.getType() != com.cloud.host.Host.Type.Routing || host.getHypervisorType() != HypervisorType.Ovm3) { return null; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java index 95ef97deda88..eb83572fc2b8 100755 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3FenceBuilder.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.FenceAnswer; @@ -41,7 +40,6 @@ public class Ovm3FenceBuilder extends AdapterBase implements FenceBuilder { Map fenceParams; - private static final Logger LOGGER = Logger.getLogger(Ovm3FenceBuilder.class); @Inject AgentManager agentMgr; @Inject @@ -74,11 +72,11 @@ public Ovm3FenceBuilder() { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.Ovm3) { - LOGGER.debug("Don't know how to fence non Ovm3 hosts " + logger.debug("Don't know how to fence non Ovm3 hosts " + host.getHypervisorType()); return null; } else { - LOGGER.debug("Fencing " + vm + " on host " + host + logger.debug("Fencing " + vm + " on host " + host + " with params: "+ fenceParams ); } @@ -94,8 +92,8 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { try { answer = (FenceAnswer) agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException | OperationTimedoutException e) { - if (LOGGER.isDebugEnabled()) 
{ - LOGGER.debug("Moving on to the next host because " + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; @@ -106,8 +104,8 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { } } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Unable to fence off " + vm.toString() + " on " + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java index 432474d043ab..77663d9bf045 100755 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorGuru.java @@ -20,7 +20,6 @@ import javax.inject.Inject; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; -import org.apache.log4j.Logger; import com.cloud.agent.api.Command; import com.cloud.agent.api.to.VirtualMachineTO; @@ -33,7 +32,6 @@ import com.cloud.vm.VirtualMachineProfile; public class Ovm3HypervisorGuru extends HypervisorGuruBase implements HypervisorGuru { - private final Logger LOGGER = Logger.getLogger(Ovm3HypervisorGuru.class); @Inject private GuestOSDao guestOsDao; @@ -61,7 +59,7 @@ public boolean trackVmHostChange() { @Override public Pair getCommandHostDelegation(long hostId, Command cmd) { - LOGGER.debug("getCommandHostDelegation: " + cmd.getClass()); + logger.debug("getCommandHostDelegation: " + cmd.getClass()); if (cmd instanceof StorageSubSystemCommand) { StorageSubSystemCommand c = (StorageSubSystemCommand)cmd; c.setExecuteInSequence(true); diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java 
b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java index e897ca5e5edc..ba4304d349cc 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -101,7 +100,6 @@ import com.cloud.vm.VirtualMachine.State; public class Ovm3HypervisorResource extends ServerResourceBase implements HypervisorResource { - private static final Logger LOGGER = Logger.getLogger(Ovm3HypervisorResource.class); @Inject private VirtualRoutingResource vrResource; private StorageSubsystemCommandHandler storageHandler; @@ -130,7 +128,7 @@ public Type getType() { */ @Override public StartupCommand[] initialize() { - LOGGER.debug("Ovm3 resource intializing"); + logger.debug("Ovm3 resource intializing"); try { StartupRoutingCommand srCmd = new StartupRoutingCommand(); StartupStorageCommand ssCmd = new StartupStorageCommand(); @@ -138,10 +136,10 @@ public StartupCommand[] initialize() { /* here stuff gets completed, but where should state live ? 
*/ hypervisorsupport.fillHostInfo(srCmd); hypervisorsupport.vmStateMapClear(); - LOGGER.debug("Ovm3 pool " + ssCmd + " " + srCmd); + logger.debug("Ovm3 pool " + ssCmd + " " + srCmd); return new StartupCommand[] {srCmd, ssCmd}; } catch (Exception e) { - LOGGER.debug("Ovm3 resource initializes failed", e); + logger.debug("Ovm3 resource initializes failed", e); return new StartupCommand[] {}; } } @@ -158,19 +156,19 @@ public PingCommand getCurrentStatus(long id) { CloudstackPlugin cSp = new CloudstackPlugin(c); if (!cSp.dom0CheckStorageHealthCheck(configuration.getAgentScriptsDir(), configuration.getAgentCheckStorageScript(), configuration.getCsHostGuid(), configuration.getAgentStorageCheckTimeout(), configuration.getAgentStorageCheckInterval()) && !cSp.dom0CheckStorageHealthCheck()) { - LOGGER.error("Storage health check not running on " + configuration.getAgentHostname()); + logger.error("Storage health check not running on " + configuration.getAgentHostname()); } else if (cSp.dom0CheckStorageHealthCheck()) { - LOGGER.error("Storage health check started on " + configuration.getAgentHostname()); + logger.error("Storage health check started on " + configuration.getAgentHostname()); } else { - LOGGER.debug("Storage health check running on " + configuration.getAgentHostname()); + logger.debug("Storage health check running on " + configuration.getAgentHostname()); } return new PingRoutingCommand(getType(), id, hypervisorsupport.hostVmStateReport()); } else { - LOGGER.debug("Agent did not respond correctly: " + ping + " but got " + pong); + logger.debug("Agent did not respond correctly: " + ping + " but got " + pong); } } catch (Ovm3ResourceException | NullPointerException e) { - LOGGER.debug("Check agent status failed", e); + logger.debug("Check agent status failed", e); return null; } return null; @@ -179,7 +177,7 @@ public PingCommand getCurrentStatus(long id) { @Override public Answer executeRequest(Command cmd) { Class clazz = cmd.getClass(); - 
LOGGER.debug("executeRequest called: " + cmd.getClass()); + logger.debug("executeRequest called: " + cmd.getClass()); if (cmd instanceof NetworkElementCommand) { return vrResource.executeRequest((NetworkElementCommand)cmd); } else if (clazz == NetworkRulesSystemVmCommand.class) { @@ -252,24 +250,24 @@ public Answer executeRequest(Command cmd) { } else if (clazz == RebootCommand.class) { return execute((RebootCommand)cmd); } - LOGGER.debug("Can't find class for executeRequest " + cmd.getClass() + ", is your direct call missing?"); + logger.debug("Can't find class for executeRequest " + cmd.getClass() + ", is your direct call missing?"); return Answer.createUnsupportedCommandAnswer(cmd); } @Override public void disconnected() { - LOGGER.debug("disconnected seems unused everywhere else"); + logger.debug("disconnected seems unused everywhere else"); } @Override public IAgentControl getAgentControl() { - LOGGER.debug("we don't use IAgentControl"); + logger.debug("we don't use IAgentControl"); return null; } @Override public void setAgentControl(IAgentControl agentControl) { - LOGGER.debug("No use in setting IAgentControl"); + logger.debug("No use in setting IAgentControl"); } @Override @@ -299,7 +297,7 @@ public int getRunLevel() { @Override public void setRunLevel(int level) { - LOGGER.debug("runlevel seems unused in other hypervisors"); + logger.debug("runlevel seems unused in other hypervisors"); } /** @@ -307,7 +305,7 @@ public void setRunLevel(int level) { */ @Override public boolean configure(String name, Map params) throws ConfigurationException { - LOGGER.debug("configure " + name + " with params: " + params); + logger.debug("configure " + name + " with params: " + params); /* check if we're primary or not and if we can connect */ try { configuration = new Ovm3Configuration(params); @@ -343,7 +341,7 @@ public boolean configure(String name, Map params) throws Configu } public void setConnection(Connection con) { - LOGGER.debug("override connection: " + 
con.getIp()); + logger.debug("override connection: " + con.getIp()); c = con; } @@ -377,14 +375,14 @@ public synchronized StartAnswer execute(StartCommand cmd) { String domType = guesttypes.getOvm3GuestType(vmSpec.getOs()); if (domType == null || domType.isEmpty()) { domType = "default"; - LOGGER.debug("VM Virt type missing setting to: " + domType); + logger.debug("VM Virt type missing setting to: " + domType); } else { - LOGGER.debug("VM Virt type set to " + domType + " for " + vmSpec.getOs()); + logger.debug("VM Virt type set to " + domType + " for " + vmSpec.getOs()); } vm.setVmDomainType(domType); if (vmSpec.getBootloader() == BootloaderType.CD) { - LOGGER.warn("CD booting is not supported"); + logger.warn("CD booting is not supported"); } /* * officially CD boot is only supported on HVM, although there is a @@ -422,19 +420,19 @@ public synchronized StartAnswer execute(StartCommand cmd) { /* skip a beat to make sure we didn't miss start */ if (hypervisorsupport.getVmState(vmName) == null && count > 1) { String msg = "VM " + vmName + " went missing on " + configuration.getAgentHostname() + ", returning stopped"; - LOGGER.debug(msg); + logger.debug(msg); state = State.Stopped; return new StartAnswer(cmd, msg); } /* creative fix? 
*/ try { Boolean res = cSp.domrCheckSsh(controlIp); - LOGGER.debug("connected to " + controlIp + " on attempt " + count + " result: " + res); + logger.debug("connected to " + controlIp + " on attempt " + count + " result: " + res); if (res) { break; } } catch (Exception x) { - LOGGER.trace("unable to connect to " + controlIp + " on attempt " + count + " " + x.getMessage(), x); + logger.trace("unable to connect to " + controlIp + " on attempt " + count + " " + x.getMessage(), x); } Thread.sleep(5000); } @@ -449,7 +447,7 @@ public synchronized StartAnswer execute(StartCommand cmd) { state = State.Running; return new StartAnswer(cmd); } catch (Exception e) { - LOGGER.debug("Start vm " + vmName + " failed", e); + logger.debug("Start vm " + vmName + " failed", e); state = State.Stopped; return new StartAnswer(cmd, e.getMessage()); } finally { @@ -473,7 +471,7 @@ public StopAnswer execute(StopCommand cmd) { if (vm == null) { state = State.Stopping; - LOGGER.debug("Unable to get details of vm: " + vmName + ", treating it as Stopping"); + logger.debug("Unable to get details of vm: " + vmName + ", treating it as Stopping"); return new StopAnswer(cmd, "success", true); } String repoId = ovmObject.deDash(vm.getVmRootDiskPoolId()); @@ -483,7 +481,7 @@ public StopAnswer execute(StopCommand cmd) { int tries = 30; while (vms.getRunningVmConfig(vmName) != null && tries > 0) { String msg = "Waiting for " + vmName + " to stop"; - LOGGER.debug(msg); + logger.debug(msg); tries--; Thread.sleep(10 * 1000); } @@ -492,13 +490,13 @@ public StopAnswer execute(StopCommand cmd) { if (vms.getRunningVmConfig(vmName) != null) { String msg = "Stop " + vmName + " failed "; - LOGGER.debug(msg); + logger.debug(msg); return new StopAnswer(cmd, msg, false); } state = State.Stopped; return new StopAnswer(cmd, "success", true); } catch (Exception e) { - LOGGER.debug("Stop " + vmName + " failed ", e); + logger.debug("Stop " + vmName + " failed ", e); return new StopAnswer(cmd, e.getMessage(), false); } 
finally { if (state != null) { @@ -524,7 +522,7 @@ public RebootAnswer execute(RebootCommand cmd) { Integer vncPort = vm.getVncPort(); return new RebootAnswer(cmd, null, vncPort); } catch (Exception e) { - LOGGER.debug("Reboot " + vmName + " failed", e); + logger.debug("Reboot " + vmName + " failed", e); return new RebootAnswer(cmd, e.getMessage(), false); } finally { hypervisorsupport.setVmState(vmName, State.Running); diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java index f30df5d74443..d7c2c214f221 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3StorageProcessor.java @@ -41,7 +41,8 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -78,7 +79,7 @@ * Storage related bits */ public class Ovm3StorageProcessor implements StorageProcessor { - private final Logger LOGGER = Logger.getLogger(Ovm3StorageProcessor.class); + protected Logger logger = LogManager.getLogger(getClass()); private Connection c; private OvmObject ovmObject = new OvmObject(); private Ovm3StoragePool pool; @@ -92,7 +93,7 @@ public Ovm3StorageProcessor(Connection conn, Ovm3Configuration ovm3config, } public final Answer execute(final CopyCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); DataTO srcData = cmd.getSrcTO(); DataStoreTO srcStore = srcData.getDataStore(); DataTO destData = cmd.getDestTO(); @@ -111,7 +112,7 @@ 
public final Answer execute(final CopyCommand cmd) { return cloneVolumeFromBaseTemplate(cmd); } else { msg = "Primary to Primary doesn't match"; - LOGGER.debug(msg); + logger.debug(msg); } } else if ((srcData.getObjectType() == DataObjectType.SNAPSHOT) && (destData.getObjectType() == DataObjectType.SNAPSHOT)) { @@ -126,38 +127,38 @@ public final Answer execute(final CopyCommand cmd) { msg = "Unable to do stuff for " + srcStore.getClass() + ":" + srcData.getObjectType() + " to " + destStore.getClass() + ":" + destData.getObjectType(); - LOGGER.debug(msg); + logger.debug(msg); } } catch (Exception e) { msg = "Catch Exception " + e.getClass().getName() + " for template due to " + e.toString(); - LOGGER.warn(msg, e); + logger.warn(msg, e); return new CopyCmdAnswer(msg); } - LOGGER.warn(msg + " " + cmd.getClass()); + logger.warn(msg + " " + cmd.getClass()); return new CopyCmdAnswer(msg); } public Answer execute(DeleteCommand cmd) { DataTO data = cmd.getData(); String msg; - LOGGER.debug("Deleting object: " + data.getObjectType()); + logger.debug("Deleting object: " + data.getObjectType()); if (data.getObjectType() == DataObjectType.VOLUME) { return deleteVolume(cmd); } else if (data.getObjectType() == DataObjectType.SNAPSHOT) { return deleteSnapshot(cmd); } else if (data.getObjectType() == DataObjectType.TEMPLATE) { msg = "Template deletion is not implemented yet."; - LOGGER.info(msg); + logger.info(msg); } else { msg = data.getObjectType() + " deletion is not implemented yet."; - LOGGER.info(msg); + logger.info(msg); } return new Answer(cmd, false, msg); } public CreateAnswer execute(CreateCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); StorageFilerTO primaryStorage = cmd.getPool(); DiskProfile disk = cmd.getDiskCharacteristics(); /* disk should have a uuid */ @@ -168,13 +169,13 @@ public CreateAnswer execute(CreateCommand cmd) { try { StoragePlugin store = new StoragePlugin(c); if (cmd.getTemplateUrl() != null) 
{ - LOGGER.debug("CreateCommand " + cmd.getTemplateUrl() + " " + logger.debug("CreateCommand " + cmd.getTemplateUrl() + " " + dst); Linux host = new Linux(c); host.copyFile(cmd.getTemplateUrl(), dst); } else { /* this is a dup with the createVolume ? */ - LOGGER.debug("CreateCommand " + dst); + logger.debug("CreateCommand " + dst); store.storagePluginCreate(primaryStorage.getUuid(), primaryStorage.getHost(), dst, disk.getSize(), false); } @@ -186,7 +187,7 @@ public CreateAnswer execute(CreateCommand cmd) { fp.getSize(), null); return new CreateAnswer(cmd, volume); } catch (Exception e) { - LOGGER.debug("CreateCommand failed", e); + logger.debug("CreateCommand failed", e); return new CreateAnswer(cmd, e.getMessage()); } } @@ -196,7 +197,7 @@ public CreateAnswer execute(CreateCommand cmd) { */ @Override public CopyCmdAnswer copyTemplateToPrimaryStorage(CopyCommand cmd) { - LOGGER.debug("execute copyTemplateToPrimaryStorage: "+ cmd.getClass()); + logger.debug("execute copyTemplateToPrimaryStorage: "+ cmd.getClass()); DataTO srcData = cmd.getSrcTO(); DataStoreTO srcStore = srcData.getDataStore(); DataTO destData = cmd.getDestTO(); @@ -225,7 +226,7 @@ public CopyCmdAnswer copyTemplateToPrimaryStorage(CopyCommand cmd) { + "/" + destUuid + ".raw"; } String destFile = destPath + "/" + destUuid + ".raw"; - LOGGER.debug("CopyFrom: " + srcData.getObjectType() + "," + logger.debug("CopyFrom: " + srcData.getObjectType() + "," + srcFile + " to " + destData.getObjectType() + "," + destFile); host.copyFile(srcFile, destFile); @@ -237,7 +238,7 @@ public CopyCmdAnswer copyTemplateToPrimaryStorage(CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (Ovm3ResourceException e) { String msg = "Error while copying template to primary storage: " + e.getMessage(); - LOGGER.info(msg); + logger.info(msg); return new CopyCmdAnswer(msg); } } @@ -246,7 +247,7 @@ public CopyCmdAnswer copyTemplateToPrimaryStorage(CopyCommand cmd) { */ @Override public Answer 
copyVolumeFromPrimaryToSecondary(CopyCommand cmd) { - LOGGER.debug("execute copyVolumeFromPrimaryToSecondary: "+ cmd.getClass()); + logger.debug("execute copyVolumeFromPrimaryToSecondary: "+ cmd.getClass()); return new Answer(cmd); } /** @@ -254,7 +255,7 @@ public Answer copyVolumeFromPrimaryToSecondary(CopyCommand cmd) { */ @Override public CopyCmdAnswer cloneVolumeFromBaseTemplate(CopyCommand cmd) { - LOGGER.debug("execute cloneVolumeFromBaseTemplate: "+ cmd.getClass()); + logger.debug("execute cloneVolumeFromBaseTemplate: "+ cmd.getClass()); try { // src DataTO srcData = cmd.getSrcTO(); @@ -266,7 +267,7 @@ public CopyCmdAnswer cloneVolumeFromBaseTemplate(CopyCommand cmd) { VolumeObjectTO dest = (VolumeObjectTO) destData; String destFile = getVirtualDiskPath(dest.getUuid(), dest.getDataStore().getUuid()); Linux host = new Linux(c); - LOGGER.debug("CopyFrom: " + srcData.getObjectType() + "," + logger.debug("CopyFrom: " + srcData.getObjectType() + "," + srcFile + " to " + destData.getObjectType() + "," + destFile); host.copyFile(srcFile, destFile); @@ -278,7 +279,7 @@ public CopyCmdAnswer cloneVolumeFromBaseTemplate(CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (Ovm3ResourceException e) { String msg = "Error cloneVolumeFromBaseTemplate: " + e.getMessage(); - LOGGER.info(msg); + logger.info(msg); return new CopyCmdAnswer(msg); } } @@ -287,7 +288,7 @@ public CopyCmdAnswer cloneVolumeFromBaseTemplate(CopyCommand cmd) { */ @Override public Answer createTemplateFromVolume(CopyCommand cmd) { - LOGGER.debug("execute createTemplateFromVolume: "+ cmd.getClass()); + logger.debug("execute createTemplateFromVolume: "+ cmd.getClass()); return new Answer(cmd); } /** @@ -295,7 +296,7 @@ public Answer createTemplateFromVolume(CopyCommand cmd) { */ @Override public Answer copyVolumeFromImageCacheToPrimary(CopyCommand cmd) { - LOGGER.debug("execute copyVolumeFromImageCacheToPrimary: "+ cmd.getClass()); + logger.debug("execute copyVolumeFromImageCacheToPrimary: "+ 
cmd.getClass()); return new Answer(cmd); } /** @@ -303,7 +304,7 @@ public Answer copyVolumeFromImageCacheToPrimary(CopyCommand cmd) { */ @Override public Answer createTemplateFromSnapshot(CopyCommand cmd) { - LOGGER.debug("execute createTemplateFromSnapshot: "+ cmd.getClass()); + logger.debug("execute createTemplateFromSnapshot: "+ cmd.getClass()); try { // src.getPath contains the uuid of the snapshot. DataTO srcData = cmd.getSrcTO(); @@ -333,7 +334,7 @@ public Answer createTemplateFromSnapshot(CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (Ovm3ResourceException e) { String msg = "Error backupSnapshot: " + e.getMessage(); - LOGGER.info(msg); + logger.info(msg); return new CopyCmdAnswer(msg); } } @@ -344,7 +345,7 @@ public Answer createTemplateFromSnapshot(CopyCommand cmd) { */ @Override public CopyCmdAnswer backupSnapshot(CopyCommand cmd) { - LOGGER.debug("execute backupSnapshot: "+ cmd.getClass()); + logger.debug("execute backupSnapshot: "+ cmd.getClass()); try { DataTO srcData = cmd.getSrcTO(); DataTO destData = cmd.getDestTO(); @@ -366,7 +367,7 @@ public CopyCmdAnswer backupSnapshot(CopyCommand cmd) { Linux host = new Linux(c); CloudstackPlugin csp = new CloudstackPlugin(c); csp.ovsMkdirs(destDir); - LOGGER.debug("CopyFrom: " + srcData.getObjectType() + "," + logger.debug("CopyFrom: " + srcData.getObjectType() + "," + srcFile + " to " + destData.getObjectType() + "," + destFile); host.copyFile(srcFile, destFile); @@ -381,20 +382,20 @@ public CopyCmdAnswer backupSnapshot(CopyCommand cmd) { return new CopyCmdAnswer(newSnap); } catch (Ovm3ResourceException e) { String msg = "Error backupSnapshot: " + e.getMessage(); - LOGGER.info(msg); + logger.info(msg); return new CopyCmdAnswer(msg); } } public Answer execute(CreateObjectCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); DataTO data = cmd.getData(); if (data.getObjectType() == DataObjectType.VOLUME) { return createVolume(cmd); } else if 
(data.getObjectType() == DataObjectType.SNAPSHOT) { return createSnapshot(cmd); } else if (data.getObjectType() == DataObjectType.TEMPLATE) { - LOGGER.debug("Template object creation not supported."); + logger.debug("Template object creation not supported."); } return new CreateObjectAnswer(data.getObjectType() + " object creation not supported"); @@ -404,7 +405,7 @@ public Answer execute(CreateObjectCommand cmd) { */ @Override public AttachAnswer attachIso(AttachCommand cmd) { - LOGGER.debug("execute attachIso: "+ cmd.getClass()); + logger.debug("execute attachIso: "+ cmd.getClass()); String vmName = cmd.getVmName(); DiskTO disk = cmd.getDisk(); return attachDetach(cmd, vmName, disk, true); @@ -414,7 +415,7 @@ public AttachAnswer attachIso(AttachCommand cmd) { */ @Override public AttachAnswer dettachIso(DettachCommand cmd) { - LOGGER.debug("execute dettachIso: "+ cmd.getClass()); + logger.debug("execute dettachIso: "+ cmd.getClass()); String vmName = cmd.getVmName(); DiskTO disk = cmd.getDisk(); return attachDetach(cmd, vmName, disk, false); @@ -470,7 +471,7 @@ private AttachAnswer attachDetach(Command cmd, String vmName, DiskTO disk, boolean isAttach) { Xen xen = new Xen(c); String doThis = (isAttach) ? 
"Attach" : "Dettach"; - LOGGER.debug(doThis + " volume type " + disk.getType() + " " + vmName); + logger.debug(doThis + " volume type " + disk.getType() + " " + vmName); String msg = ""; String path = ""; try { @@ -478,7 +479,7 @@ private AttachAnswer attachDetach(Command cmd, String vmName, DiskTO disk, /* check running */ if (vm == null) { msg = doThis + " can't find VM " + vmName; - LOGGER.debug(msg); + logger.debug(msg); return new AttachAnswer(msg); } if (disk.getType() == Volume.Type.ISO) { @@ -488,7 +489,7 @@ private AttachAnswer attachDetach(Command cmd, String vmName, DiskTO disk, } if ("".equals(path)) { msg = doThis + " can't do anything with an empty path."; - LOGGER.debug(msg); + logger.debug(msg); return new AttachAnswer(msg); } if (isAttach) { @@ -501,7 +502,7 @@ private AttachAnswer attachDetach(Command cmd, String vmName, DiskTO disk, if (!vm.removeDisk(path)) { msg = doThis + " failed for " + vmName + disk.getType() + " was not attached " + path; - LOGGER.debug(msg); + logger.debug(msg); return new AttachAnswer(msg); } } @@ -510,7 +511,7 @@ private AttachAnswer attachDetach(Command cmd, String vmName, DiskTO disk, return new AttachAnswer(disk); } catch (Ovm3ResourceException e) { msg = doThis + " failed for " + vmName + " " + e.getMessage(); - LOGGER.warn(msg, e); + logger.warn(msg, e); return new AttachAnswer(msg); } } @@ -519,7 +520,7 @@ private AttachAnswer attachDetach(Command cmd, String vmName, DiskTO disk, */ @Override public AttachAnswer attachVolume(AttachCommand cmd) { - LOGGER.debug("execute attachVolume: "+ cmd.getClass()); + logger.debug("execute attachVolume: "+ cmd.getClass()); String vmName = cmd.getVmName(); DiskTO disk = cmd.getDisk(); return attachDetach(cmd, vmName, disk, true); @@ -529,7 +530,7 @@ public AttachAnswer attachVolume(AttachCommand cmd) { */ @Override public AttachAnswer dettachVolume(DettachCommand cmd) { - LOGGER.debug("execute dettachVolume: "+ cmd.getClass()); + logger.debug("execute dettachVolume: "+ 
cmd.getClass()); String vmName = cmd.getVmName(); DiskTO disk = cmd.getDisk(); return attachDetach(cmd, vmName, disk, false); @@ -540,7 +541,7 @@ public AttachAnswer dettachVolume(DettachCommand cmd) { */ @Override public Answer createVolume(CreateObjectCommand cmd) { - LOGGER.debug("execute createVolume: "+ cmd.getClass()); + logger.debug("execute createVolume: "+ cmd.getClass()); DataTO data = cmd.getData(); VolumeObjectTO volume = (VolumeObjectTO) data; try { @@ -567,7 +568,7 @@ public Answer createVolume(CreateObjectCommand cmd) { newVol.setPath(volume.getUuid()); return new CreateObjectAnswer(newVol); } catch (Ovm3ResourceException | URISyntaxException e) { - LOGGER.info("Volume creation failed: " + e.toString(), e); + logger.info("Volume creation failed: " + e.toString(), e); return new CreateObjectAnswer(e.toString()); } } @@ -586,7 +587,7 @@ public Answer createVolume(CreateObjectCommand cmd) { */ @Override public Answer createSnapshot(CreateObjectCommand cmd) { - LOGGER.debug("execute createSnapshot: "+ cmd.getClass()); + logger.debug("execute createSnapshot: "+ cmd.getClass()); DataTO data = cmd.getData(); Xen xen = new Xen(c); SnapshotObjectTO snap = (SnapshotObjectTO) data; @@ -611,7 +612,7 @@ public Answer createSnapshot(CreateObjectCommand cmd) { src = getVirtualDiskPath(vol.getUuid(),data.getDataStore().getUuid()); dest = src.replace(vol.getUuid(), uuid); } - LOGGER.debug("Snapshot " + src + " to " + dest); + logger.debug("Snapshot " + src + " to " + dest); host.copyFile(src, dest); SnapshotObjectTO nsnap = new SnapshotObjectTO(); // nsnap.setPath(dest); @@ -626,7 +627,7 @@ public Answer createSnapshot(CreateObjectCommand cmd) { @Override public Answer deleteVolume(DeleteCommand cmd) { - LOGGER.debug("execute deleteVolume: "+ cmd.getClass()); + logger.debug("execute deleteVolume: "+ cmd.getClass()); DataTO data = cmd.getData(); VolumeObjectTO volume = (VolumeObjectTO) data; try { @@ -635,9 +636,9 @@ public Answer deleteVolume(DeleteCommand cmd) { 
String path = getVirtualDiskPath(uuid, poolUuid); StoragePlugin sp = new StoragePlugin(c); sp.storagePluginDestroy(poolUuid, path); - LOGGER.debug("Volume deletion success: " + path); + logger.debug("Volume deletion success: " + path); } catch (Ovm3ResourceException e) { - LOGGER.info("Volume deletion failed: " + e.toString(), e); + logger.info("Volume deletion failed: " + e.toString(), e); return new CreateObjectAnswer(e.toString()); } return new Answer(cmd); @@ -648,7 +649,7 @@ public Answer deleteVolume(DeleteCommand cmd) { * bumper bowling. */ public CopyVolumeAnswer execute(CopyVolumeCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); String volumePath = cmd.getVolumePath(); /* is a repository */ String secondaryStorageURL = cmd.getSecondaryStorageURL(); @@ -662,26 +663,26 @@ public CopyVolumeAnswer execute(CopyVolumeCommand cmd) { /* to secondary storage */ if (cmd.toSecondaryStorage()) { - LOGGER.debug("Copy to secondary storage " + volumePath + logger.debug("Copy to secondary storage " + volumePath + " to " + secondaryStorageURL); host.copyFile(volumePath, secondaryStorageURL); /* from secondary storage */ } else { - LOGGER.debug("Copy from secondary storage " + logger.debug("Copy from secondary storage " + secondaryStorageURL + " to " + volumePath); host.copyFile(secondaryStorageURL, volumePath); } /* check the truth of this */ return new CopyVolumeAnswer(cmd, true, null, null, null); } catch (Ovm3ResourceException e) { - LOGGER.debug("Copy volume failed", e); + logger.debug("Copy volume failed", e); return new CopyVolumeAnswer(cmd, false, e.getMessage(), null, null); } } /* Destroy a volume (image) */ public Answer execute(DestroyCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); VolumeTO vol = cmd.getVolume(); String vmName = cmd.getVmName(); try { @@ -689,7 +690,7 @@ public Answer execute(DestroyCommand cmd) { 
store.storagePluginDestroy(vol.getPoolUuid(), vol.getPath()); return new Answer(cmd, true, "Success"); } catch (Ovm3ResourceException e) { - LOGGER.debug("Destroy volume " + vol.getName() + " failed for " + logger.debug("Destroy volume " + vol.getName() + " failed for " + vmName + " ", e); return new Answer(cmd, false, e.getMessage()); } @@ -698,7 +699,7 @@ public Answer execute(DestroyCommand cmd) { /* check if a VM is running should be added */ public CreatePrivateTemplateAnswer execute( final CreatePrivateTemplateFromVolumeCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); String volumePath = cmd.getVolumePath(); Long accountId = cmd.getAccountId(); Long templateId = cmd.getTemplateId(); @@ -717,7 +718,7 @@ public CreatePrivateTemplateAnswer execute( host.copyFile(volumePath, installPath); return new CreatePrivateTemplateAnswer(cmd, true, installPath); } catch (Exception e) { - LOGGER.debug("Create template failed", e); + logger.debug("Create template failed", e); return new CreatePrivateTemplateAnswer(cmd, false, e.getMessage()); } } @@ -727,7 +728,7 @@ public CreatePrivateTemplateAnswer execute( */ @Override public Answer createVolumeFromSnapshot(CopyCommand cmd) { - LOGGER.debug("execute createVolumeFromSnapshot: "+ cmd.getClass()); + logger.debug("execute createVolumeFromSnapshot: "+ cmd.getClass()); try { DataTO srcData = cmd.getSrcTO(); DataStoreTO srcStore = srcData.getDataStore(); @@ -757,7 +758,7 @@ public Answer createVolumeFromSnapshot(CopyCommand cmd) { return new CopyCmdAnswer(newVol); /* we assume the cache for templates is local */ } catch (Ovm3ResourceException e) { - LOGGER.debug("Failed to createVolumeFromSnapshot: ", e); + logger.debug("Failed to createVolumeFromSnapshot: ", e); return new CopyCmdAnswer(e.toString()); } } @@ -767,7 +768,7 @@ public Answer createVolumeFromSnapshot(CopyCommand cmd) { */ @Override public Answer deleteSnapshot(DeleteCommand cmd) { - LOGGER.debug("execute 
deleteSnapshot: "+ cmd.getClass()); + logger.debug("execute deleteSnapshot: "+ cmd.getClass()); DataTO data = cmd.getData(); SnapshotObjectTO snap = (SnapshotObjectTO) data; String storeUrl = data.getDataStore().getUrl(); @@ -780,10 +781,10 @@ public Answer deleteSnapshot(DeleteCommand cmd) { + snapUuid + ".raw"; StoragePlugin sp = new StoragePlugin(c); sp.storagePluginDestroy(secPoolUuid, filePath); - LOGGER.debug("Snapshot deletion success: " + filePath); + logger.debug("Snapshot deletion success: " + filePath); return new Answer(cmd, true, "Deleted Snapshot " + filePath); } catch (Ovm3ResourceException e) { - LOGGER.info("Snapshot deletion failed: " + e.toString(), e); + logger.info("Snapshot deletion failed: " + e.toString(), e); return new CreateObjectAnswer(e.toString()); } } @@ -792,7 +793,7 @@ public Answer deleteSnapshot(DeleteCommand cmd) { */ @Override public Answer introduceObject(IntroduceObjectCmd cmd) { - LOGGER.debug("execute introduceObject: "+ cmd.getClass()); + logger.debug("execute introduceObject: "+ cmd.getClass()); return new Answer(cmd, false, "not implemented yet"); } /** @@ -800,7 +801,7 @@ public Answer introduceObject(IntroduceObjectCmd cmd) { */ @Override public Answer forgetObject(ForgetObjectCmd cmd) { - LOGGER.debug("execute forgetObject: "+ cmd.getClass()); + logger.debug("execute forgetObject: "+ cmd.getClass()); return new Answer(cmd, false, "not implemented yet"); } @@ -811,14 +812,14 @@ public Answer forgetObject(ForgetObjectCmd cmd) { */ @Override public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) { - LOGGER.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for Ovm3StorageProcessor"); + logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for Ovm3StorageProcessor"); return new SnapshotAndCopyAnswer("Not implemented"); } @Override public ResignatureAnswer resignature(final ResignatureCommand cmd) { - 
LOGGER.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for Ovm3StorageProcessor"); + logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for Ovm3StorageProcessor"); return new ResignatureAnswer("Not implemented"); } @@ -830,13 +831,13 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) @Override public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) { - LOGGER.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for Ovm3StorageProcessor"); + logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for Ovm3StorageProcessor"); return new Answer(cmd,false,"Not applicable used for Ovm3StorageProcessor"); } @Override public Answer syncVolumePath(SyncVolumePathCommand cmd) { - LOGGER.info("SyncVolumePathCommand not currently applicable for Ovm3StorageProcessor"); + logger.info("SyncVolumePathCommand not currently applicable for Ovm3StorageProcessor"); return new Answer(cmd, false, "Not currently applicable for Ovm3StorageProcessor"); } @@ -851,7 +852,7 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { * @return */ public Answer execute(AttachCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); String vmName = cmd.getVmName(); DiskTO disk = cmd.getDisk(); return attachDetach(cmd, vmName, disk, true); @@ -863,7 +864,7 @@ public Answer execute(AttachCommand cmd) { * @return */ public Answer execute(DettachCommand cmd) { - LOGGER.debug("execute: "+ cmd.getClass()); + logger.debug("execute: "+ cmd.getClass()); String vmName = cmd.getVmName(); DiskTO disk = cmd.getDisk(); return attachDetach(cmd, vmName, disk, false); diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3VirtualRoutingResource.java 
b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3VirtualRoutingResource.java index 0e00358e75e8..a3c4e926d085 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3VirtualRoutingResource.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3VirtualRoutingResource.java @@ -17,7 +17,8 @@ package com.cloud.hypervisor.ovm3.resources; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.joda.time.Duration; import com.cloud.agent.api.SetupGuestNetworkCommand; @@ -34,8 +35,7 @@ import com.cloud.utils.ExecutionResult; public class Ovm3VirtualRoutingResource implements VirtualRouterDeployer { - private final Logger logger = Logger - .getLogger(Ovm3VirtualRoutingResource.class); + protected Logger logger = LogManager.getLogger(getClass()); private String domRCloudPath = "/opt/cloud/bin/"; private Connection c; private String agentName; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java index 9da760b97fb7..ff6583be3ce0 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java @@ -25,7 +25,8 @@ import javax.naming.ConfigurationException; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.ovm3.objects.Network; import com.cloud.utils.NumbersUtil; @@ -33,8 +34,7 @@ /* holds config data for the Ovm3 Hypervisor */ public class Ovm3Configuration { - private static final Logger LOGGER = Logger - 
.getLogger(Ovm3Configuration.class); + protected Logger logger = LogManager.getLogger(getClass()); private String agentIp; private Long agentZoneId; private Long agentPodId; @@ -127,11 +127,11 @@ public Ovm3Configuration(Map params) */ private void validatePoolAndCluster() { if (agentInOvm3Cluster) { - LOGGER.debug("Clustering requires a pool, setting pool to true"); + logger.debug("Clustering requires a pool, setting pool to true"); agentInOvm3Pool = true; } if (!NetUtils.isValidIp4(ovm3PoolVip)) { - LOGGER.debug("No VIP, Setting ovm3pool and ovm3cluster to false"); + logger.debug("No VIP, Setting ovm3pool and ovm3cluster to false"); agentInOvm3Pool = false; agentInOvm3Cluster = false; ovm3PoolVip = ""; @@ -450,7 +450,7 @@ public String getTemplateDir() { private String validateParam(String name, String param) throws ConfigurationException { if (param == null) { String msg = "Unable to get " + name + " params are null"; - LOGGER.debug(msg); + logger.debug(msg); throw new ConfigurationException(msg); } return param; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorNetwork.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorNetwork.java index 387e8bdf1547..6c2f48dbf8de 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorNetwork.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorNetwork.java @@ -21,7 +21,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckNetworkAnswer; @@ -39,8 +40,7 @@ import com.cloud.utils.net.NetUtils; public class Ovm3HypervisorNetwork { - private static final Logger LOGGER = Logger - .getLogger(Ovm3HypervisorNetwork.class); + protected 
Logger logger = LogManager.getLogger(getClass()); private Connection c; private Ovm3Configuration config; public Ovm3HypervisorNetwork(Connection conn, Ovm3Configuration ovm3config) { @@ -57,12 +57,12 @@ public void configureNetworking() throws ConfigurationException { String controlIface = config.getAgentControlNetworkName(); if (controlIface != null && net.getInterfaceByName(controlIface) == null) { - LOGGER.debug("starting " + controlIface); + logger.debug("starting " + controlIface); net.startOvsLocalConfig(controlIface); /* ovs replies too "fast" so the bridge can be "busy" */ int contCount = 0; while (net.getInterfaceByName(controlIface) == null) { - LOGGER.debug("waiting for " + controlIface); + logger.debug("waiting for " + controlIface); Thread.sleep(1 * 1000); if (contCount > 9) { throw new ConfigurationException("Unable to configure " @@ -72,7 +72,7 @@ public void configureNetworking() throws ConfigurationException { contCount++; } } else { - LOGGER.debug("already have " + controlIface); + logger.debug("already have " + controlIface); } /* * The bridge is remembered upon reboot, but not the IP or the @@ -85,10 +85,10 @@ public void configureNetworking() throws ConfigurationException { cSp.ovsControlInterface(controlIface, NetUtils.getLinkLocalCIDR()); } catch (InterruptedException e) { - LOGGER.error("interrupted?", e); + logger.error("interrupted?", e); } catch (Ovm3ResourceException e) { String msg = "Basic configuration failed on " + config.getAgentHostname(); - LOGGER.error(msg, e); + logger.error(msg, e); throw new ConfigurationException(msg + ", " + e.getMessage()); } } @@ -96,27 +96,27 @@ public void configureNetworking() throws ConfigurationException { /**/ private boolean isNetworkSetupByName(String nameTag) { if (nameTag != null) { - LOGGER.debug("Looking for network setup by name " + nameTag); + logger.debug("Looking for network setup by name " + nameTag); try { Network net = new Network(c); net.getInterfaceList(); if 
(net.getBridgeByName(nameTag) != null) { - LOGGER.debug("Found bridge with name: " + nameTag); + logger.debug("Found bridge with name: " + nameTag); return true; } } catch (Ovm3ResourceException e) { - LOGGER.debug("Unxpected error looking for name: " + nameTag, e); + logger.debug("Unxpected error looking for name: " + nameTag, e); return false; } } - LOGGER.debug("No bridge with name: " + nameTag); + logger.debug("No bridge with name: " + nameTag); return false; } /* this might have to change in the future, works for now... */ public CheckNetworkAnswer execute(CheckNetworkCommand cmd) { - LOGGER.debug("Checking if network name setup is done on " + logger.debug("Checking if network name setup is done on " + config.getAgentHostname()); List infoList = cmd @@ -141,7 +141,7 @@ public CheckNetworkAnswer execute(CheckNetworkCommand cmd) { + info.getPhysicalNetworkId() + ", Guest Network is not configured on the backend by name " + info.getGuestNetworkName(); - LOGGER.error(msg); + logger.error(msg); return new CheckNetworkAnswer(cmd, false, msg); } if (!isNetworkSetupByName(info.getPrivateNetworkName())) { @@ -149,7 +149,7 @@ public CheckNetworkAnswer execute(CheckNetworkCommand cmd) { + info.getPhysicalNetworkId() + ", Private Network is not configured on the backend by name " + info.getPrivateNetworkName(); - LOGGER.error(msg); + logger.error(msg); return new CheckNetworkAnswer(cmd, false, msg); } if (!isNetworkSetupByName(info.getPublicNetworkName())) { @@ -157,7 +157,7 @@ public CheckNetworkAnswer execute(CheckNetworkCommand cmd) { + info.getPhysicalNetworkId() + ", Public Network is not configured on the backend by name " + info.getPublicNetworkName(); - LOGGER.error(msg); + logger.error(msg); return new CheckNetworkAnswer(cmd, false, msg); } /* Storage network is optional, will revert to private otherwise */ @@ -180,7 +180,7 @@ public Answer execute(PingTestCommand cmd) { } return new Answer(cmd, true, "success"); } catch (Ovm3ResourceException e) { - 
LOGGER.debug("Ping " + cmd.getComputingHostIp() + " failed", e); + logger.debug("Ping " + cmd.getComputingHostIp() + " failed", e); return new Answer(cmd, false, e.getMessage()); } } @@ -190,7 +190,7 @@ private String createVlanBridge(String networkName, Integer vlanId) if (vlanId < 1 || vlanId > 4094) { String msg = "Incorrect vlan " + vlanId + ", needs to be between 1 and 4094"; - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } Network net = new Network(c); @@ -201,12 +201,12 @@ private String createVlanBridge(String networkName, Integer vlanId) if (net.getInterfaceByName(brName) == null) { net.startOvsVlanBridge(brName, physInterface, vlanId); } else { - LOGGER.debug("Interface " + brName + " already exists"); + logger.debug("Interface " + brName + " already exists"); } } catch (Ovm3ResourceException e) { String msg = "Unable to create vlan " + vlanId.toString() + " bridge for " + networkName; - LOGGER.warn(msg + ": " + e); + logger.warn(msg + ": " + e); throw new CloudRuntimeException(msg + ":" + e.getMessage()); } return brName; diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java index 67a63d788f31..3deaea06db94 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java @@ -26,7 +26,8 @@ import javax.naming.ConfigurationException; import org.apache.commons.io.FileUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthAnswer; @@ -63,7 +64,7 @@ import com.trilead.ssh2.SCPClient; public class Ovm3HypervisorSupport { - private final 
Logger LOGGER = Logger.getLogger(Ovm3HypervisorSupport.class); + protected Logger logger = LogManager.getLogger(getClass()); private Connection c; private Ovm3Configuration config; @@ -162,19 +163,19 @@ public File getSystemVMKeyFile(String filename) { + filename); File keyFile = null; if (keyPath != null) { - LOGGER.debug("found SshKey " + keyPath); + logger.debug("found SshKey " + keyPath); keyFile = new File(keyPath); } if (keyFile == null || !keyFile.exists()) { String key = "client/target/generated-webapp/WEB-INF/classes/scripts/vm/systemvm/" + filename; - LOGGER.warn("findScript failed, going for generated " + key); + logger.warn("findScript failed, going for generated " + key); keyFile = new File(key); } if (keyFile == null || !keyFile.exists()) { String key = "/usr/share/cloudstack-common/scripts/vm/systemvm/" + filename; - LOGGER.warn("generated key retrieval failed " + key); + logger.warn("generated key retrieval failed " + key); keyFile = new File(key); } return keyFile; @@ -190,12 +191,12 @@ public void fillHostInfo(StartupRoutingCommand cmd) { /* get data we need from parts */ Linux host = new Linux(c); if (!host.getOvmVersion().startsWith("3.2.") && !host.getOvmVersion().startsWith("3.3.")) { - LOGGER.error("Hypervisor not supported: " + host.getOvmVersion()); + logger.error("Hypervisor not supported: " + host.getOvmVersion()); throw new CloudRuntimeException( "OVM 3.2. or 3.3. 
are only supported, not " + host.getOvmVersion()); } else { - LOGGER.debug("Hypervisor version: " + host.getOvmVersion()); + logger.debug("Hypervisor version: " + host.getOvmVersion()); } cmd.setName(host.getHostName()); cmd.setSpeed(host.getCpuKhz()); @@ -249,7 +250,7 @@ public void fillHostInfo(StartupRoutingCommand cmd) { d.put("isprimary", config.getAgentIsPrimary().toString()); d.put("hasprimary", config.getAgentHasPrimary().toString()); cmd.setHostDetails(d); - LOGGER.debug("Add an Ovm3 host " + config.getAgentHostname() + ":" + logger.debug("Add an Ovm3 host " + config.getAgentHostname() + ":" + cmd.getHostDetails()); } catch (Ovm3ResourceException e) { throw new CloudRuntimeException("Ovm3ResourceException: " @@ -266,7 +267,7 @@ public void fillHostInfo(StartupRoutingCommand cmd) { * @throws IOException */ public Boolean setupServer(String key) throws IOException { - LOGGER.debug("Setup all bits on agent: " + config.getAgentHostname()); + logger.debug("Setup all bits on agent: " + config.getAgentHostname()); /* version dependent patching ? 
*/ try { com.trilead.ssh2.Connection sshConnection = SSHCmdHelper @@ -315,7 +316,7 @@ public Boolean setupServer(String key) throws IOException { config.getAgentStorageCheckTimeout(), config.getAgentStorageCheckInterval()); } catch (Exception es) { - LOGGER.error("Unexpected exception ", es); + logger.error("Unexpected exception ", es); String msg = "Unable to install module in agent"; throw new CloudRuntimeException(msg); } @@ -333,7 +334,7 @@ private Map getAllVms() throws Ovm3ResourceException { Xen vms = new Xen(c); return vms.getRunningVmConfigs(); } catch (Exception e) { - LOGGER.debug("getting VM list from " + config.getAgentHostname() + logger.debug("getting VM list from " + config.getAgentHostname() + " failed", e); throw new CloudRuntimeException("Exception on getting VMs from " + config.getAgentHostname() + ":" + e.getMessage(), e); @@ -386,7 +387,7 @@ private Map getAllVmStates(Map vmStateMap) } else { ns = State.Unknown; } - LOGGER.trace("state " + ns + " for " + vm.getVmName() + logger.trace("state " + ns + " for " + vm.getVmName() + " based on " + as); states.put(vm.getVmName(), ns); } @@ -411,7 +412,7 @@ private Map syncState(Map vmStateMap) try { newStates = getAllVmStates(vmStateMap); } catch (Ovm3ResourceException e) { - LOGGER.error("Ovm3 full sync failed: ", e); + logger.error("Ovm3 full sync failed: ", e); throw e; } synchronized (vmStateMap) { @@ -422,41 +423,41 @@ private Map syncState(Map vmStateMap) final String vmName = entry.getKey(); State newState = entry.getValue(); final State oldState = oldStates.remove(vmName); - LOGGER.trace("state for " + vmName + ", old: " + oldState + logger.trace("state for " + vmName + ", old: " + oldState + ", new: " + newState); /* eurh ? 
*/ if (newState == State.Stopped && oldState != State.Stopping && oldState != null && oldState != State.Stopped) { - LOGGER.trace("Getting power state...."); + logger.trace("Getting power state...."); newState = State.Running; } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("VM " + vmName + ": ovm has state " + newState + if (logger.isTraceEnabled()) { + logger.trace("VM " + vmName + ": ovm has state " + newState + " and we have state " + (oldState != null ? oldState.toString() : "null")); } if (newState == State.Migrating) { - LOGGER.trace(vmName + " is migrating, skipping state check"); + logger.trace(vmName + " is migrating, skipping state check"); continue; } if (oldState == null) { vmStateMap.put(vmName, newState); - LOGGER.debug("New state without old state: " + vmName); + logger.debug("New state without old state: " + vmName); changes.put(vmName, newState); } else if (oldState == State.Starting) { if (newState == State.Running) { vmStateMap.put(vmName, newState); } else if (newState == State.Stopped) { - LOGGER.debug("Ignoring vm " + vmName + logger.debug("Ignoring vm " + vmName + " because of a lag in starting the vm."); } } else if (oldState == State.Migrating) { if (newState == State.Running) { - LOGGER.debug("Detected that a migrating VM is now running: " + logger.debug("Detected that a migrating VM is now running: " + vmName); vmStateMap.put(vmName, newState); } @@ -464,7 +465,7 @@ private Map syncState(Map vmStateMap) if (newState == State.Stopped) { vmStateMap.put(vmName, newState); } else if (newState == State.Running) { - LOGGER.debug("Ignoring vm " + vmName + logger.debug("Ignoring vm " + vmName + " because of a lag in stopping the vm. "); /* should kill it hard perhaps ? 
*/ } @@ -482,27 +483,27 @@ private Map syncState(Map vmStateMap) final State oldState = entry.getValue(); if (oldState == State.Stopping) { - LOGGER.debug("Removing VM " + vmName + logger.debug("Removing VM " + vmName + " in transition state stopping."); vmStateMap.remove(vmName); } else if (oldState == State.Starting) { - LOGGER.debug("Removing VM " + vmName + logger.debug("Removing VM " + vmName + " in transition state starting."); vmStateMap.remove(vmName); } else if (oldState == State.Stopped) { - LOGGER.debug("Stopped VM " + vmName + " removing."); + logger.debug("Stopped VM " + vmName + " removing."); vmStateMap.remove(vmName); } else if (oldState == State.Migrating) { /* * do something smarter here.. newstate should say stopping * already */ - LOGGER.debug("Ignoring VM " + vmName + logger.debug("Ignoring VM " + vmName + " in migrating state."); } else { /* if it's not there name it stopping */ State state = State.Stopping; - LOGGER.debug("VM " + vmName + logger.debug("VM " + vmName + " is now missing from ovm3 server so removing it"); changes.put(vmName, state); vmStateMap.remove(vmName); @@ -536,7 +537,7 @@ public Map hostVmStateReport() throws Ovm3ResourceException { final Map vmStates = new HashMap(); for (final Map.Entry vm : vmStateMap.entrySet()) { - LOGGER.debug("VM " + vm.getKey() + " state: " + vm.getValue() + ":" + logger.debug("VM " + vm.getKey() + " state: " + vm.getValue() + ":" + convertStateToPower(vm.getValue())); vmStates.put(vm.getKey(), new HostVmStateReportEntry( convertStateToPower(vm.getValue()), c.getIp())); @@ -558,14 +559,14 @@ public CheckHealthAnswer execute(CheckHealthCommand cmd) { try { pong = test.echo(ping); } catch (Ovm3ResourceException e) { - LOGGER.debug("CheckHealth went wrong: " + config.getAgentHostname() + logger.debug("CheckHealth went wrong: " + config.getAgentHostname() + ", " + e.getMessage(), e); return new CheckHealthAnswer(cmd, false); } if (ping.contentEquals(pong)) { return new CheckHealthAnswer(cmd, true); } 
- LOGGER.debug("CheckHealth did not receive " + ping + " but got " + pong + logger.debug("CheckHealth did not receive " + ping + " but got " + pong + " from " + config.getAgentHostname()); return new CheckHealthAnswer(cmd, false); } @@ -577,30 +578,30 @@ public CheckHealthAnswer execute(CheckHealthCommand cmd) { */ public boolean primaryCheck() { if ("".equals(config.getOvm3PoolVip())) { - LOGGER.debug("No cluster vip, not checking for primary"); + logger.debug("No cluster vip, not checking for primary"); return false; } try { CloudstackPlugin cSp = new CloudstackPlugin(c); if (cSp.dom0HasIp(config.getOvm3PoolVip())) { - LOGGER.debug(config.getAgentHostname() + logger.debug(config.getAgentHostname() + " is a primary, already has vip " + config.getOvm3PoolVip()); config.setAgentIsPrimary(true); } else if (cSp.ping(config.getOvm3PoolVip())) { - LOGGER.debug(config.getAgentHostname() + logger.debug(config.getAgentHostname() + " has a primary, someone has vip " + config.getOvm3PoolVip()); config.setAgentHasPrimary(true); } else { - LOGGER.debug(config.getAgentHostname() + logger.debug(config.getAgentHostname() + " becomes a primary, no one has vip " + config.getOvm3PoolVip()); config.setAgentIsPrimary(true); } } catch (Ovm3ResourceException e) { - LOGGER.debug(config.getAgentHostname() + logger.debug(config.getAgentHostname() + " can't reach primary: " + e.getMessage()); config.setAgentHasPrimary(false); } @@ -619,22 +620,22 @@ public ReadyAnswer execute(ReadyCommand cmd) { /* check pool state here */ return new ReadyAnswer(cmd); } else { - LOGGER.debug("Primary IP changes to " + logger.debug("Primary IP changes to " + pool.getPoolPrimaryVip() + ", it should be " + c.getIp()); return new ReadyAnswer(cmd, "I am not the primary server"); } } else if (host.getIsPrimary()) { - LOGGER.debug("Primary, not clustered " + logger.debug("Primary, not clustered " + config.getAgentHostname()); return new ReadyAnswer(cmd); } else { - LOGGER.debug("No primary, not clustered " + 
logger.debug("No primary, not clustered " + config.getAgentHostname()); return new ReadyAnswer(cmd); } } catch (CloudRuntimeException | Ovm3ResourceException e) { - LOGGER.debug("XML RPC Exception" + e.getMessage(), e); + logger.debug("XML RPC Exception" + e.getMessage(), e); throw new CloudRuntimeException("XML RPC Exception" + e.getMessage(), e); } @@ -644,19 +645,19 @@ public ReadyAnswer execute(ReadyCommand cmd) { /* check "the" virtual machine */ public CheckVirtualMachineAnswer execute( final CheckVirtualMachineCommand cmd) { - LOGGER.debug("CheckVirtualMachineCommand: " + cmd.getVmName()); + logger.debug("CheckVirtualMachineCommand: " + cmd.getVmName()); String vmName = cmd.getVmName(); try { CloudstackPlugin plug = new CloudstackPlugin(c); Integer vncPort = Integer.valueOf(plug.getVncPort(vmName)); if (vncPort == 0) { - LOGGER.warn("No VNC port for " + vmName); + logger.warn("No VNC port for " + vmName); } /* we already have the state ftw */ Map states = getAllVmStates(vmStateMap); State vmState = states.get(vmName); if (vmState == null) { - LOGGER.warn("Check state of " + vmName + logger.warn("Check state of " + vmName + " return null in CheckVirtualMachineCommand"); vmState = State.Stopped; } @@ -666,7 +667,7 @@ public CheckVirtualMachineAnswer execute( return new CheckVirtualMachineAnswer(cmd, convertStateToPower(vmState), vncPort); } catch (Ovm3ResourceException e) { - LOGGER.debug("Check migration for " + vmName + " failed", e); + logger.debug("Check migration for " + vmName + " failed", e); return new CheckVirtualMachineAnswer(cmd, convertStateToPower(State.Stopped), null); } @@ -678,13 +679,13 @@ public CheckVirtualMachineAnswer execute( * For now leave it as we're not clustering in OVM terms. 
*/ public MaintainAnswer execute(MaintainCommand cmd) { - LOGGER.debug("MaintainCommand"); + logger.debug("MaintainCommand"); /* * try { * Network net = new Network(c); * net.stopOvsLocalConfig(config.getAgentControlNetworkName()); * } catch (Ovm3ResourceException e) { - * LOGGER.debug("unable to disable " + + * logger.debug("unable to disable " + * config.getAgentControlNetworkName(), e); * } */ @@ -706,7 +707,7 @@ public Answer execute(GetHostStatsCommand cmd) { 0, 0); return new GetHostStatsAnswer(cmd, hostStats); } catch (Exception e) { - LOGGER.debug("Unable to get host stats for: " + cmd.getHostName(), + logger.debug("Unable to get host stats for: " + cmd.getHostName(), e); return new Answer(cmd, false, e.getMessage()); } @@ -716,18 +717,18 @@ public Answer execute(GetHostStatsCommand cmd) { * We rely on storage health with CheckOnHostCommand.... */ public FenceAnswer execute(FenceCommand cmd) { - LOGGER.debug("FenceCommand"); + logger.debug("FenceCommand"); try { Boolean res = false; return new FenceAnswer(cmd, res, res.toString()); } catch (Exception e) { - LOGGER.error("Unable to fence" + cmd.getHostIp(), e); + logger.error("Unable to fence" + cmd.getHostIp(), e); return new FenceAnswer(cmd, false, e.getMessage()); } } public CheckOnHostAnswer execute(CheckOnHostCommand cmd) { - LOGGER.debug("CheckOnHostCommand"); + logger.debug("CheckOnHostCommand"); CloudstackPlugin csp = new CloudstackPlugin(c); try { Boolean alive = csp.dom0CheckStorageHealth(config.getAgentScriptsDir(), @@ -742,7 +743,7 @@ public CheckOnHostAnswer execute(CheckOnHostCommand cmd) { } else { msg = "storage dead for " + cmd.getHost().getGuid(); } - LOGGER.debug(msg); + logger.debug(msg); return new CheckOnHostAnswer(cmd, alive, msg); } catch (Ovm3ResourceException e) { return new CheckOnHostAnswer(cmd, false, "Error while checking storage for " +cmd.getHost().getGuid() +": " + e.getMessage()); diff --git 
a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java index 7626f494bdb4..56b3777f3084 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java @@ -26,7 +26,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -58,8 +59,7 @@ import com.cloud.utils.ssh.SshHelper; public class Ovm3StoragePool { - private static final Logger LOGGER = Logger - .getLogger(Ovm3StoragePool.class); + protected Logger logger = LogManager.getLogger(getClass()); private Connection c; private Ovm3Configuration config; private OvmObject ovmObject = new OvmObject(); @@ -81,7 +81,7 @@ private void setRoles(Pool pool) throws ConfigurationException { } catch (Ovm3ResourceException e) { String msg = "Failed to set server role for host " + config.getAgentHostname() + ": " + e.getMessage(); - LOGGER.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -94,12 +94,12 @@ private void setRoles(Pool pool) throws ConfigurationException { */ private void takeOwnership(Pool pool) throws ConfigurationException { try { - LOGGER.debug("Take ownership of host " + config.getAgentHostname()); + logger.debug("Take ownership of host " + config.getAgentHostname()); pool.takeOwnership(config.getAgentOwnedByUuid(), ""); } catch (Ovm3ResourceException e) { String msg = "Failed to take ownership of host " + config.getAgentHostname(); - LOGGER.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -113,7 +113,7 @@ private void takeOwnership(Pool pool) throws 
ConfigurationException { /* FIXME: Placeholders for now, implement later!!!! */ private void takeOwnership33x(Pool pool) throws ConfigurationException { try { - LOGGER.debug("Take ownership of host " + config.getAgentHostname()); + logger.debug("Take ownership of host " + config.getAgentHostname()); String event = "http://localhost:10024/event"; String stats = "http://localhost:10024/stats"; String mgrCert = "None"; @@ -126,7 +126,7 @@ private void takeOwnership33x(Pool pool) throws ConfigurationException { } catch (Ovm3ResourceException e) { String msg = "Failed to take ownership of host " + config.getAgentHostname(); - LOGGER.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -146,7 +146,7 @@ public boolean prepareForPool() throws ConfigurationException { /* setup pool and role, needs utility to be able to do things */ if (host.getServerRoles().contentEquals( pool.getValidRoles().toString())) { - LOGGER.info("Server role for host " + config.getAgentHostname() + logger.info("Server role for host " + config.getAgentHostname() + " is ok"); } else { setRoles(pool); @@ -161,19 +161,19 @@ public boolean prepareForPool() throws ConfigurationException { if (host.getManagerUuid().equals(config.getAgentOwnedByUuid())) { String msg = "Host " + config.getAgentHostname() + " owned by us"; - LOGGER.debug(msg); + logger.debug(msg); return true; } else { String msg = "Host " + config.getAgentHostname() + " already part of a pool, and not owned by us"; - LOGGER.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } } catch (ConfigurationException | Ovm3ResourceException es) { String msg = "Failed to prepare " + config.getAgentHostname() + " for pool: " + es.getMessage(); - LOGGER.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } return true; @@ -203,7 +203,7 @@ private Boolean setupPool(StorageFilerTO cmd) throws Ovm3ResourceException { PoolOCFS2 poolFs = new PoolOCFS2(c); if (config.getAgentIsPrimary()) { 
try { - LOGGER.debug("Create poolfs on " + config.getAgentHostname() + logger.debug("Create poolfs on " + config.getAgentHostname() + " for repo " + primUuid); /* double check if we're not overwritting anything here!@ */ poolFs.createPoolFs(fsType, mountPoint, clusterUuid, primUuid, @@ -270,7 +270,7 @@ private Boolean addMembers() throws Ovm3ResourceException { members.add(c.getIp()); } } else { - LOGGER.warn(c.getIp() + " noticed primary " + logger.warn(c.getIp() + " noticed primary " + config.getOvm3PoolVip() + " is not part of pool"); return false; } @@ -281,10 +281,10 @@ private Boolean addMembers() throws Ovm3ResourceException { Pool poolM = new Pool(x); if (poolM.isInAPool()) { poolM.setPoolMemberList(members); - LOGGER.debug("Added " + members + " to pool " + logger.debug("Added " + members + " to pool " + poolM.getPoolId() + " on member " + member); } else { - LOGGER.warn(member + logger.warn(member + " unable to be member of a pool it's not in"); return false; } @@ -308,7 +308,7 @@ public Answer execute(DeleteStoragePoolCommand cmd) { pool.leaveServerPool(cmd.getPool().getUuid()); /* also connect to the primary and update the pool list ? 
*/ } catch (Ovm3ResourceException e) { - LOGGER.debug( + logger.debug( "Delete storage pool on host " + config.getAgentHostname() + " failed, however, we leave to user for cleanup and tell management server it succeeded", @@ -342,7 +342,7 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { try { repo.mountRepoFs(mountPoint, ovsRepo); } catch (Ovm3ResourceException e) { - LOGGER.debug("Unable to mount NFS repository " + mountPoint + logger.debug("Unable to mount NFS repository " + mountPoint + " on " + ovsRepo + " requested for " + config.getAgentHostname() + ": " + e.getMessage()); } @@ -350,7 +350,7 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { repo.addRepo(mountPoint, ovsRepo); repoExists = true; } catch (Ovm3ResourceException e) { - LOGGER.debug("NFS repository " + mountPoint + " on " + ovsRepo + logger.debug("NFS repository " + mountPoint + " on " + ovsRepo + " not found creating repo: " + e.getMessage()); } if (!repoExists) { @@ -364,7 +364,7 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { } catch (Ovm3ResourceException e) { msg = "NFS repository " + mountPoint + " on " + ovsRepo + " create failed!"; - LOGGER.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg + " " + e.getMessage(), e); } @@ -375,14 +375,14 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { try { msg = "Configuring " + config.getAgentHostname() + "(" + config.getAgentIp() + ") for pool"; - LOGGER.debug(msg); + logger.debug(msg); setupPool(cmd); msg = "Configured host for pool"; /* add clustering after pooling */ if (config.getAgentInOvm3Cluster()) { msg = "Setup " + config.getAgentHostname() + "(" + config.getAgentIp() + ") for cluster"; - LOGGER.debug(msg); + logger.debug(msg); /* setup cluster */ /* * From cluster.java @@ -400,7 +400,7 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { } } else { msg = "no way dude I can't stand for this"; - 
LOGGER.debug(msg); + logger.debug(msg); } /* * this is to create the .generic_fs_stamp else we're not allowed to @@ -419,7 +419,7 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { } else { msg = "NFS repository " + mountPoint + " on " + ovsRepo + " create failed, was type " + cmd.getType(); - LOGGER.debug(msg); + logger.debug(msg); return false; } @@ -428,7 +428,7 @@ private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException { prepareSecondaryStorageStore(ovsRepo, cmd.getUuid(), cmd.getHost()); } catch (Exception e) { msg = "systemvm.iso copy failed to " + ovsRepo; - LOGGER.debug(msg, e); + logger.debug(msg, e); return false; } return true; @@ -449,7 +449,7 @@ private void prepareSecondaryStorageStore(String storageUrl, try { /* double check */ if (config.getAgentHasPrimary() && config.getAgentInOvm3Pool()) { - LOGGER.debug("Skip systemvm iso copy, leave it to the primary"); + logger.debug("Skip systemvm iso copy, leave it to the primary"); return; } if (lock.lock(3600)) { @@ -466,12 +466,12 @@ private void prepareSecondaryStorageStore(String storageUrl, poolUuid, host, destPath + "/" + srcIso.getName()); if (fp.getSize() != srcIso.getTotalSpace()) { - LOGGER.info(" System VM patch ISO file already exists: " + logger.info(" System VM patch ISO file already exists: " + srcIso.getAbsolutePath().toString() + ", destination: " + destPath); } } catch (Exception e) { - LOGGER.info("Copy System VM patch ISO file to secondary storage. source ISO: " + logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + destPath); @@ -484,12 +484,12 @@ private void prepareSecondaryStorageStore(String storageUrl, destPath, srcIso.getAbsolutePath() .toString(), "0644"); } catch (Exception es) { - LOGGER.error("Unexpected exception ", es); + logger.error("Unexpected exception ", es); String msg = "Unable to copy systemvm ISO on secondary storage. 
src location: " + srcIso.toString() + ", dest location: " + destPath; - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, es); } } @@ -518,7 +518,7 @@ public String setupSecondaryStorage(String url) "Secondary storage host can not be empty!"); } String uuid = ovmObject.newUuid(uri.getHost() + ":" + uri.getPath()); - LOGGER.info("Secondary storage with uuid: " + uuid); + logger.info("Secondary storage with uuid: " + uuid); return setupNfsStorage(uri, uuid); } @@ -549,7 +549,7 @@ private String setupNfsStorage(URI uri, String uuid) } catch (Ovm3ResourceException ec) { msg = "Nfs storage " + uri + " mount on " + mountPoint + " FAILED " + ec.getMessage(); - LOGGER.error(msg); + logger.error(msg); throw ec; } } else { @@ -565,7 +565,7 @@ private String setupNfsStorage(URI uri, String uuid) * @return */ public GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { - LOGGER.debug("Getting stats for: " + cmd.getStorageId()); + logger.debug("Getting stats for: " + cmd.getStorageId()); try { Linux host = new Linux(c); Linux.FileSystem fs = host.getFileSystemByUuid(cmd.getStorageId(), @@ -577,7 +577,7 @@ public GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { || fs == null) { String msg = "Null returned when retrieving stats for " + cmd.getStorageId(); - LOGGER.error(msg); + logger.error(msg); return new GetStorageStatsAnswer(cmd, msg); } /* or is it mntUuid ish ? 
*/ @@ -593,14 +593,14 @@ public GetStorageStatsAnswer execute(final GetStorageStatsCommand cmd) { if ("".equals(sd.getSize())) { String msg = "No size when retrieving stats for " + cmd.getStorageId(); - LOGGER.debug(msg); + logger.debug(msg); return new GetStorageStatsAnswer(cmd, msg); } long total = Long.parseLong(sd.getSize()); long used = total - Long.parseLong(sd.getFreeSize()); return new GetStorageStatsAnswer(cmd, total, used); } catch (Ovm3ResourceException e) { - LOGGER.debug("GetStorageStatsCommand for " + cmd.getStorageId() + logger.debug("GetStorageStatsCommand for " + cmd.getStorageId() + " failed", e); return new GetStorageStatsAnswer(cmd, e.getMessage()); } @@ -617,18 +617,18 @@ public File getSystemVMPatchIsoFile() { String systemVmIsoPath = Script.findScript("", "vms/" + iso); File isoFile = null; if (systemVmIsoPath != null) { - LOGGER.debug("found systemvm patch iso " + systemVmIsoPath); + logger.debug("found systemvm patch iso " + systemVmIsoPath); isoFile = new File(systemVmIsoPath); } if (isoFile == null || !isoFile.exists()) { String svm = "client/target/generated-webapp/WEB-INF/classes/vms/" + iso; - LOGGER.debug("last resort for systemvm patch iso " + svm); + logger.debug("last resort for systemvm patch iso " + svm); isoFile = new File(svm); } assert isoFile != null; if (!isoFile.exists()) { - LOGGER.error("Unable to locate " + iso + " in your setup at " + logger.error("Unable to locate " + iso + " in your setup at " + isoFile.toString()); } return isoFile; @@ -642,7 +642,7 @@ public File getSystemVMPatchIsoFile() { * @throws XmlRpcException */ private Boolean createOCFS2Sr(StorageFilerTO pool) throws XmlRpcException { - LOGGER.debug("OCFS2 Not implemented yet"); + logger.debug("OCFS2 Not implemented yet"); return false; } @@ -654,7 +654,7 @@ private Boolean createOCFS2Sr(StorageFilerTO pool) throws XmlRpcException { */ public Answer execute(ModifyStoragePoolCommand cmd) { StorageFilerTO pool = cmd.getPool(); - LOGGER.debug("modifying pool " 
+ pool); + logger.debug("modifying pool " + pool); try { if (config.getAgentInOvm3Cluster()) { // no native ovm cluster for now, I got to break it in horrible @@ -679,7 +679,7 @@ public Answer execute(ModifyStoragePoolCommand cmd) { return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported."); } catch (Exception e) { - LOGGER.debug("ModifyStoragePoolCommand failed", e); + logger.debug("ModifyStoragePoolCommand failed", e); return new Answer(cmd, false, e.getMessage()); } } @@ -692,7 +692,7 @@ public Answer execute(ModifyStoragePoolCommand cmd) { */ public Answer execute(CreateStoragePoolCommand cmd) { StorageFilerTO pool = cmd.getPool(); - LOGGER.debug("creating pool " + pool); + logger.debug("creating pool " + pool); try { if (pool.getType() == StoragePoolType.NetworkFilesystem) { createRepo(pool); @@ -707,7 +707,7 @@ public Answer execute(CreateStoragePoolCommand cmd) { return new Answer(cmd, false, "OCFS2 is unsupported at the moment"); } else if (pool.getType() == StoragePoolType.PreSetup) { - LOGGER.warn("pre setup for pool " + pool); + logger.warn("pre setup for pool " + pool); } else { return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported."); @@ -717,7 +717,7 @@ public Answer execute(CreateStoragePoolCommand cmd) { + ", create StoragePool failed due to " + e.toString() + " on host:" + config.getAgentHostname() + " pool: " + pool.getHost() + pool.getPath(); - LOGGER.warn(msg, e); + logger.warn(msg, e); return new Answer(cmd, false, msg); } return new Answer(cmd, true, "success"); @@ -741,7 +741,7 @@ public PrimaryStorageDownloadAnswer execute( repo.importVirtualDisk(tmplturl, image, poolName); return new PrimaryStorageDownloadAnswer(image); } catch (Exception e) { - LOGGER.debug("PrimaryStorageDownloadCommand failed", e); + logger.debug("PrimaryStorageDownloadCommand failed", e); return new PrimaryStorageDownloadAnswer(e.getMessage()); } } diff --git 
a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java index a9d673958779..b1547e4a20e6 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VirtualRoutingSupport.java @@ -17,7 +17,8 @@ package com.cloud.hypervisor.ovm3.resources.helpers; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.NetworkRulesSystemVmCommand; @@ -31,8 +32,7 @@ import com.cloud.utils.ExecutionResult; public class Ovm3VirtualRoutingSupport { - private static final Logger LOGGER = Logger - .getLogger(Ovm3VirtualRoutingSupport.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String CREATE = "create"; private static final String SUCCESS = "success"; private final Connection c; @@ -49,8 +49,8 @@ public Answer execute(NetworkUsageCommand cmd) { if (cmd.isForVpc()) { return vpcNetworkUsage(cmd); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info("Executing resource NetworkUsageCommand " + cmd); + if (logger.isInfoEnabled()) { + logger.info("Executing resource NetworkUsageCommand " + cmd); } if (cmd.getOption() != null && CREATE.equals(cmd.getOption())) { String result = networkUsage(cmd.getPrivateIP(), CREATE, null); @@ -101,7 +101,7 @@ private long[] getNetworkStats(String privateIP) { stats[1] += (Long.parseLong(splitResult[i++])); } } catch (Exception e) { - LOGGER.warn( + logger.warn( "Unable to parse return from script return of network usage command: " + e.toString(), e); } @@ -136,7 +136,7 @@ private NetworkUsageAnswer vpcNetworkUsage(NetworkUsageCommand cmd) { args); if 
(!callResult.isSuccess()) { - LOGGER.error("Unable to execute NetworkUsage command on DomR (" + logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. failure due to " + callResult.getDetails()); @@ -145,7 +145,7 @@ private NetworkUsageAnswer vpcNetworkUsage(NetworkUsageCommand cmd) { if ("get".equals(option) || "vpn".equals(option)) { String result = callResult.getDetails(); if (result == null || result.isEmpty()) { - LOGGER.error(" vpc network usage get returns empty "); + logger.error(" vpc network usage get returns empty "); } long[] stats = new long[2]; if (result != null) { @@ -182,18 +182,18 @@ public CheckSshAnswer execute(CheckSshCommand cmd) { if (!cSp.dom0CheckPort(privateIp, cmdPort, retries, interval)) { String msg = "Port " + cmdPort + " not reachable for " + vmName + ": " + config.getAgentHostname(); - LOGGER.info(msg); + logger.info(msg); return new CheckSshAnswer(cmd, msg); } } catch (Exception e) { String msg = "Can not reach port " + cmdPort + " on System vm " + vmName + ": " + config.getAgentHostname() + " due to exception: " + e; - LOGGER.error(msg); + logger.error(msg); return new CheckSshAnswer(cmd, msg); } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Ping " + cmdPort + " succeeded for vm " + vmName + if (logger.isDebugEnabled()) { + logger.debug("Ping " + cmdPort + " succeeded for vm " + vmName + ": " + config.getAgentHostname() + " " + cmd); } return new CheckSshAnswer(cmd); diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VmSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VmSupport.java index 1d15261388d0..4dd9f013e40d 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VmSupport.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3VmSupport.java @@ -24,7 +24,8 @@ import 
org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -60,7 +61,7 @@ import com.cloud.vm.VirtualMachine.State; public class Ovm3VmSupport { - private final Logger LOGGER = Logger.getLogger(Ovm3VmSupport.class); + protected Logger logger = LogManager.getLogger(getClass()); private OvmObject ovmObject = new OvmObject(); private ResourceManager resourceMgr; private Connection c; @@ -89,7 +90,7 @@ public Boolean createVifs(Xen.Vm vm, VirtualMachineTO spec) NicTO[] nics = spec.getNics(); return createVifs(vm, nics); } else { - LOGGER.info("No nics for vm " + spec.getName()); + logger.info("No nics for vm " + spec.getName()); return false; } } @@ -110,18 +111,18 @@ private Boolean createVif(Xen.Vm vm, NicTO nic) try { String net = network.getNetwork(nic); if (net != null) { - LOGGER.debug("Adding vif " + nic.getDeviceId() + " " + logger.debug("Adding vif " + nic.getDeviceId() + " " + nic.getMac() + " " + net + " to " + vm.getVmName()); vm.addVif(nic.getDeviceId(), net, nic.getMac()); } else { - LOGGER.debug("Unable to add vif " + nic.getDeviceId() + logger.debug("Unable to add vif " + nic.getDeviceId() + " no network for " + vm.getVmName()); return false; } } catch (Exception e) { String msg = "Unable to add vif " + nic.getType() + " for " + vm.getVmName() + " " + e.getMessage(); - LOGGER.debug(msg); + logger.debug(msg); throw new Ovm3ResourceException(msg); } return true; @@ -134,18 +135,18 @@ private Boolean deleteVif(Xen.Vm vm, NicTO nic) try { String net = network.getNetwork(nic); if (net != null) { - LOGGER.debug("Removing vif " + nic.getDeviceId() + " " + " " + logger.debug("Removing vif " + nic.getDeviceId() + " " + " " + nic.getMac() + " " + net + " from " + vm.getVmName()); vm.removeVif(net, nic.getMac()); } else { 
- LOGGER.debug("Unable to remove vif " + nic.getDeviceId() + logger.debug("Unable to remove vif " + nic.getDeviceId() + " no network for " + vm.getVmName()); return false; } } catch (Exception e) { String msg = "Unable to remove vif " + nic.getType() + " for " + vm.getVmName() + " " + e.getMessage(); - LOGGER.debug(msg); + logger.debug(msg); throw new Ovm3ResourceException(msg); } return true; @@ -154,8 +155,8 @@ private Boolean deleteVif(Xen.Vm vm, NicTO nic) /* Migration should make sure both HVs are the same ? */ public PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) { VirtualMachineTO vm = cmd.getVirtualMachine(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Preparing host for migrating " + vm.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Preparing host for migrating " + vm.getName()); } NicTO[] nics = vm.getNics(); try { @@ -163,10 +164,10 @@ public PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) { network.getNetwork(nic); } hypervisor.setVmState(vm.getName(), State.Migrating); - LOGGER.debug("VM " + vm.getName() + " is in Migrating state"); + logger.debug("VM " + vm.getName() + " is in Migrating state"); return new PrepareForMigrationAnswer(cmd); } catch (Ovm3ResourceException e) { - LOGGER.error("Catch Exception " + e.getClass().getName() + logger.error("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to: " + e.getMessage()); return new PrepareForMigrationAnswer(cmd, e); } @@ -184,7 +185,7 @@ public MigrateAnswer execute(final MigrateCommand cmd) { * stop the VM. 
*/ String msg = "Migrating " + vmName + " to " + destIp; - LOGGER.info(msg); + logger.info(msg); if (!config.getAgentInOvm3Cluster() && !config.getAgentInOvm3Pool()) { try { Xen xen = new Xen(c); @@ -193,7 +194,7 @@ public MigrateAnswer execute(final MigrateCommand cmd) { if (destHost == null) { msg = "Unable to find migration target host in DB " + destUuid + " with ip " + destIp; - LOGGER.info(msg); + logger.info(msg); return new MigrateAnswer(cmd, false, msg, null); } xen.stopVm(ovmObject.deDash(vm.getVmRootDiskPoolId()), @@ -204,7 +205,7 @@ public MigrateAnswer execute(final MigrateCommand cmd) { } catch (Ovm3ResourceException e) { msg = "Unpooled VM Migrate of " + vmName + " to " + destUuid + " failed due to: " + e.getMessage(); - LOGGER.debug(msg, e); + logger.debug(msg, e); return new MigrateAnswer(cmd, false, msg, null); } finally { /* shouldn't we just reinitialize completely as a last resort ? */ @@ -228,7 +229,7 @@ public MigrateAnswer execute(final MigrateCommand cmd) { } catch (Ovm3ResourceException e) { msg = "Pooled VM Migrate" + ": Migration of " + vmName + " to " + destIp + " failed due to " + e.getMessage(); - LOGGER.debug(msg, e); + logger.debug(msg, e); return new MigrateAnswer(cmd, false, msg, null); } finally { hypervisor.setVmState(vmName, state); @@ -243,10 +244,10 @@ public GetVncPortAnswer execute(GetVncPortCommand cmd) { Xen host = new Xen(c); Xen.Vm vm = host.getRunningVmConfig(cmd.getName()); Integer vncPort = vm.getVncPort(); - LOGGER.debug("get vnc port for " + cmd.getName() + ": " + vncPort); + logger.debug("get vnc port for " + cmd.getName() + ": " + vncPort); return new GetVncPortAnswer(cmd, c.getIp(), vncPort); } catch (Ovm3ResourceException e) { - LOGGER.debug("get vnc port for " + cmd.getName() + " failed", e); + logger.debug("get vnc port for " + cmd.getName() + " failed", e); return new GetVncPortAnswer(cmd, e.getMessage()); } } @@ -263,11 +264,11 @@ private VmStatsEntry getVmStat(String vmName) { } newVmStats = 
cSp.ovsDomUStats(vmName); } catch (Ovm3ResourceException e) { - LOGGER.info("Unable to retrieve stats from " + vmName, e); + logger.info("Unable to retrieve stats from " + vmName, e); return stats; } if (oldVmStats == null) { - LOGGER.debug("No old stats retrieved stats from " + vmName); + logger.debug("No old stats retrieved stats from " + vmName); stats.setNumCPUs(1); stats.setNetworkReadKBs(0); stats.setNetworkWriteKBs(0); @@ -278,7 +279,7 @@ private VmStatsEntry getVmStat(String vmName) { stats.setCPUUtilization(0); stats.setEntityType("vm"); } else { - LOGGER.debug("Retrieved new stats from " + vmName); + logger.debug("Retrieved new stats from " + vmName); int cpus = Integer.parseInt(newVmStats.get("vcpus")); stats.setNumCPUs(cpus); stats.setNetworkReadKBs(doubleMin(newVmStats.get("rx_bytes"), oldVmStats.get("rx_bytes"))); @@ -322,14 +323,14 @@ public boolean startVm(String repoId, String vmId) throws XmlRpcException { Xen host = new Xen(c); try { if (host.getRunningVmConfig(vmId) == null) { - LOGGER.error("Create VM " + vmId + " first on " + c.getIp()); + logger.error("Create VM " + vmId + " first on " + c.getIp()); return false; } else { - LOGGER.info("VM " + vmId + " exists on " + c.getIp()); + logger.info("VM " + vmId + " exists on " + c.getIp()); } host.startVm(repoId, vmId); } catch (Exception e) { - LOGGER.error("Failed to start VM " + vmId + " on " + c.getIp() + logger.error("Failed to start VM " + vmId + " on " + c.getIp() + " " + e.getMessage()); return false; } @@ -349,7 +350,7 @@ public void cleanup(Xen.Vm vm) { try { cleanupNetwork(vm.getVmVifs()); } catch (XmlRpcException e) { - LOGGER.info("Clean up network for " + vm.getVmName() + " failed", e); + logger.info("Clean up network for " + vm.getVmName() + " failed", e); } String vmName = vm.getVmName(); /* should become a single entity */ @@ -361,7 +362,7 @@ public void cleanup(Xen.Vm vm) { */ public Boolean createVbds(Xen.Vm vm, VirtualMachineTO spec) { if (spec.getDisks() == null) { - 
LOGGER.info("No disks defined for " + vm.getVmName()); + logger.info("No disks defined for " + vm.getVmName()); return false; } for (DiskTO disk : spec.getDisks()) { @@ -371,7 +372,7 @@ public Boolean createVbds(Xen.Vm vm, VirtualMachineTO spec) { String diskFile = processor.getVirtualDiskPath(vol.getUuid(), vol.getDataStore().getUuid()); vm.addRootDisk(diskFile); vm.setPrimaryPoolUuid(vol.getDataStore().getUuid()); - LOGGER.debug("Adding root disk: " + diskFile); + logger.debug("Adding root disk: " + diskFile); } else if (disk.getType() == Volume.Type.ISO) { DataTO isoTO = disk.getData(); if (isoTO.getPath() != null) { @@ -389,20 +390,20 @@ public Boolean createVbds(Xen.Vm vm, VirtualMachineTO spec) { + template.getPath(); vm.addIso(isoPath); /* check if secondary storage is mounted */ - LOGGER.debug("Adding ISO: " + isoPath); + logger.debug("Adding ISO: " + isoPath); } } else if (disk.getType() == Volume.Type.DATADISK) { VolumeObjectTO vol = (VolumeObjectTO) disk.getData(); String diskFile = processor.getVirtualDiskPath(vol.getUuid(), vol.getDataStore().getUuid()); vm.addDataDisk(diskFile); - LOGGER.debug("Adding data disk: " + logger.debug("Adding data disk: " + diskFile); } else { throw new CloudRuntimeException("Unknown disk type: " + disk.getType()); } } catch (Exception e) { - LOGGER.debug("CreateVbds failed", e); + logger.debug("CreateVbds failed", e); throw new CloudRuntimeException("Exception" + e.getMessage(), e); } } @@ -439,7 +440,7 @@ private Answer plugNunplugNic(NicTO nic, String vmName, Boolean plug) { vm.getVmUuid()); } catch (Ovm3ResourceException e) { String msg = "Unable to execute command due to " + e.toString(); - LOGGER.debug(msg); + logger.debug(msg); return new Answer(null, false, msg); } return new Answer(null, true, "success"); diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/ConnectionTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/ConnectionTest.java index 
e7a94c9113ae..52215c3cffa0 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/ConnectionTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/ConnectionTest.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import org.apache.xmlrpc.client.XmlRpcClient; import org.apache.xmlrpc.common.XmlRpcHttpRequestConfigImpl; @@ -40,7 +39,6 @@ * Connection */ public class ConnectionTest extends Connection { - private final Logger LOGGER = Logger.getLogger(ConnectionTest.class); XmlTestResultTest results = new XmlTestResultTest(); String result; List multiRes = new ArrayList(); @@ -64,13 +62,13 @@ public Object callTimeoutInSec(String method, List params, int timeout, String result = null; if (getMethodResponse(method) != null) { result = getMethodResponse(method); - LOGGER.debug("methodresponse call: " + method + " - " + params); - LOGGER.trace("methodresponse reply: " + result); + logger.debug("methodresponse call: " + method + " - " + params); + logger.trace("methodresponse reply: " + result); } if (result == null && multiRes.size() >= 0) { result = getResult(); - LOGGER.debug("getresult call: " + method + " - " + params); - LOGGER.trace("getresult reply: " + result); + logger.debug("getresult call: " + method + " - " + params); + logger.trace("getresult reply: " + result); } xr.parse(new InputSource(new StringReader(result))); } catch (Exception e) { diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockAgentManagerImpl.java index e7902ee99c2a..8cb2c32a1c89 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockAgentManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockAgentManagerImpl.java @@ -61,7 +61,6 @@ import 
org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.diagnostics.DiagnosticsAnswer; import org.apache.cloudstack.diagnostics.DiagnosticsCommand; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.inject.Inject; @@ -80,7 +79,6 @@ @Component public class MockAgentManagerImpl extends ManagerBase implements MockAgentManager { - private static final Logger s_logger = Logger.getLogger(MockAgentManagerImpl.class); @Inject DataCenterDao dcDao; @Inject @@ -114,7 +112,7 @@ private Pair getPodCidr(long podId, long dcId) { DataCenterVO zone = dcDao.findById(dcId); if (DataCenter.Type.Edge.equals(zone.getType())) { String subnet = String.format("172.%d.%d.0", random.nextInt(15) + 16, random.nextInt(6) + 1); - s_logger.info(String.format("Pod belongs to an edge zone hence CIDR cannot be found, returning %s/24", subnet)); + logger.info(String.format("Pod belongs to an edge zone hence CIDR cannot be found, returning %s/24", subnet)); return new Pair<>(subnet, 24L); } HashMap> podMap = _podDao.getCurrentPodCidrSubnets(dcId, 0); @@ -123,10 +121,10 @@ private Pair getPodCidr(long podId, long dcId) { Long cidrSize = (Long)cidrPair.get(1); return new Pair(cidrAddress, cidrSize); } catch (PatternSyntaxException e) { - s_logger.error("Exception while splitting pod cidr"); + logger.error("Exception while splitting pod cidr"); return null; } catch (IndexOutOfBoundsException e) { - s_logger.error("Invalid pod cidr. Please check"); + logger.error("Invalid pod cidr. 
Please check"); return null; } } @@ -191,7 +189,7 @@ public Map> createServerResources(Map> createServerResources(Map params) throws Configu random = SecureRandom.getInstance("SHA1PRNG"); _executor = new ThreadPoolExecutor(1, 5, 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory("Simulator-Agent-Mgr")); } catch (NoSuchAlgorithmException e) { - s_logger.debug("Failed to initialize random:" + e.toString()); + logger.debug("Failed to initialize random:" + e.toString()); return false; } return true; @@ -311,7 +309,7 @@ private void handleSystemVMStop() { try { _resourceMgr.deleteHost(host.getId(), true, true); } catch (Exception e) { - s_logger.debug("Failed to delete host: ", e); + logger.debug("Failed to delete host: ", e); } } } @@ -376,12 +374,12 @@ public void run() { try { _resourceMgr.discoverHosts(cmd); } catch (DiscoveryException e) { - s_logger.debug("Failed to discover host: " + e.toString()); + logger.debug("Failed to discover host: " + e.toString()); CallContext.unregister(); return; } } catch (ConfigurationException e) { - s_logger.debug("Failed to load secondary storage resource: " + e.toString()); + logger.debug("Failed to load secondary storage resource: " + e.toString()); CallContext.unregister(); return; } @@ -399,7 +397,7 @@ public MockHost getHost(String guid) { if (_host != null) { return _host; } else { - s_logger.error("Host with guid " + guid + " was not found"); + logger.error("Host with guid " + guid + " was not found"); return null; } } catch (Exception ex) { @@ -526,8 +524,8 @@ public MaintainAnswer maintain(com.cloud.agent.api.MaintainCommand cmd) { @Override public Answer checkNetworkCommand(CheckNetworkCommand cmd) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if network name setup is done on the resource"); + if (logger.isDebugEnabled()) { + logger.debug("Checking if network name setup is done on the resource"); } return new CheckNetworkAnswer(cmd, true, "Network Setup check by names is done"); } diff 
--git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockNetworkManagerImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockNetworkManagerImpl.java index a71d71f07cb1..9cc8a1c4cf3c 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockNetworkManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockNetworkManagerImpl.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckS2SVpnConnectionsCommand; @@ -64,7 +63,6 @@ import com.cloud.utils.component.ManagerBase; public class MockNetworkManagerImpl extends ManagerBase implements MockNetworkManager { - private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); @Inject MockVMDao _mockVmDao; @@ -137,10 +135,10 @@ public Answer setupPVLAN(PvlanSetupCommand cmd) { public PlugNicAnswer plugNic(PlugNicCommand cmd) { String vmname = cmd.getVmName(); if (_mockVmDao.findByVmName(vmname) != null) { - s_logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.debug("Plugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new PlugNicAnswer(cmd, true, "success"); } - s_logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.error("Plug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new PlugNicAnswer(cmd, false, "failure"); } @@ -148,10 +146,10 @@ public PlugNicAnswer plugNic(PlugNicCommand cmd) { public UnPlugNicAnswer unplugNic(UnPlugNicCommand cmd) { String vmname = cmd.getVmName(); if (_mockVmDao.findByVmName(vmname) != null) { - s_logger.debug("Unplugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + 
cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.debug("Unplugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new UnPlugNicAnswer(cmd, true, "success"); } - s_logger.error("Unplug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.error("Unplug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new UnPlugNicAnswer(cmd, false, "failure"); } @@ -159,10 +157,10 @@ public UnPlugNicAnswer unplugNic(UnPlugNicCommand cmd) { public ReplugNicAnswer replugNic(ReplugNicCommand cmd) { String vmname = cmd.getVmName(); if (_mockVmDao.findByVmName(vmname) != null) { - s_logger.debug("Replugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.debug("Replugged NIC (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new ReplugNicAnswer(cmd, true, "success"); } - s_logger.error("Replug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); + logger.error("Replug NIC failed for (dev=" + cmd.getNic().getDeviceId() + ", " + cmd.getNic().getIp() + ") into " + cmd.getVmName()); return new ReplugNicAnswer(cmd, false, "failure"); } @@ -236,7 +234,7 @@ public Answer setUpGuestNetwork(SetupGuestNetworkCommand cmd) { return new Answer(cmd, true, "success"); } catch (Exception e) { String msg = "Creating guest network failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(cmd, false, msg); } } diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockStorageManagerImpl.java index 27b4a716af1b..f313968c15f7 100644 --- 
a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockStorageManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockStorageManagerImpl.java @@ -32,7 +32,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DownloadCommand; @@ -108,7 +107,6 @@ @Component public class MockStorageManagerImpl extends ManagerBase implements MockStorageManager { - private static final Logger s_logger = Logger.getLogger(MockStorageManagerImpl.class); @Inject MockStoragePoolDao _mockStoragePoolDao = null; @Inject @@ -1093,7 +1091,7 @@ public Answer ComputeChecksum(ComputeChecksumCommand cmd) { MessageDigest md = MessageDigest.getInstance("md5"); md5 = String.format("%032x", new BigInteger(1, md.digest(cmd.getTemplatePath().getBytes()))); } catch (NoSuchAlgorithmException e) { - s_logger.debug("failed to gernerate md5:" + e.toString()); + logger.debug("failed to gernerate md5:" + e.toString()); } txn.commit(); return new Answer(cmd, true, md5); diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java index 67f3e95e8725..21d7f7029970 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java @@ -26,7 +26,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -92,7 +91,6 @@ @Component public class MockVmManagerImpl extends ManagerBase implements 
MockVmManager { - private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); @Inject MockVMDao _mockVmDao = null; @@ -261,12 +259,12 @@ public CheckRouterAnswer checkRouter(final CheckRouterCommand cmd) { final MockVm vm = _mockVmDao.findByVmName(router_name); final String args = vm.getBootargs(); if (args.indexOf("router_pr=100") > 0) { - s_logger.debug("Router priority is for PRIMARY"); + logger.debug("Router priority is for PRIMARY"); final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: PRIMARY", true); ans.setState(VirtualRouter.RedundantState.PRIMARY); return ans; } else { - s_logger.debug("Router priority is for BACKUP"); + logger.debug("Router priority is for BACKUP"); final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: BACKUP", true); ans.setState(VirtualRouter.RedundantState.BACKUP); return ans; @@ -459,7 +457,7 @@ public Answer scaleVm(final ScaleVmCommand cmd) { vm.setCpu(cmd.getCpus() * cmd.getMaxSpeed()); vm.setMemory(cmd.getMaxRam()); _mockVmDao.update(vm.getId(), vm); - s_logger.debug("Scaled up VM " + vmName); + logger.debug("Scaled up VM " + vmName); txn.commit(); return new ScaleVmAnswer(cmd, true, null); } catch (final Exception ex) { @@ -474,7 +472,7 @@ public Answer scaleVm(final ScaleVmCommand cmd) { @Override public Answer plugSecondaryIp(final NetworkRulesVmSecondaryIpCommand cmd) { - s_logger.debug("Plugged secondary IP to VM " + cmd.getVmName()); + logger.debug("Plugged secondary IP to VM " + cmd.getVmName()); return new Answer(cmd, true, null); } @@ -483,7 +481,7 @@ public Answer createVmSnapshot(final CreateVMSnapshotCommand cmd) { final String vmName = cmd.getVmName(); final String vmSnapshotName = cmd.getTarget().getSnapshotName(); - s_logger.debug("Created snapshot " + vmSnapshotName + " for vm " + vmName); + logger.debug("Created snapshot " + vmSnapshotName + " for vm " + vmName); return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), cmd.getVolumeTOs()); } @@ -494,7 +492,7 @@ public 
Answer deleteVmSnapshot(final DeleteVMSnapshotCommand cmd) { if (_mockVmDao.findByVmName(cmd.getVmName()) == null) { return new DeleteVMSnapshotAnswer(cmd, false, "No VM by name " + cmd.getVmName()); } - s_logger.debug("Removed snapshot " + snapshotName + " of VM " + vm); + logger.debug("Removed snapshot " + snapshotName + " of VM " + vm); return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); } @@ -506,7 +504,7 @@ public Answer revertVmSnapshot(final RevertToVMSnapshotCommand cmd) { if (vmVo == null) { return new RevertToVMSnapshotAnswer(cmd, false, "No VM by name " + cmd.getVmName()); } - s_logger.debug("Reverted to snapshot " + snapshot + " of VM " + vm); + logger.debug("Reverted to snapshot " + snapshot + " of VM " + vm); return new RevertToVMSnapshotAnswer(cmd, cmd.getVolumeTOs(), vmVo.getPowerState()); } @@ -592,40 +590,40 @@ private boolean logSecurityGroupAction(final SecurityGroupRulesCmd cmd, final Te boolean updateSeqnoAndSig = false; if (currSeqnum != null) { if (cmd.getSeqNum() > currSeqnum) { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum); + logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum); updateSeqnoAndSig = true; if (!cmd.getSignature().equals(currSig)) { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " new signature received:" + cmd.getSignature() + " curr=" + + logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " new signature received:" + cmd.getSignature() + " curr=" + currSig + ", updated iptables"); action = ", updated iptables"; reason = reason + "seqno_increased_sig_changed"; } else { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + ", do nothing"); + logger.info("New seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + ", do nothing"); reason = reason + 
"seqno_increased_sig_same"; } } else if (cmd.getSeqNum() < currSeqnum) { - s_logger.info("Older seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + ", do nothing"); + logger.info("Older seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + ", do nothing"); reason = reason + "seqno_decreased"; } else { if (!cmd.getSignature().equals(currSig)) { - s_logger.info("Identical seqno received: " + cmd.getSeqNum() + " new signature received:" + cmd.getSignature() + " curr=" + currSig + + logger.info("Identical seqno received: " + cmd.getSeqNum() + " new signature received:" + cmd.getSignature() + " curr=" + currSig + ", updated iptables"); action = ", updated iptables"; reason = reason + "seqno_same_sig_changed"; updateSeqnoAndSig = true; } else { - s_logger.info("Identical seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + + logger.info("Identical seqno received: " + cmd.getSeqNum() + " curr=" + currSeqnum + " no change in signature:" + cmd.getSignature() + ", do nothing"); reason = reason + "seqno_same_sig_same"; } } } else { - s_logger.info("New seqno received: " + cmd.getSeqNum() + " old=null"); + logger.info("New seqno received: " + cmd.getSeqNum() + " old=null"); updateSeqnoAndSig = true; action = ", updated iptables"; reason = ", seqno_new"; } - s_logger.info("Programmed network rules for vm " + cmd.getVmName() + " seqno=" + cmd.getSeqNum() + " signature=" + cmd.getSignature() + " guestIp=" + + logger.info("Programmed network rules for vm " + cmd.getVmName() + " seqno=" + cmd.getSeqNum() + " signature=" + cmd.getSignature() + " guestIp=" + cmd.getGuestIp() + ", numIngressRules=" + cmd.getIngressRuleSet().size() + ", numEgressRules=" + cmd.getEgressRuleSet().size() + " total cidrs=" + cmd.getTotalNumCidrs() + action + reason); return updateSeqnoAndSig; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/SimulatorManagerImpl.java 
b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/SimulatorManagerImpl.java index 159f22236c27..cd1eeee77081 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/SimulatorManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.command.UploadStatusCommand; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -141,7 +140,6 @@ @Component public class SimulatorManagerImpl extends ManagerBase implements SimulatorManager, PluggableService { - private static final Logger s_logger = Logger.getLogger(SimulatorManagerImpl.class); private static final Gson s_gson = GsonHelper.getGson(); @Inject MockVmManager _mockVmMgr; @@ -208,7 +206,7 @@ public List> getCommands() { @DB @Override public Answer simulate(final Command cmd, final String hostGuid) { - s_logger.debug("Simulate command " + cmd); + logger.debug("Simulate command " + cmd); Answer answer = null; Exception exception = null; TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); @@ -233,7 +231,7 @@ public Answer simulate(final Command cmd, final String hostGuid) { try { info.setTimeout(Integer.valueOf(entry.getValue())); } catch (final NumberFormatException e) { - s_logger.debug("invalid timeout parameter: " + e.toString()); + logger.debug("invalid timeout parameter: " + e.toString()); } } @@ -242,9 +240,9 @@ public Answer simulate(final Command cmd, final String hostGuid) { final int wait = Integer.valueOf(entry.getValue()); Thread.sleep(wait); } catch (final NumberFormatException e) { - s_logger.debug("invalid wait parameter: " + e.toString()); + logger.debug("invalid wait parameter: " + e.toString()); } catch (final 
InterruptedException e) { - s_logger.debug("thread is interrupted: " + e.toString()); + logger.debug("thread is interrupted: " + e.toString()); } } @@ -450,7 +448,7 @@ public Answer simulate(final Command cmd, final String hostGuid) { || cmd instanceof SecStorageFirewallCfgCommand) { answer = new Answer(cmd); } else { - s_logger.error("Simulator does not implement command of type " + cmd.toString()); + logger.error("Simulator does not implement command of type " + cmd.toString()); answer = Answer.createUnsupportedCommandAnswer(cmd); } } @@ -462,11 +460,11 @@ public Answer simulate(final Command cmd, final String hostGuid) { } } - s_logger.debug("Finished simulate command " + cmd); + logger.debug("Finished simulate command " + cmd); return answer; } catch (final Exception e) { - s_logger.error("Failed execute cmd: ", e); + logger.error("Failed execute cmd: ", e); txn.rollback(); return new Answer(cmd, false, e.toString()); } finally { diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/CleanupSimulatorMockCmd.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/CleanupSimulatorMockCmd.java index 3aabb41f9b9e..ad2f78a1271d 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/CleanupSimulatorMockCmd.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/CleanupSimulatorMockCmd.java @@ -30,14 +30,12 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = "cleanupSimulatorMock", description="cleanup simulator mock", responseObject=SuccessResponse.class) public class CleanupSimulatorMockCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CleanupSimulatorMockCmd.class.getName()); private static final String s_name = "cleanupsimulatormockresponse"; @Inject 
SimulatorManager _simMgr; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java index 2aa666a69fc8..316fef976434 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/ConfigureSimulatorCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -42,7 +41,6 @@ @APICommand(name = "configureSimulator", description = "configure simulator", responseObject = MockResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigureSimulatorCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ConfigureSimulatorCmd.class.getName()); private static final String s_name = "configuresimulatorresponse"; @Inject diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/QuerySimulatorMockCmd.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/QuerySimulatorMockCmd.java index 15ee7f723c3d..98d70b953562 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/QuerySimulatorMockCmd.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/api/commands/QuerySimulatorMockCmd.java @@ -31,14 +31,12 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = "querySimulatorMock", description="query simulator mock", responseObject=MockResponse.class) public class QuerySimulatorMockCmd extends BaseCmd { - public static final Logger s_logger = 
Logger.getLogger(QuerySimulatorMockCmd.class.getName()); private static final String s_name = "querysimulatormockresponse"; @Inject SimulatorManager _simMgr; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorFencer.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorFencer.java index c776edfde353..b37960e26ab9 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorFencer.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorFencer.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.FenceAnswer; @@ -39,7 +38,6 @@ import com.cloud.vm.VirtualMachine; public class SimulatorFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(SimulatorFencer.class); @Inject HostDao _hostDao; @Inject AgentManager _agentMgr; @@ -70,7 +68,7 @@ public SimulatorFencer() { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.Simulator) { - s_logger.debug("Don't know how to fence non simulator hosts " + host.getHypervisorType()); + logger.debug("Don't know how to fence non simulator hosts " + host.getHypervisorType()); return null; } @@ -89,13 +87,13 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { try { answer = (FenceAnswer)_agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); + if 
(logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; } @@ -105,8 +103,8 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } return false; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java index 8996d5af91c4..56a5b08810be 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/ha/SimulatorInvestigator.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.ha.HAManager; import com.cloud.agent.AgentManager; @@ -42,7 +41,6 @@ import com.cloud.vm.VirtualMachine.PowerState; public class SimulatorInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(SimulatorInvestigator.class); @Inject AgentManager _agentMgr; @Inject @@ -77,7 +75,7 @@ public Status isAgentAlive(Host agent) { return answer.getResult() ? 
Status.Up : Status.Down; } } catch (Exception e) { - s_logger.debug("Failed to send command to host: " + neighbor.getId()); + logger.debug("Failed to send command to host: " + neighbor.getId()); } } @@ -93,17 +91,17 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { try { Answer answer = _agentMgr.send(vm.getHostId(), cmd); if (!answer.getResult()) { - s_logger.debug("Unable to get vm state on " + vm.toString()); + logger.debug("Unable to get vm state on " + vm.toString()); throw new UnknownVM(); } CheckVirtualMachineAnswer cvmAnswer = (CheckVirtualMachineAnswer)answer; - s_logger.debug("Agent responded with state " + cvmAnswer.getState().toString()); + logger.debug("Agent responded with state " + cvmAnswer.getState().toString()); return cvmAnswer.getState() == PowerState.PowerOn; } catch (AgentUnavailableException e) { - s_logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } catch (OperationTimedoutException e) { - s_logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } } diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentResourceBase.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentResourceBase.java index cf4b40e14265..b87315b2460c 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentResourceBase.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentResourceBase.java @@ -31,7 +31,8 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.IAgentControl; 
import com.cloud.agent.api.Answer; @@ -50,7 +51,7 @@ import com.cloud.utils.component.ComponentContext; public class AgentResourceBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(AgentResourceBase.class); + protected Logger logger = LogManager.getLogger(getClass()); protected String _name; private List _warnings = new LinkedList(); @@ -71,8 +72,8 @@ public class AgentResourceBase implements ServerResource { public AgentResourceBase(long instanceId, AgentType agentType, SimulatorManager simMgr, String hostGuid) { _instanceId = instanceId; - if (s_logger.isDebugEnabled()) { - s_logger.info("New Routing host instantiated with guid:" + hostGuid); + if (logger.isDebugEnabled()) { + logger.info("New Routing host instantiated with guid:" + hostGuid); } if (agentType == AgentType.Routing) { @@ -101,8 +102,8 @@ protected long getInstanceId() { } public AgentResourceBase() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deserializing simulated agent on reconnect"); + if (logger.isDebugEnabled()) { + logger.debug("Deserializing simulated agent on reconnect"); } } @@ -129,8 +130,8 @@ public boolean configure(String name, Map params) throws Configu } private void reconnect(MockHost host) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reconfiguring existing simulated host w/ name: " + host.getName() + " and guid: " + host.getGuid()); + if (logger.isDebugEnabled()) { + logger.debug("Reconfiguring existing simulated host w/ name: " + host.getName() + " and guid: " + host.getGuid()); } this.agentHost = host; } @@ -230,12 +231,12 @@ public void setAgentControl(IAgentControl agentControl) { } protected String findScript(String script) { - s_logger.debug("Looking for " + script + " in the classpath"); + logger.debug("Looking for " + script + " in the classpath"); URL url = ClassLoader.getSystemResource(script); File file = null; if (url == null) { file = new File("./" + script); - s_logger.debug("Looking for " + script + " in " 
+ file.getAbsolutePath()); + logger.debug("Looking for " + script + " in " + file.getAbsolutePath()); if (!file.exists()) { return null; } diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java index 2c8e73126320..80ced4c230db 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java @@ -24,7 +24,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckVirtualMachineAnswer; @@ -63,7 +62,6 @@ import com.google.gson.stream.JsonReader; public class AgentRoutingResource extends AgentStorageResource { - private static final Logger s_logger = Logger.getLogger(AgentRoutingResource.class); private static final Gson s_gson = GsonHelper.getGson(); private Map> _runningVms = new HashMap>(); @@ -136,7 +134,7 @@ public PingCommand getCurrentStatus(long id) { try { clz = Class.forName(objectType); } catch (ClassNotFoundException e) { - s_logger.info("[ignored] ping returned class", e); + logger.info("[ignored] ping returned class", e); } if (clz != null) { StringReader reader = new StringReader(objectData); @@ -303,7 +301,7 @@ private Answer execute(ShutdownCommand cmd) { @Override public boolean configure(String name, Map params) throws ConfigurationException { if (!super.configure(name, params)) { - s_logger.warn("Base class was unable to configure"); + logger.warn("Base class was unable to configure"); return false; } return true; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentStorageResource.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentStorageResource.java index c8f4701df9c1..d9bcf0b74a4e 100644 --- 
a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentStorageResource.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentStorageResource.java @@ -21,7 +21,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; @@ -40,7 +39,6 @@ import com.cloud.vm.SecondaryStorageVm; public class AgentStorageResource extends AgentResourceBase implements SecondaryStorageResource { - private static final Logger s_logger = Logger.getLogger(AgentStorageResource.class); final protected String _parent = "/mnt/SecStorage"; protected String _role; @@ -101,7 +99,7 @@ public StartupCommand[] initialize() { @Override public boolean configure(String name, Map params) throws ConfigurationException { if (!super.configure(name, params)) { - s_logger.warn("Base class was unable to configure"); + logger.warn("Base class was unable to configure"); return false; } diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorDiscoverer.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorDiscoverer.java index 8f1b07f027b6..37b1ca3e4a80 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorDiscoverer.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorDiscoverer.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -53,7 +52,6 @@ import com.cloud.storage.dao.VMTemplateZoneDao; public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(SimulatorDiscoverer.class); @Inject HostDao _hostDao; @@ -92,8 +90,8 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L if 
(scheme.equals("http")) { if (host == null || !host.startsWith("sim")) { String msg = "uri is not of simulator type so we're not taking care of the discovery for this: " + uri; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } return null; } @@ -119,8 +117,8 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L } } else { String msg = "uriString is not http so we're not taking care of the discovery for this: " + uri; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } return null; } @@ -128,15 +126,15 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L String cluster = null; if (clusterId == null) { String msg = "must specify cluster Id when adding host"; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } throw new RuntimeException(msg); } else { ClusterVO clu = _clusterDao.findById(clusterId); if (clu == null || (clu.getHypervisorType() != HypervisorType.Simulator)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Simulator hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for Simulator hypervisors"); return null; } cluster = Long.toString(clusterId); @@ -149,8 +147,8 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L String pod; if (podId == null) { String msg = "must specify pod Id when adding host"; - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg); } throw new RuntimeException(msg); } else { @@ -174,17 +172,17 @@ public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, L resources = createAgentResources(params); return resources; } catch (Exception ex) { - s_logger.error("Exception when discovering simulator 
hosts: " + ex.getMessage()); + logger.error("Exception when discovering simulator hosts: " + ex.getMessage()); } return null; } private Map> createAgentResources(Map params) { try { - s_logger.info("Creating Simulator Resources"); + logger.info("Creating Simulator Resources"); return _mockAgentMgr.createServerResources(params); } catch (Exception ex) { - s_logger.warn("Caught exception at agent resource creation: " + ex.getMessage(), ex); + logger.warn("Caught exception at agent resource creation: " + ex.getMessage(), ex); } return null; } diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorSecondaryDiscoverer.java index e09a5a950a40..ec5ee9413994 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.resource.SecondaryStorageDiscoverer; import org.apache.cloudstack.storage.resource.SecondaryStorageResource; -import org.apache.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -45,7 +44,6 @@ import com.cloud.storage.dao.SnapshotDao; public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { - private static final Logger s_logger = Logger.getLogger(SimulatorSecondaryDiscoverer.class); @Inject MockStorageManager _mockStorageMgr = null; @Inject @@ -69,7 +67,7 @@ public boolean configure(String name, Map params) throws Configu public Map> find(long dcId, Long podId, Long clusterId, URI uri, String username, String password, List hostTags) { if (!uri.getScheme().equalsIgnoreCase("sim")) { - s_logger.debug("It's not NFS or file or ISO, so not a secondary storage 
server: " + uri.toString()); + logger.debug("It's not NFS or file or ISO, so not a secondary storage server: " + uri.toString()); return null; } List stores = imageStoreDao.listImageStores(); diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorStorageProcessor.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorStorageProcessor.java index 8c8815cc08ac..5e0ee17bb8a9 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorStorageProcessor.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/SimulatorStorageProcessor.java @@ -23,7 +23,8 @@ import java.util.UUID; import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; @@ -57,7 +58,7 @@ public class SimulatorStorageProcessor implements StorageProcessor { - private static final Logger s_logger = Logger.getLogger(SimulatorStorageProcessor.class); + protected Logger logger = LogManager.getLogger(getClass()); protected SimulatorManager hypervisorResource; public SimulatorStorageProcessor(SimulatorManager resource) { @@ -66,14 +67,14 @@ public SimulatorStorageProcessor(SimulatorManager resource) { @Override public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) { - s_logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for SimulatorStorageProcessor"); + logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for SimulatorStorageProcessor"); return new SnapshotAndCopyAnswer(); } @Override public ResignatureAnswer resignature(ResignatureCommand cmd) { - s_logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for SimulatorStorageProcessor"); 
+ logger.info("'ResignatureAnswer resignature(ResignatureCommand)' not currently used for SimulatorStorageProcessor"); return new ResignatureAnswer(); } diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/dao/MockConfigurationDaoImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/dao/MockConfigurationDaoImpl.java index e3c50fddcd79..f763a2aa0e5e 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/dao/MockConfigurationDaoImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/simulator/dao/MockConfigurationDaoImpl.java @@ -21,7 +21,6 @@ import java.util.Formatter; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.simulator.MockConfigurationVO; @@ -32,7 +31,6 @@ @Component public class MockConfigurationDaoImpl extends GenericDaoBase implements MockConfigurationDao { - final static Logger s_logger = Logger.getLogger(MockConfigurationDaoImpl.class); private final SearchBuilder _searchByDcIdName; private final SearchBuilder _searchByDcIDPodIdName; private final SearchBuilder _searchByDcIDPodIdClusterIdName; @@ -139,7 +137,7 @@ public MockConfigurationVO findByNameBottomUP(Long dcId, Long podId, Long cluste return toEntityBean(rs, false); } } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error while executing dynamically build search: " + e.getLocalizedMessage()); } return null; diff --git a/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java b/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java index f50514364941..ebdef1d76d1e 100644 --- a/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java +++ 
b/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorImageStoreDriverImpl.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -48,7 +47,6 @@ import com.cloud.storage.dao.VolumeDao; public class SimulatorImageStoreDriverImpl extends NfsImageStoreDriverImpl { - private static final Logger s_logger = Logger.getLogger(SimulatorImageStoreDriverImpl.class); @Inject TemplateDataStoreDao _templateStoreDao; @@ -114,7 +112,7 @@ public String createEntityExtractUrl(DataStore store, String installPath, Storag EndPoint ep = _epSelector.select(store); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return null; } // Create Symlink at ssvm diff --git a/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java b/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java index edf1e2893a72..4db2a10210d1 100644 --- a/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorImageStoreLifeCycleImpl.java @@ -26,7 +26,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -45,7 +46,7 @@ import com.cloud.utils.UriUtils; public class 
SimulatorImageStoreLifeCycleImpl implements ImageStoreLifeCycle { - private static final Logger s_logger = Logger.getLogger(SimulatorImageStoreLifeCycleImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject ImageStoreHelper imageStoreHelper; @@ -65,7 +66,7 @@ public DataStore initialize(Map dsInfos) { DataStoreRole role = (DataStoreRole)dsInfos.get("role"); Map details = (Map)dsInfos.get("details"); - s_logger.info("Trying to add a new data store at " + url + " to data center " + dcId); + logger.info("Trying to add a new data store at " + url + " to data center " + dcId); URI uri; try { diff --git a/plugins/hypervisors/ucs/src/main/java/com/cloud/ucs/manager/UcsManagerImpl.java b/plugins/hypervisors/ucs/src/main/java/com/cloud/ucs/manager/UcsManagerImpl.java index 031d1c63c731..6127ed012f35 100644 --- a/plugins/hypervisors/ucs/src/main/java/com/cloud/ucs/manager/UcsManagerImpl.java +++ b/plugins/hypervisors/ucs/src/main/java/com/cloud/ucs/manager/UcsManagerImpl.java @@ -41,7 +41,8 @@ import org.apache.cloudstack.api.response.UcsProfileResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.configuration.Config; import com.cloud.dc.ClusterDetailsDao; @@ -67,7 +68,7 @@ import com.cloud.utils.xmlobject.XmlObjectParser; public class UcsManagerImpl implements UcsManager { - public static final Logger s_logger = Logger.getLogger(UcsManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final Long COOKIE_TTL = TimeUnit.MILLISECONDS.convert(100L, TimeUnit.MINUTES); public static final Long COOKIE_REFRESH_TTL = TimeUnit.MILLISECONDS.convert(10L, TimeUnit.MINUTES); @@ -110,7 +111,7 @@ private void discoverNewBlades(Map previous, Map previous, Map getCommandHostDelegation(long hostId, 
Command cmd) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Finding delegation for command of type %s to host %d.", cmd.getClass(), hostId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Finding delegation for command of type %s to host %d.", cmd.getClass(), hostId)); } boolean needDelegation = false; @@ -273,9 +271,9 @@ protected VMwareGuru() { } } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Command of type %s is going to be executed in sequence? %b", cmd.getClass(), cmd.executeInSequence())); - s_logger.trace(String.format("Command of type %s is going to need delegation? %b", cmd.getClass(), needDelegation)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Command of type %s is going to be executed in sequence? %b", cmd.getClass(), cmd.executeInSequence())); + logger.trace(String.format("Command of type %s is going to need delegation? %b", cmd.getClass(), needDelegation)); } if (!needDelegation) { @@ -342,13 +340,13 @@ public boolean trackVmHostChange() { return true; } - private static String resolveNameInGuid(String guid) { + private String resolveNameInGuid(String guid) { String tokens[] = guid.split("@"); assert (tokens.length == 2); String vCenterIp = NetUtils.resolveToIp(tokens[1]); if (vCenterIp == null) { - s_logger.error("Fatal : unable to resolve vCenter address " + tokens[1] + ", please check your DNS configuration"); + logger.error("Fatal : unable to resolve vCenter address " + tokens[1] + ", please check your DNS configuration"); return guid; } @@ -364,7 +362,7 @@ private static String resolveNameInGuid(String guid) { for (NicVO nic : nicVOs) { NetworkVO network = networkDao.findById(nic.getNetworkId()); if (network.getBroadcastDomainType() == BroadcastDomainType.Lswitch) { - s_logger.debug("Nic " + nic.toString() + " is connected to an lswitch, cleanup required"); + logger.debug("Nic " + nic.toString() + " is connected to an lswitch, cleanup required"); NetworkVO networkVO 
= networkDao.findById(nic.getNetworkId()); // We need the traffic label to figure out which vSwitch has the // portgroup @@ -451,7 +449,7 @@ private DatacenterMO getDatacenterMO(long zoneId) throws Exception { ManagedObjectReference dcMor = dcMo.getMor(); if (dcMor == null) { String msg = "Error while getting Vmware datacenter " + vmwareDatacenter.getVmwareDatacenterName(); - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } return dcMo; @@ -525,7 +523,7 @@ private boolean isRootDisk(VirtualDisk disk, Map disksMap private void checkBackingInfo(VirtualDeviceBackingInfo backingInfo) { if (!(backingInfo instanceof VirtualDiskFlatVer2BackingInfo)) { String errorMessage = String.format("Unsupported backing info. Expected: [%s], but received: [%s].", VirtualDiskFlatVer2BackingInfo.class.getSimpleName(), backingInfo.getClass().getSimpleName()); - s_logger.error(errorMessage); + logger.error(errorMessage); throw new CloudRuntimeException(errorMessage); } } @@ -668,11 +666,11 @@ private Long getImportingVMTemplate(List virtualDisks, DatacenterMO * If VM exists: update VM */ private VMInstanceVO getVM(String vmInternalName, long templateId, long guestOsId, long serviceOfferingId, long zoneId, long accountId, long userId, long domainId) { - s_logger.debug(String.format("Trying to get VM with specs: [vmInternalName: %s, templateId: %s, guestOsId: %s, serviceOfferingId: %s].", vmInternalName, + logger.debug(String.format("Trying to get VM with specs: [vmInternalName: %s, templateId: %s, guestOsId: %s, serviceOfferingId: %s].", vmInternalName, templateId, guestOsId, serviceOfferingId)); VMInstanceVO vm = virtualMachineDao.findVMByInstanceNameIncludingRemoved(vmInternalName); if (vm != null) { - s_logger.debug(String.format("Found an existing VM [id: %s, removed: %s] with internalName: [%s].", vm.getUuid(), vm.getRemoved() != null ? 
"yes" : "no", vmInternalName)); + logger.debug(String.format("Found an existing VM [id: %s, removed: %s] with internalName: [%s].", vm.getUuid(), vm.getRemoved() != null ? "yes" : "no", vmInternalName)); vm.setState(VirtualMachine.State.Stopped); vm.setPowerState(VirtualMachine.PowerState.PowerOff); virtualMachineDao.update(vm.getId(), vm); @@ -684,7 +682,7 @@ private VMInstanceVO getVM(String vmInternalName, long templateId, long guestOsI return virtualMachineDao.findById(vm.getId()); } else { long id = userVmDao.getNextInSequence(Long.class, "id"); - s_logger.debug(String.format("Can't find an existing VM with internalName: [%s]. Creating a new VM with: [id: %s, name: %s, templateId: %s, guestOsId: %s, serviceOfferingId: %s].", + logger.debug(String.format("Can't find an existing VM with internalName: [%s]. Creating a new VM with: [id: %s, name: %s, templateId: %s, guestOsId: %s, serviceOfferingId: %s].", vmInternalName, id, vmInternalName, templateId, guestOsId, serviceOfferingId)); UserVmVO vmInstanceVO = new UserVmVO(id, vmInternalName, vmInternalName, templateId, HypervisorType.VMware, guestOsId, false, false, domainId, accountId, userId, @@ -753,7 +751,7 @@ protected VolumeVO updateVolume(VirtualDisk disk, Map dis volume.setAttached(new Date()); _volumeDao.update(volume.getId(), volume); if (volume.getRemoved() != null) { - s_logger.debug(String.format("Marking volume [uuid: %s] of restored VM [%s] as non removed.", volume.getUuid(), + logger.debug(String.format("Marking volume [uuid: %s] of restored VM [%s] as non removed.", volume.getUuid(), ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "instanceName"))); _volumeDao.unremove(volume.getId()); if (vm.getType() == Type.User) { @@ -788,7 +786,7 @@ private void syncVMVolumes(VMInstanceVO vmInstanceVO, List virtualD volume = createVolume(disk, vmToImport, domainId, zoneId, accountId, instanceId, poolId, templateId, backup, true); operation = "created"; } - 
s_logger.debug(String.format("Sync volumes to %s in backup restore operation: %s volume [id: %s].", vmInstanceVO, operation, volume.getUuid())); + logger.debug(String.format("Sync volumes to %s in backup restore operation: %s volume [id: %s].", vmInstanceVO, operation, volume.getUuid())); } } @@ -833,9 +831,9 @@ protected String createVolumeInfoFromVolumes(List vmVolumes) { return GSON.toJson(list.toArray(), Backup.VolumeInfo[].class); } catch (Exception e) { if (CollectionUtils.isEmpty(vmVolumes) || vmVolumes.get(0).getInstanceId() == null) { - s_logger.error(String.format("Failed to create VolumeInfo of VM [id: null] volumes due to: [%s].", e.getMessage()), e); + logger.error(String.format("Failed to create VolumeInfo of VM [id: null] volumes due to: [%s].", e.getMessage()), e); } else { - s_logger.error(String.format("Failed to create VolumeInfo of VM [id: %s] volumes due to: [%s].", vmVolumes.get(0).getInstanceId(), e.getMessage()), e); + logger.error(String.format("Failed to create VolumeInfo of VM [id: %s] volumes due to: [%s].", vmVolumes.get(0).getInstanceId(), e.getMessage()), e); } throw e; } @@ -886,11 +884,11 @@ private NetworkVO getGuestNetworkFromNetworkMorName(String name, long accountId, String[] tagSplit = tag.split("-"); tag = tagSplit[tagSplit.length - 1]; - s_logger.debug(String.format("Trying to find network with vlan: [%s].", vlan)); + logger.debug(String.format("Trying to find network with vlan: [%s].", vlan)); NetworkVO networkVO = networkDao.findByVlan(vlan); if (networkVO == null) { networkVO = createNetworkRecord(zoneId, tag, vlan, accountId, domainId); - s_logger.debug(String.format("Created new network record [id: %s] with details [zoneId: %s, tag: %s, vlan: %s, accountId: %s and domainId: %s].", + logger.debug(String.format("Created new network record [id: %s] with details [zoneId: %s, tag: %s, vlan: %s, accountId: %s and domainId: %s].", networkVO.getUuid(), zoneId, tag, vlan, accountId, domainId)); } return networkVO; @@ -903,7 
+901,7 @@ private Map getNetworksMapping(String[] vmNetworkNames, long Map mapping = new HashMap<>(); for (String networkName : vmNetworkNames) { NetworkVO networkVO = getGuestNetworkFromNetworkMorName(networkName, accountId, zoneId, domainId); - s_logger.debug(String.format("Mapping network name [%s] to networkVO [id: %s].", networkName, networkVO.getUuid())); + logger.debug(String.format("Mapping network name [%s] to networkVO [id: %s].", networkName, networkVO.getUuid())); mapping.put(networkName, networkVO); } return mapping; @@ -940,7 +938,7 @@ private void syncVMNics(VirtualDevice[] nicDevices, DatacenterMO dcMo, Map getDisksMapping(Backup backup, List getDisksMapping(Backup backup, List params) { - s_logger.debug(String.format("Cloning VM %s on external vCenter %s", vmName, hostIp)); + logger.debug(String.format("Cloning VM %s on external vCenter %s", vmName, hostIp)); String vcenter = params.get(VmDetailConstants.VMWARE_VCENTER_HOST); String datacenter = params.get(VmDetailConstants.VMWARE_DATACENTER_NAME); String username = params.get(VmDetailConstants.VMWARE_VCENTER_USERNAME); @@ -1278,25 +1276,25 @@ public UnmanagedInstanceTO cloneHypervisorVMOutOfBand(String hostIp, String vmNa VirtualMachineMO vmMo = dataCenterMO.findVm(vmName); if (vmMo == null) { String err = String.format("Cannot find VM with name %s on %s/%s", vmName, vcenter, datacenter); - s_logger.error(err); + logger.error(err); throw new CloudRuntimeException(err); } VirtualMachinePowerState sourceVmPowerState = vmMo.getPowerState(); if (sourceVmPowerState == VirtualMachinePowerState.POWERED_ON && isWindowsVm(vmMo)) { - s_logger.debug(String.format("VM %s is a Windows VM and its Running, cannot be imported." + + logger.debug(String.format("VM %s is a Windows VM and its Running, cannot be imported." 
+ "Please gracefully shut it down before attempting the import", vmName)); } VirtualMachineMO clonedVM = createCloneFromSourceVM(vmName, vmMo, dataCenterMO); - s_logger.debug(String.format("VM %s cloned successfully", vmName)); + logger.debug(String.format("VM %s cloned successfully", vmName)); UnmanagedInstanceTO clonedInstance = VmwareHelper.getUnmanagedInstance(vmMo.getRunningHost(), clonedVM); setNicsFromSourceVM(clonedInstance, vmMo); clonedInstance.setCloneSourcePowerState(sourceVmPowerState == VirtualMachinePowerState.POWERED_ON ? UnmanagedInstanceTO.PowerState.PowerOn : UnmanagedInstanceTO.PowerState.PowerOff); return clonedInstance; } catch (Exception e) { String err = String.format("Error cloning VM: %s from external vCenter %s: %s", vmName, vcenter, e.getMessage()); - s_logger.error(err, e); + logger.error(err, e); throw new CloudRuntimeException(err, e); } } @@ -1319,7 +1317,7 @@ private void setNicsFromSourceVM(UnmanagedInstanceTO clonedInstance, VirtualMach @Override public boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName, Map params) { - s_logger.debug(String.format("Removing VM %s on external vCenter %s", vmName, hostIp)); + logger.debug(String.format("Removing VM %s on external vCenter %s", vmName, hostIp)); String vcenter = params.get(VmDetailConstants.VMWARE_VCENTER_HOST); String datacenter = params.get(VmDetailConstants.VMWARE_DATACENTER_NAME); String username = params.get(VmDetailConstants.VMWARE_VCENTER_USERNAME); @@ -1331,13 +1329,13 @@ public boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName, M if (vmMo == null) { String err = String.format("Cannot find VM %s on datacenter %s, not possible to remove VM out of band", vmName, datacenter); - s_logger.error(err); + logger.error(err); return false; } return vmMo.destroy(); } catch (Exception e) { String err = String.format("Error destroying external VM %s: %s", vmName, e.getMessage()); - s_logger.error(err, e); + logger.error(err, e); return false; } } 
diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java index d60dc99a4290..461e141fa3db 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VmwareVmImplementer.java @@ -49,7 +49,8 @@ import org.apache.cloudstack.storage.image.deployasis.DeployAsIsHelper; import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.inject.Inject; import java.util.ArrayList; @@ -60,7 +61,7 @@ import java.util.Map; class VmwareVmImplementer { - private static final Logger LOGGER = Logger.getLogger(VmwareVmImplementer.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject DomainRouterDao domainRouterDao; @@ -124,7 +125,7 @@ VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long c try { VirtualEthernetCardType.valueOf(nicDeviceType); } catch (Exception e) { - LOGGER.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); + logger.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString()); } } @@ -135,7 +136,7 @@ VirtualMachineTO implement(VirtualMachineProfile vm, VirtualMachineTO to, long c try { VirtualEthernetCardType.valueOf(nicDeviceType); } catch (Exception e) { - LOGGER.warn(String.format("Invalid NIC device type [%s] specified in VM details, switching to value [%s] of configuration [%s].", + logger.warn(String.format("Invalid NIC device type [%s] specified in VM details, switching to value [%s] of configuration [%s].", 
nicDeviceType, vmwareMgr.VmwareUserVmNicDeviceType.value(), vmwareMgr.VmwareUserVmNicDeviceType.toString())); details.put(VmDetailConstants.NIC_ADAPTER, vmwareMgr.VmwareUserVmNicDeviceType.value()); } @@ -194,9 +195,9 @@ private void setDeployAsIsInfoTO(VirtualMachineProfile vm, VirtualMachineTO to, } private void setDetails(VirtualMachineTO to, Map details) { - if (LOGGER.isTraceEnabled()) { + if (logger.isTraceEnabled()) { for (String key : details.keySet()) { - LOGGER.trace(String.format("Detail for VM %s: %s => %s", to.getName(), key, details.get(key))); + logger.trace(String.format("Detail for VM %s: %s => %s", to.getName(), key, details.get(key))); } } to.setDetails(details); @@ -346,8 +347,8 @@ protected void configureNestedVirtualization(Map details, Virtua Boolean globalNestedVPerVMEnabled = getGlobalNestedVPerVMEnabled(); Boolean shouldEnableNestedVirtualization = shouldEnableNestedVirtualization(globalNestedVirtualisationEnabled, globalNestedVPerVMEnabled, localNestedV); - if(LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format( + if(logger.isDebugEnabled()) { + logger.debug(String.format( "Due to '%B'(globalNestedVirtualisationEnabled) and '%B'(globalNestedVPerVMEnabled) I'm adding a flag with value %B to the vm configuration for Nested Virtualisation.", globalNestedVirtualisationEnabled, globalNestedVPerVMEnabled, @@ -410,11 +411,11 @@ private NicTO[] sortNicsByDeviceId(NicTO[] nics) { protected GuestOSHypervisorVO getGuestOsMapping(GuestOSVO guestOS , String hypervisorVersion) { GuestOSHypervisorVO guestOsMapping = guestOsHypervisorDao.findByOsIdAndHypervisor(guestOS.getId(), Hypervisor.HypervisorType.VMware.toString(), hypervisorVersion); if (guestOsMapping == null) { - LOGGER.debug(String.format("Cannot find guest os mappings for guest os \"%s\" on VMware %s", guestOS.getDisplayName(), hypervisorVersion)); + logger.debug(String.format("Cannot find guest os mappings for guest os \"%s\" on VMware %s", guestOS.getDisplayName(), hypervisorVersion)); 
String parentVersion = CloudStackVersion.getVMwareParentVersion(hypervisorVersion); if (parentVersion != null) { guestOsMapping = guestOsHypervisorDao.findByOsIdAndHypervisor(guestOS.getId(), Hypervisor.HypervisorType.VMware.toString(), parentVersion); - LOGGER.debug(String.format("Found guest os mappings for guest os \"%s\" on VMware %s: %s", guestOS.getDisplayName(), parentVersion, guestOsMapping)); + logger.debug(String.format("Found guest os mappings for guest os \"%s\" on VMware %s: %s", guestOS.getDisplayName(), parentVersion, guestOsMapping)); } } return guestOsMapping; diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareCleanupMaid.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareCleanupMaid.java index d3b001ae9eb1..d2c71c4ee01a 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareCleanupMaid.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareCleanupMaid.java @@ -20,7 +20,8 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.mo.ClusterMO; @@ -30,7 +31,7 @@ import com.cloud.hypervisor.vmware.util.VmwareContext; public class VmwareCleanupMaid { - private static final Logger s_logger = Logger.getLogger(VmwareCleanupMaid.class); + protected static Logger LOGGER = LogManager.getLogger(VmwareCleanupMaid.class); private static Map> s_leftoverDummyVMs = new HashMap>(); @@ -117,11 +118,11 @@ public synchronized static void gcLeftOverVMs(VmwareContext context) { } if (vmMo != null) { - s_logger.info("Found left over dummy VM " + cleanupMaid.getVmName() + ", destroy it"); + LOGGER.info("Found left over dummy VM " + cleanupMaid.getVmName() + ", destroy it"); vmMo.destroy(); } } catch (Throwable e) { - s_logger.warn("Unable to 
destroy left over dummy VM " + cleanupMaid.getVmName()); + LOGGER.warn("Unable to destroy left over dummy VM " + cleanupMaid.getVmName()); } finally { // FIXME mgr.popCleanupCheckpoint(cleanupMaid.getCheckPoint()); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index 1989d3d53a1d..580d44a09d61 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -29,7 +29,6 @@ import com.cloud.dc.VmwareDatacenterVO; import org.apache.cloudstack.api.ApiConstants; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -79,7 +78,6 @@ import com.vmware.vim25.ManagedObjectReference; public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(VmwareServerDiscoverer.class); @Inject VmwareManager _vmwareMgr; @@ -107,27 +105,27 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer List networkElements; public VmwareServerDiscoverer() { - s_logger.info("VmwareServerDiscoverer is constructed"); + logger.info("VmwareServerDiscoverer is constructed"); } @Override public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) throws DiscoveryException { - if (s_logger.isInfoEnabled()) - s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); + if (logger.isInfoEnabled()) + logger.info("Discover host. 
dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); if (podId == null) { - if (s_logger.isInfoEnabled()) - s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); + if (logger.isInfoEnabled()) + logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); return null; } boolean failureInClusterDiscovery = true; String vsmIp = ""; ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for VMware hypervisors"); return null; } @@ -143,7 +141,7 @@ public VmwareServerDiscoverer() { // If either or both not provided, try to retrieve & use the credentials from database, which are provided earlier while adding VMware DC to zone. if (usernameNotProvided || passwordNotProvided) { // Retrieve credentials associated with VMware DC - s_logger.info("Username and/or Password not provided while adding cluster to cloudstack zone. " + logger.info("Username and/or Password not provided while adding cluster to cloudstack zone. " + "Hence using both username & password provided while adding VMware DC to CloudStack zone."); username = vmwareDc.getUser(); password = vmwareDc.getPassword(); @@ -179,7 +177,7 @@ public VmwareServerDiscoverer() { int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); if (hosts.size() >= maxHostsPerCluster) { String msg = "VMware cluster " + cluster.getName() + " is too big to add new host, current size: " + hosts.size() + ", max. 
size: " + maxHostsPerCluster; - s_logger.error(msg); + logger.error(msg); throw new DiscoveredWithErrorException(msg); } } @@ -265,7 +263,7 @@ public VmwareServerDiscoverer() { "Both public traffic and guest traffic is over same physical network " + pNetworkPublic + ". And virtual switch type chosen for each traffic is different" + ". A physical network cannot be shared by different types of virtual switches."; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } } @@ -273,7 +271,7 @@ public VmwareServerDiscoverer() { privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware); if (privateTrafficLabel != null) { - s_logger.info("Detected private network label : " + privateTrafficLabel); + logger.info("Detected private network label : " + privateTrafficLabel); } Pair vsmInfo = new Pair(false, 0L); if (nexusDVS && (guestTrafficLabelObj.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) || @@ -284,13 +282,13 @@ public VmwareServerDiscoverer() { if (zoneType != NetworkType.Basic) { publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware); if (publicTrafficLabel != null) { - s_logger.info("Detected public network label : " + publicTrafficLabel); + logger.info("Detected public network label : " + publicTrafficLabel); } } // Get physical network label guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware); if (guestTrafficLabel != null) { - s_logger.info("Detected guest network label : " + guestTrafficLabel); + logger.info("Detected guest network label : " + guestTrafficLabel); } // Before proceeding with validation of Nexus 1000v VSM check if an instance of Nexus 1000v VSM is already associated with this cluster. 
boolean clusterHasVsm = _vmwareMgr.hasNexusVSM(clusterId); @@ -317,18 +315,18 @@ public VmwareServerDiscoverer() { if (nexusDVS) { if (vsmCredentials != null) { - s_logger.info("Stocking credentials of Nexus VSM"); + logger.info("Stocking credentials of Nexus VSM"); context.registerStockObject("vsmcredentials", vsmCredentials); } } List morHosts = _vmwareMgr.addHostToPodCluster(context, dcId, podId, clusterId, URLDecoder.decode(url.getPath(), "UTF-8")); if (morHosts == null) - s_logger.info("Found 0 hosts."); + logger.info("Found 0 hosts."); if (privateTrafficLabel != null) context.uregisterStockObject("privateTrafficLabel"); if (morHosts == null) { - s_logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath(), "UTF-8")); + logger.error("Unable to find host or cluster based on url: " + URLDecoder.decode(url.getPath(), "UTF-8")); return null; } @@ -339,7 +337,7 @@ public VmwareServerDiscoverer() { morCluster = context.getHostMorByPath(URLDecoder.decode(uriFromCluster.getPath(), "UTF-8")); if (morCluster == null || !morCluster.getType().equalsIgnoreCase("ClusterComputeResource")) { - s_logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url")); + logger.warn("Cluster url does not point to a valid vSphere cluster, url: " + clusterDetails.get("url")); return null; } else { ClusterMO clusterMo = new ClusterMO(context, morCluster); @@ -352,9 +350,9 @@ public VmwareServerDiscoverer() { if (!validateDiscoveredHosts(context, morCluster, morHosts)) { if (morCluster == null) - s_logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster"); + logger.warn("The discovered host is not standalone host, can not be added to a standalone cluster"); else - s_logger.warn("The discovered host does not belong to the cluster"); + logger.warn("The discovered host does not belong to the cluster"); return null; } @@ -390,7 +388,7 @@ public VmwareServerDiscoverer() { 
resource.configure("VMware", params); } catch (ConfigurationException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + url.getHost(), "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + url.getHost(), e); + logger.warn("Unable to instantiate " + url.getHost(), e); } resource.start(); @@ -410,17 +408,17 @@ public VmwareServerDiscoverer() { } catch (DiscoveredWithErrorException e) { throw e; } catch (Exception e) { - s_logger.warn("Unable to connect to Vmware vSphere server. service address: " + url.getHost() + ". " + e); + logger.warn("Unable to connect to Vmware vSphere server. service address: " + url.getHost() + ". " + e); return null; } finally { if (context != null) context.close(); if (failureInClusterDiscovery && vsmInfo.first()) { try { - s_logger.debug("Deleting Nexus 1000v VSM " + vsmIp + " because cluster discovery and addition to zone has failed."); + logger.debug("Deleting Nexus 1000v VSM " + vsmIp + " because cluster discovery and addition to zone has failed."); _nexusElement.deleteCiscoNexusVSM(vsmInfo.second().longValue()); } catch (Exception e) { - s_logger.warn("Deleting Nexus 1000v VSM " + vsmIp + " failed."); + logger.warn("Deleting Nexus 1000v VSM " + vsmIp + " failed."); } } } @@ -445,7 +443,7 @@ private VmwareDatacenterVO fetchVmwareDatacenterByZone(Long dcId) throws Discove vmwareDcZone = _vmwareDcZoneMapDao.findByZoneId(dcId); if (vmwareDcZone == null) { msg = "Zone " + dcId + " is not associated with any VMware DC yet. " + "Please add VMware DC to this zone first and then try to add clusters."; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } @@ -490,13 +488,13 @@ private String validateCluster(URI url, VmwareDatacenterVO vmwareDc) throws Disc msg = "This cluster " + clusterName + " belongs to vCenter " + url.getHost() + ". But this zone is associated with VMware DC from vCenter " + vCenterHost + ". 
Make sure the cluster being added belongs to vCenter " + vCenterHost + " and VMware DC " + vmwareDcNameFromDb; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } else if (!vmwareDcNameFromDb.equalsIgnoreCase(vmwareDcNameFromApi)) { msg = "This cluster " + clusterName + " belongs to VMware DC " + vmwareDcNameFromApi + " .But this zone is associated with VMware DC " + vmwareDcNameFromDb + ". Make sure the cluster being added belongs to VMware DC " + vmwareDcNameFromDb + " in vCenter " + vCenterHost; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } return updatedInventoryPath; @@ -543,15 +541,15 @@ public Hypervisor.HypervisorType getHypervisorType() { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) - s_logger.info("Configure VmwareServerDiscoverer, discover name: " + name); + if (logger.isInfoEnabled()) + logger.info("Configure VmwareServerDiscoverer, discover name: " + name); super.configure(name, params); createVmwareToolsIso(); - if (s_logger.isInfoEnabled()) { - s_logger.info("VmwareServerDiscoverer has been successfully configured"); + if (logger.isInfoEnabled()) { + logger.info("VmwareServerDiscoverer has been successfully configured"); } _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; @@ -630,7 +628,7 @@ private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWi try { trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defaultVirtualSwitchType); } catch (InvalidParameterValueException e) { - s_logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); + logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); throw e; } @@ -666,7 +664,7 @@ private VmwareTrafficLabel getTrafficInfo(TrafficType 
trafficType, String zoneWi try { trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defVirtualSwitchType); } catch (InvalidParameterValueException e) { - s_logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); + logger.error("Failed to recognize virtual switch type specified for " + trafficType + " traffic due to " + e.getMessage()); throw e; } @@ -741,11 +739,11 @@ public ServerResource reloadResource(HostVO host) { try { resource.configure(host.getName(), params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to " + e.getMessage()); return null; } if (!resource.start()) { - s_logger.warn("Unable to start the resource"); + logger.warn("Unable to start the resource"); return null; } } @@ -755,7 +753,7 @@ public ServerResource reloadResource(HostVO host) { private void validateVswitchType(String inputVswitchType) { VirtualSwitchType vSwitchType = VirtualSwitchType.getType(inputVswitchType); if (vSwitchType == VirtualSwitchType.None) { - s_logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment."); + logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment."); throw new InvalidParameterValueException("Invalid virtual switch type : " + inputVswitchType); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java index 28c98fd78311..0077a4decbc1 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import 
org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.vmware.LegacyZoneVO; @@ -33,7 +32,6 @@ @Component @DB public class LegacyZoneDaoImpl extends GenericDaoBase implements LegacyZoneDao { - protected static final Logger s_logger = Logger.getLogger(LegacyZoneDaoImpl.class); final SearchBuilder zoneSearch; final SearchBuilder fullTableSearch; diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/CleanupFullyClonedTemplatesTask.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/CleanupFullyClonedTemplatesTask.java index 1437e057b86e..fa7675906d69 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/CleanupFullyClonedTemplatesTask.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/CleanupFullyClonedTemplatesTask.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import java.util.List; @@ -41,7 +40,6 @@ */ public class CleanupFullyClonedTemplatesTask extends ManagedContextRunnable { - private static final Logger s_logger = Logger.getLogger(CleanupFullyClonedTemplatesTask.class); private PrimaryDataStoreDao primaryStorageDao; private VMTemplatePoolDao templateDataStoreDao; @@ -64,23 +62,23 @@ public class CleanupFullyClonedTemplatesTask extends ManagedContextRunnable { this.vmInstanceDao = vmInstanceDao; this.cloneSettingDao = cloneSettingDao; this.templateManager = templateManager; - if(s_logger.isDebugEnabled()) { - s_logger.debug("new task created: " + this); + if(logger.isDebugEnabled()) { + logger.debug("new task created: " + this); } } @Override public void runInContext() { mine = Thread.currentThread(); - s_logger.info("running job to mark fully cloned templates for gc 
in thread " + mine.getName()); + logger.info("running job to mark fully cloned templates for gc in thread " + mine.getName()); if (StorageManager.VmwareCreateCloneFull.value()) { // only run if full cloning is being used (might need to be more fine grained) try { queryAllPools(); } catch (Throwable t) { - s_logger.error("error during job to mark fully cloned templates for gc in thread " + mine.getName()); - if(s_logger.isDebugEnabled()) { - s_logger.debug("running job to mark fully cloned templates for gc in thread " + mine.getName(),t); + logger.error("error during job to mark fully cloned templates for gc in thread " + mine.getName()); + if(logger.isDebugEnabled()) { + logger.debug("running job to mark fully cloned templates for gc in thread " + mine.getName(),t); } } } @@ -97,8 +95,8 @@ private void queryAllPools() { private void queryPoolForTemplates(StoragePoolVO pool, long zoneId) { // we don't need those specific to other hypervisor types if (pool.getHypervisor() == null || Hypervisor.HypervisorType.VMware.equals(pool.getHypervisor())) { - if(s_logger.isDebugEnabled()) { - s_logger.debug(mine.getName() + " is marking fully cloned templates in pool " + pool.getName()); + if(logger.isDebugEnabled()) { + logger.debug(mine.getName() + " is marking fully cloned templates in pool " + pool.getName()); } List templatePrimaryDataStoreVOS = templateDataStoreDao.listByPoolId(pool.getId()); for (VMTemplateStoragePoolVO templateMapping : templatePrimaryDataStoreVOS) { @@ -107,16 +105,16 @@ private void queryPoolForTemplates(StoragePoolVO pool, long zoneId) { } } } else { - if(s_logger.isDebugEnabled()) { - s_logger.debug(mine.getName() + " is ignoring pool " + pool.getName() + " id == " + pool.getId()); + if(logger.isDebugEnabled()) { + logger.debug(mine.getName() + " is ignoring pool " + pool.getName() + " id == " + pool.getId()); } } } private boolean canRemoveTemplateFromZone(long zoneId, VMTemplateStoragePoolVO templateMapping) { if 
(!templateMapping.getMarkedForGC()) { - if(s_logger.isDebugEnabled()) { - s_logger.debug(mine.getName() + " is checking template with id " + templateMapping.getTemplateId() + " for deletion from pool with id " + templateMapping.getPoolId()); + if(logger.isDebugEnabled()) { + logger.debug(mine.getName() + " is checking template with id " + templateMapping.getTemplateId() + " for deletion from pool with id " + templateMapping.getPoolId()); } TemplateJoinVO templateJoinVO = templateDao.findByIdIncludingRemoved(templateMapping.getTemplateId()); @@ -141,14 +139,14 @@ private boolean markedForGc(VMTemplateStoragePoolVO templateMapping, long zoneId break; } } catch (Exception e) { - s_logger.error("failed to retrieve vm clone setting for vm " + vm.toString()); - if(s_logger.isDebugEnabled()) { - s_logger.debug("failed to retrieve vm clone setting for vm " + vm.toString(), e); + logger.error("failed to retrieve vm clone setting for vm " + vm.toString()); + if(logger.isDebugEnabled()) { + logger.debug("failed to retrieve vm clone setting for vm " + vm.toString(), e); } } } if (!used) { - s_logger.info(mine.getName() + " is marking template with id " + templateMapping.getTemplateId() + " for gc in pool with id " + templateMapping.getPoolId()); + logger.info(mine.getName() + " is marking template with id " + templateMapping.getTemplateId() + " for gc in pool with id " + templateMapping.getPoolId()); // else // mark it for removal from primary store templateMapping.setMarkedForGC(true); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index b5f4cf3a93f0..2c570f64a44c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -67,7 +67,6 @@ import 
org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.amazonaws.util.CollectionUtils; import com.cloud.agent.AgentManager; @@ -174,7 +173,6 @@ import com.vmware.vim25.ManagedObjectReference; public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService, Configurable { - private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class); private static final long SECONDS_PER_MINUTE = 60; private static final int DEFAULT_PORTS_PER_DV_PORT_GROUP_VSPHERE4_x = 256; @@ -284,10 +282,10 @@ private boolean isSystemVmIsoCopyNeeded(File srcIso, File destIso) { String destIsoMd5 = DigestUtils.md5Hex(new FileInputStream(destIso)); copyNeeded = !StringUtils.equals(srcIsoMd5, destIsoMd5); if (copyNeeded) { - s_logger.debug(String.format("MD5 checksum: %s for source ISO: %s is different from MD5 checksum: %s from destination ISO: %s", srcIsoMd5, srcIso.getAbsolutePath(), destIsoMd5, destIso.getAbsolutePath())); + logger.debug(String.format("MD5 checksum: %s for source ISO: %s is different from MD5 checksum: %s from destination ISO: %s", srcIsoMd5, srcIso.getAbsolutePath(), destIsoMd5, destIso.getAbsolutePath())); } } catch (IOException e) { - s_logger.debug(String.format("Unable to compare MD5 checksum for systemvm.iso at source: %s and destination: %s", srcIso.getAbsolutePath(), destIso.getAbsolutePath()), e); + logger.debug(String.format("Unable to compare MD5 checksum for systemvm.iso at source: %s and destination: %s", srcIso.getAbsolutePath(), destIso.getAbsolutePath()), e); } return copyNeeded; } @@ -303,10 +301,10 @@ public ConfigKey[] getConfigKeys() { } @Override public boolean configure(String name, Map params) throws ConfigurationException { - s_logger.info("Configure VmwareManagerImpl, manager name: " + name); + logger.info("Configure VmwareManagerImpl, manager 
name: " + name); if (!_configDao.isPremium()) { - s_logger.error("Vmware component can only run under premium distribution"); + logger.error("Vmware component can only run under premium distribution"); throw new ConfigurationException("Vmware component can only run under premium distribution"); } @@ -314,7 +312,7 @@ public boolean configure(String name, Map params) throws Configu if (_instance == null) { _instance = "DEFAULT"; } - s_logger.info("VmwareManagerImpl config - instance.name: " + _instance); + logger.info("VmwareManagerImpl config - instance.name: " + _instance); _mountParent = _configDao.getValue(Config.MountParent.key()); if (_mountParent == null) { @@ -324,7 +322,7 @@ public boolean configure(String name, Map params) throws Configu if (_instance != null) { _mountParent = _mountParent + File.separator + _instance; } - s_logger.info("VmwareManagerImpl config - _mountParent: " + _mountParent); + logger.info("VmwareManagerImpl config - _mountParent: " + _mountParent); String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; @@ -361,18 +359,18 @@ public boolean configure(String name, Map params) throws Configu _additionalPortRangeStart = NumbersUtil.parseInt(_configDao.getValue(Config.VmwareAdditionalVncPortRangeStart.key()), 59000); if (_additionalPortRangeStart > 65535) { - s_logger.warn("Invalid port range start port (" + _additionalPortRangeStart + ") for additional VNC port allocation, reset it to default start port 59000"); + logger.warn("Invalid port range start port (" + _additionalPortRangeStart + ") for additional VNC port allocation, reset it to default start port 59000"); _additionalPortRangeStart = 59000; } _additionalPortRangeSize = NumbersUtil.parseInt(_configDao.getValue(Config.VmwareAdditionalVncPortRangeSize.key()), 1000); if (_additionalPortRangeSize < 0 || _additionalPortRangeStart + _additionalPortRangeSize > 65535) { - s_logger.warn("Invalid port range size (" + 
_additionalPortRangeSize + " for range starts at " + _additionalPortRangeStart); + logger.warn("Invalid port range size (" + _additionalPortRangeSize + " for range starts at " + _additionalPortRangeStart); _additionalPortRangeSize = Math.min(1000, 65535 - _additionalPortRangeStart); } _vCenterSessionTimeout = NumbersUtil.parseInt(_configDao.getValue(Config.VmwareVcenterSessionTimeout.key()), 1200) * 1000; - s_logger.info("VmwareManagerImpl config - vmware.vcenter.session.timeout: " + _vCenterSessionTimeout); + logger.info("VmwareManagerImpl config - vmware.vcenter.session.timeout: " + _vCenterSessionTimeout); _recycleHungWorker = _configDao.getValue(Config.VmwareRecycleHungWorker.key()); if (_recycleHungWorker == null || _recycleHungWorker.isEmpty()) { @@ -384,13 +382,13 @@ public boolean configure(String name, Map params) throws Configu _rootDiskController = DiskControllerType.ide.toString(); } - s_logger.info("Additional VNC port allocation range is settled at " + _additionalPortRangeStart + " to " + (_additionalPortRangeStart + _additionalPortRangeSize)); + logger.info("Additional VNC port allocation range is settled at " + _additionalPortRangeStart + " to " + (_additionalPortRangeStart + _additionalPortRangeSize)); ((VmwareStorageManagerImpl)_storageMgr).configure(params); _agentMgr.registerForHostEvents(this, true, true, true); - s_logger.info("VmwareManagerImpl has been successfully configured"); + logger.info("VmwareManagerImpl has been successfully configured"); return true; } @@ -402,13 +400,13 @@ public boolean start() { startTemplateCleanJobSchedule(); startupCleanup(_mountParent); - s_logger.info("start done"); + logger.info("start done"); return true; } @Override public boolean stop() { - s_logger.info("shutting down scheduled tasks"); + logger.info("shutting down scheduled tasks"); templateCleanupScheduler.shutdown(); shutdownCleanup(); return true; @@ -448,7 +446,7 @@ private void prepareHost(HostMO hostMo, String privateTrafficLabel) throws Excep 
vlanId = mgmtTrafficLabelObj.getVlanId(); vSwitchType = mgmtTrafficLabelObj.getVirtualSwitchType().toString(); - s_logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel); + logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel); VirtualSwitchType vsType = VirtualSwitchType.getType(vSwitchType); //The management network is probably always going to be a physical network with islation type of vlans, so assume BroadcastDomainType VLAN if (VirtualSwitchType.StandardVirtualSwitch == vsType) { @@ -527,7 +525,7 @@ public List addHostToPodCluster(VmwareContext serviceCon int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(HypervisorType.VMware, version); if (hosts.size() > maxHostsPerCluster) { String msg = "Failed to add VMware cluster as size is too big, current size: " + hosts.size() + ", max. size: " + maxHostsPerCluster; - s_logger.error(msg); + logger.error(msg); throw new DiscoveredWithErrorException(msg); } } @@ -551,12 +549,12 @@ public List addHostToPodCluster(VmwareContext serviceCon returnedHostList.add(mor); return returnedHostList; } else { - s_logger.error("Unsupport host type " + mor.getType() + ":" + mor.getValue() + " from inventory path: " + hostInventoryPath); + logger.error("Unsupport host type " + mor.getType() + ":" + mor.getValue() + " from inventory path: " + hostInventoryPath); return null; } } - s_logger.error("Unable to find host from inventory path: " + hostInventoryPath); + logger.error("Unable to find host from inventory path: " + hostInventoryPath); return null; } @@ -573,13 +571,13 @@ public Pair getSecondaryStorageStoreUrlAndId(long dcId) { if (secUrl == null) { // we are using non-NFS image store, then use cache storage instead - s_logger.info("Secondary storage is not NFS, we need to use staging storage"); + logger.info("Secondary storage is not NFS, we need to use staging storage"); DataStore cacheStore = 
_dataStoreMgr.getImageCacheStore(dcId); if (cacheStore != null) { secUrl = cacheStore.getUri(); secId = cacheStore.getId(); } else { - s_logger.warn("No staging storage is found when non-NFS secondary storage is used"); + logger.warn("No staging storage is found when non-NFS secondary storage is used"); } } @@ -600,12 +598,12 @@ public List> getSecondaryStorageStoresUrlAndIdList(long dcId) if (urlIdList.isEmpty()) { // we are using non-NFS image store, then use cache storage instead - s_logger.info("Secondary storage is not NFS, we need to use staging storage"); + logger.info("Secondary storage is not NFS, we need to use staging storage"); DataStore cacheStore = _dataStoreMgr.getImageCacheStore(dcId); if (cacheStore != null) { urlIdList.add(new Pair<>(cacheStore.getUri(), cacheStore.getId())); } else { - s_logger.warn("No staging storage is found when non-NFS secondary storage is used"); + logger.warn("No staging storage is found when non-NFS secondary storage is used"); } } @@ -654,17 +652,17 @@ public void gcLeftOverVMs(VmwareContext context) { @Override public boolean needRecycle(String workerTag) { - if (s_logger.isInfoEnabled()) - s_logger.info("Check to see if a worker VM with tag " + workerTag + " needs to be recycled"); + if (logger.isInfoEnabled()) + logger.info("Check to see if a worker VM with tag " + workerTag + " needs to be recycled"); if (workerTag == null || workerTag.isEmpty()) { - s_logger.error("Invalid worker VM tag " + workerTag); + logger.error("Invalid worker VM tag " + workerTag); return false; } String tokens[] = workerTag.split("-"); if (tokens.length != 3) { - s_logger.error("Invalid worker VM tag " + workerTag); + logger.error("Invalid worker VM tag " + workerTag); return false; } @@ -673,14 +671,14 @@ public boolean needRecycle(String workerTag) { long runid = Long.parseLong(tokens[2]); if (msHostPeerDao.countStateSeenInPeers(msid, runid, ManagementServerHost.State.Down) > 0) { - if (s_logger.isInfoEnabled()) - s_logger.info("Worker 
VM's owner management server node has been detected down from peer nodes, recycle it"); + if (logger.isInfoEnabled()) + logger.info("Worker VM's owner management server node has been detected down from peer nodes, recycle it"); return true; } if (runid != clusterManager.getManagementRunId(msid)) { - if (s_logger.isInfoEnabled()) - s_logger.info("Worker VM's owner management server has changed runid, recycle it"); + if (logger.isInfoEnabled()) + logger.info("Worker VM's owner management server has changed runid, recycle it"); return true; } @@ -691,13 +689,13 @@ public boolean needRecycle(String workerTag) { Instant end = start.plusSeconds(2 * (AsyncJobManagerImpl.JobExpireMinutes.value() + AsyncJobManagerImpl.JobCancelThresholdMinutes.value()) * SECONDS_PER_MINUTE); Instant now = Instant.now(); if(s_vmwareCleanOldWorderVMs.value() && now.isAfter(end)) { - if(s_logger.isInfoEnabled()) { - s_logger.info("Worker VM expired, seconds elapsed: " + Duration.between(start,now).getSeconds()); + if(logger.isInfoEnabled()) { + logger.info("Worker VM expired, seconds elapsed: " + Duration.between(start,now).getSeconds()); } return true; } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Worker VM with tag '" + workerTag + "' does not need recycling, yet." + + if (logger.isTraceEnabled()) { + logger.trace("Worker VM with tag '" + workerTag + "' does not need recycling, yet." + "But in " + Duration.between(now,end).getSeconds() + " seconds, though"); } return false; @@ -716,7 +714,7 @@ public void prepareSecondaryStorageStore(String storageUrl, Long storeId) { if (!patchFolder.exists()) { if (!patchFolder.mkdirs()) { String msg = "Unable to create systemvm folder on secondary storage. 
location: " + patchFolder.toString(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -724,23 +722,23 @@ public void prepareSecondaryStorageStore(String storageUrl, Long storeId) { File srcIso = getSystemVMPatchIsoFile(); File destIso = new File(mountPoint + "/systemvm/" + getSystemVMIsoFileNameOnDatastore()); if (isSystemVmIsoCopyNeeded(srcIso, destIso)) { - s_logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage"); + logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage"); _configServer.updateKeyPairs(); - s_logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + + logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + destIso.getAbsolutePath()); try { FileUtil.copyfile(srcIso, destIso); } catch (IOException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); String msg = "Unable to copy systemvm ISO on secondary storage. 
src location: " + srcIso.toString() + ", dest location: " + destIso; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists"); + if (logger.isTraceEnabled()) { + logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists"); } } } finally { @@ -778,7 +776,7 @@ private File getSystemVMPatchIsoFile() { assert (isoFile != null); if (!isoFile.exists()) { - s_logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); + logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); } return isoFile; } @@ -795,7 +793,7 @@ public File getSystemVMKeyFile() { } assert (keyFile != null); if (!keyFile.exists()) { - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); + logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } @@ -813,13 +811,13 @@ public String getMountPoint(String storageUrl, String nfsVersion) { try { uri = new URI(storageUrl); } catch (URISyntaxException e) { - s_logger.error("Invalid storage URL format ", e); + logger.error("Invalid storage URL format ", e); throw new CloudRuntimeException("Unable to create mount point due to invalid storage URL format " + storageUrl); } mountPoint = mount(uri.getHost() + ":" + uri.getPath(), _mountParent, nfsVersion); if (mountPoint == null) { - s_logger.error("Unable to create mount point for " + storageUrl); + logger.error("Unable to create mount point for " + storageUrl); return "/mnt/sec"; // throw new CloudRuntimeException("Unable to create mount point for " + storageUrl); } @@ -840,14 +838,14 @@ private String setupMountPoint(String parent) { break; } } - s_logger.error("Unable to create mount: " + mntPt); + logger.error("Unable to create mount: " + mntPt); } return mountPoint; } private void startupCleanup(String parent) { - 
s_logger.info("Cleanup mounted NFS mount points used in previous session"); + logger.info("Cleanup mounted NFS mount points used in previous session"); long mshostId = ManagementServerNode.getManagementServerId(); @@ -855,14 +853,14 @@ private void startupCleanup(String parent) { List mounts = _storage.listMountPointsByMsHost(parent, mshostId); if (mounts != null && !mounts.isEmpty()) { for (String mountPoint : mounts) { - s_logger.info("umount NFS mount from previous session: " + mountPoint); + logger.info("umount NFS mount from previous session: " + mountPoint); String result = null; - Script command = new Script(true, "umount", _timeout, s_logger); + Script command = new Script(true, "umount", _timeout, logger); command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to umount " + mountPoint + " due to " + result); + logger.warn("Unable to umount " + mountPoint + " due to " + result); } File file = new File(mountPoint); if (file.exists()) { @@ -873,17 +871,17 @@ private void startupCleanup(String parent) { } private void shutdownCleanup() { - s_logger.info("Cleanup mounted NFS mount points used in current session"); + logger.info("Cleanup mounted NFS mount points used in current session"); for (String mountPoint : _storageMounts.values()) { - s_logger.info("umount NFS mount: " + mountPoint); + logger.info("umount NFS mount: " + mountPoint); String result = null; - Script command = new Script(true, "umount", _timeout, s_logger); + Script command = new Script(true, "umount", _timeout, logger); command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to umount " + mountPoint + " due to " + result); + logger.warn("Unable to umount " + mountPoint + " due to " + result); } File file = new File(mountPoint); if (file.exists()) { @@ -895,13 +893,13 @@ private void shutdownCleanup() { protected String mount(String path, String parent, String nfsVersion) { String mountPoint = 
setupMountPoint(parent); if (mountPoint == null) { - s_logger.warn("Unable to create a mount point"); + logger.warn("Unable to create a mount point"); return null; } Script script = null; String result = null; - Script command = new Script(true, "mount", _timeout, s_logger); + Script command = new Script(true, "mount", _timeout, logger); command.add("-t", "nfs"); if (nfsVersion != null){ command.add("-o", "vers=" + nfsVersion); @@ -914,7 +912,7 @@ protected String mount(String path, String parent, String nfsVersion) { command.add(mountPoint); result = command.execute(); if (result != null) { - s_logger.warn("Unable to mount " + path + " due to " + result); + logger.warn("Unable to mount " + path + " due to " + result); File file = new File(mountPoint); if (file.exists()) { file.delete(); @@ -923,11 +921,11 @@ protected String mount(String path, String parent, String nfsVersion) { } // Change permissions for the mountpoint - script = new Script(true, "chmod", _timeout, s_logger); + script = new Script(true, "chmod", _timeout, logger); script.add("1777", mountPoint); result = script.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); + logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); } return mountPoint; } @@ -999,8 +997,8 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) protected final static int DEFAULT_DOMR_SSHPORT = 3922; protected boolean shutdownRouterVM(DomainRouterVO router) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Try to shutdown router VM " + router.getInstanceName() + " directly."); + if (logger.isDebugEnabled()) { + logger.debug("Try to shutdown router VM " + router.getInstanceName() + " directly."); } Pair result; @@ -1008,15 +1006,15 @@ protected boolean shutdownRouterVM(DomainRouterVO router) { result = SshHelper.sshExecute(router.getPrivateIpAddress(), DEFAULT_DOMR_SSHPORT, "root", getSystemVMKeyFile(), 
null, "poweroff -f"); if (!result.first()) { - s_logger.debug("Unable to shutdown " + router.getInstanceName() + " directly"); + logger.debug("Unable to shutdown " + router.getInstanceName() + " directly"); return false; } } catch (Throwable e) { - s_logger.warn("Unable to shutdown router " + router.getInstanceName() + " directly."); + logger.warn("Unable to shutdown router " + router.getInstanceName() + " directly."); return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shutdown router " + router.getInstanceName() + " successful."); + if (logger.isDebugEnabled()) { + logger.debug("Shutdown router " + router.getInstanceName() + " successful."); } return true; } @@ -1073,11 +1071,11 @@ public Map getNexusVSMCredentialsByClusterId(Long clusterId) { long vsmId = 0; if (vsmMapVO != null) { vsmId = vsmMapVO.getVsmId(); - s_logger.info("vsmId is " + vsmId); + logger.info("vsmId is " + vsmId); nexusVSM = _nexusDao.findById(vsmId); - s_logger.info("Fetching nexus vsm credentials from database."); + logger.info("Fetching nexus vsm credentials from database."); } else { - s_logger.info("Found empty vsmMapVO."); + logger.info("Found empty vsmMapVO."); return null; } @@ -1086,7 +1084,7 @@ public Map getNexusVSMCredentialsByClusterId(Long clusterId) { nexusVSMCredentials.put("vsmip", nexusVSM.getipaddr()); nexusVSMCredentials.put("vsmusername", nexusVSM.getUserName()); nexusVSMCredentials.put("vsmpassword", nexusVSM.getPassword()); - s_logger.info("Successfully fetched the credentials of Nexus VSM."); + logger.info("Successfully fetched the credentials of Nexus VSM."); } return nexusVSMCredentials; } @@ -1164,7 +1162,7 @@ public VmwareDatacenterVO addVmwareDatacenter(AddVmwareDcCmd cmd) throws Resourc Long associatedVmwareDcId = vmwareDcZoneMap.getVmwareDcId(); VmwareDatacenterVO associatedVmwareDc = vmwareDcDao.findById(associatedVmwareDcId); if (associatedVmwareDc.getVcenterHost().equalsIgnoreCase(vCenterHost) && 
associatedVmwareDc.getVmwareDatacenterName().equalsIgnoreCase(vmwareDcName)) { - s_logger.info("Ignoring API call addVmwareDc, because VMware DC " + vCenterHost + "/" + vmwareDcName + + logger.info("Ignoring API call addVmwareDc, because VMware DC " + vCenterHost + "/" + vmwareDcName + " is already associated with specified zone with id " + zoneId); return associatedVmwareDc; } else { @@ -1193,7 +1191,7 @@ public VmwareDatacenterVO addVmwareDatacenter(AddVmwareDcCmd cmd) throws Resourc dcMor = dcMo.getMor(); if (dcMor == null) { String msg = "Unable to find VMware DC " + vmwareDcName + " in vCenter " + vCenterHost + ". "; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -1389,7 +1387,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { dcMo = new DatacenterMO(context, vmwareDcName); } catch (Throwable t) { String msg = "Unable to find DC " + vmwareDcName + " in vCenter " + vCenterHost; - s_logger.error(msg); + logger.error(msg); throw new DiscoveryException(msg); } @@ -1397,10 +1395,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // Reset custom field property cloud.zone over this DC dcMo.setCustomFieldValue(CustomFieldConstants.CLOUD_ZONE, "false"); - s_logger.info("Sucessfully reset custom field property cloud.zone over DC " + vmwareDcName); + logger.info("Successfully reset custom field property cloud.zone over DC " + vmwareDcName); } catch (Exception e) { String msg = "Unable to reset custom field property cloud.zone over DC " + vmwareDcName + " due to : " + VmwareHelper.getExceptionMessage(e); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } finally { if (context != null) { @@ -1418,8 +1416,8 @@ private void validateZone(Long zoneId) throws InvalidParameterValueException { if (isLegacyZone(zoneId)) { throw new InvalidParameterValueException("The specified zone is legacy zone. 
Adding VMware datacenter to legacy zone is not supported."); } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("The specified zone is not legacy zone."); + if (logger.isTraceEnabled()) { + logger.trace("The specified zone is not legacy zone."); } } } @@ -1479,8 +1477,8 @@ private void doesZoneExist(Long zoneId) throws InvalidParameterValueException { if (zone == null) { throw new InvalidParameterValueException("Can't find zone by the id specified."); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Zone with id:[" + zoneId + "] exists."); + if (logger.isTraceEnabled()) { + logger.trace("Zone with id:[" + zoneId + "] exists."); } } @@ -1510,14 +1508,14 @@ public List importVsphereStoragePoliciesInternal String password = vmwareDatacenter.getPassword(); List storageProfiles = null; try { - s_logger.debug(String.format("Importing vSphere Storage Policies for the vmware DC %d in zone %d", vmwareDcId, zoneId)); + logger.debug(String.format("Importing vSphere Storage Policies for the vmware DC %d in zone %d", vmwareDcId, zoneId)); VmwareContext context = VmwareContextFactory.getContext(vCenterHost, userName, password); PbmProfileManagerMO profileManagerMO = new PbmProfileManagerMO(context); storageProfiles = profileManagerMO.getStorageProfiles(); - s_logger.debug(String.format("Import vSphere Storage Policies for the vmware DC %d in zone %d is successful", vmwareDcId, zoneId)); + logger.debug(String.format("Import vSphere Storage Policies for the vmware DC %d in zone %d is successful", vmwareDcId, zoneId)); } catch (Exception e) { String msg = String.format("Unable to list storage profiles from DC %s due to : %s", vmwareDcName, VmwareHelper.getExceptionMessage(e)); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -1571,7 +1569,7 @@ public List listVsphereStoragePolicyCompatibleStoragePools(ListVsph StorageFilerTO storageFilerTO = new StorageFilerTO(pool); List hostIds = storageManager.getUpHostsInPool(pool.getId()); 
if (CollectionUtils.isNullOrEmpty(hostIds)) { - s_logger.debug("Did not find a suitable host to verify compatibility of the pool " + pool.getName()); + logger.debug("Did not find a suitable host to verify compatibility of the pool " + pool.getName()); continue; } Collections.shuffle(hostIds); @@ -1584,7 +1582,7 @@ public List listVsphereStoragePolicyCompatibleStoragePools(ListVsph compatiblePools.add(pool); } } catch (AgentUnavailableException | OperationTimedoutException e) { - s_logger.error("Could not verify if storage policy " + storagePolicy.getName() + " is compatible with storage pool " + pool.getName()); + logger.error("Could not verify if storage policy " + storagePolicy.getName() + " is compatible with storage pool " + pool.getName()); } } return compatiblePools; @@ -1620,7 +1618,7 @@ public List listVMsInDatacenter(ListVmwareDcVmsCmd cmd) { } try { - s_logger.debug(String.format("Connecting to the VMware datacenter %s at vCenter %s to retrieve VMs", + logger.debug(String.format("Connecting to the VMware datacenter %s at vCenter %s to retrieve VMs", datacenterName, vcenter)); String serviceUrl = String.format("https://%s/sdk/vimService", vcenter); VmwareClient vimClient = new VmwareClient(vcenter); @@ -1632,7 +1630,7 @@ public List listVMsInDatacenter(ListVmwareDcVmsCmd cmd) { if (dcMor == null) { String msg = String.format("Unable to find VMware datacenter %s in vCenter %s", datacenterName, vcenter); - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } List instances = dcMo.getAllVmsOnDatacenter(); @@ -1641,7 +1639,7 @@ public List listVMsInDatacenter(ListVmwareDcVmsCmd cmd) { } catch (Exception e) { String errorMsg = String.format("Error retrieving stopped VMs from the VMware VC %s datacenter %s: %s", vcenter, datacenterName, e.getMessage()); - s_logger.error(errorMsg, e); + logger.error(errorMsg, e); throw new CloudRuntimeException(errorMsg); } } @@ -1652,25 +1650,25 @@ public boolean hasNexusVSM(Long clusterId) 
{ vsmMapVo = _vsmMapDao.findByClusterId(clusterId); if (vsmMapVo == null) { - s_logger.info("There is no instance of Nexus 1000v VSM associated with this cluster [Id:" + clusterId + "] yet."); + logger.info("There is no instance of Nexus 1000v VSM associated with this cluster [Id:" + clusterId + "] yet."); return false; } else { - s_logger.info("An instance of Nexus 1000v VSM [Id:" + vsmMapVo.getVsmId() + "] associated with this cluster [Id:" + clusterId + "]"); + logger.info("An instance of Nexus 1000v VSM [Id:" + vsmMapVo.getVsmId() + "] associated with this cluster [Id:" + clusterId + "]"); return true; } } private void startTemplateCleanJobSchedule() { - if(s_logger.isDebugEnabled()) { - s_logger.debug("checking to see if we should schedule a job to search for fully cloned templates to clean-up"); + if(logger.isDebugEnabled()) { + logger.debug("checking to see if we should schedule a job to search for fully cloned templates to clean-up"); } if(StorageManager.StorageCleanupEnabled.value() && StorageManager.TemplateCleanupEnabled.value() && templateCleanupInterval.value() > 0) { try { - if (s_logger.isInfoEnabled()) { - s_logger.info("scheduling job to search for fully cloned templates to clean-up once per " + templateCleanupInterval.value() + " minutes."); + if (logger.isInfoEnabled()) { + logger.info("scheduling job to search for fully cloned templates to clean-up once per " + templateCleanupInterval.value() + " minutes."); } // futureTemplateCleanup = Runnable task = getCleanupFullyClonedTemplatesTask(); @@ -1678,21 +1676,21 @@ private void startTemplateCleanJobSchedule() { templateCleanupInterval.value(), templateCleanupInterval.value(), TimeUnit.MINUTES); - if (s_logger.isTraceEnabled()) { - s_logger.trace("scheduled job to search for fully cloned templates to clean-up."); + if (logger.isTraceEnabled()) { + logger.trace("scheduled job to search for fully cloned templates to clean-up."); } } catch (RejectedExecutionException ree) { - s_logger.error("job to 
search for fully cloned templates cannot be scheduled"); - s_logger.debug("job to search for fully cloned templates cannot be scheduled;", ree); + logger.error("job to search for fully cloned templates cannot be scheduled"); + logger.debug("job to search for fully cloned templates cannot be scheduled;", ree); } catch (NullPointerException npe) { - s_logger.error("job to search for fully cloned templates is invalid"); - s_logger.debug("job to search for fully cloned templates is invalid;", npe); + logger.error("job to search for fully cloned templates is invalid"); + logger.debug("job to search for fully cloned templates is invalid;", npe); } catch (IllegalArgumentException iae) { - s_logger.error("job to search for fully cloned templates is scheduled at invalid intervals"); - s_logger.debug("job to search for fully cloned templates is scheduled at invalid intervals;", iae); + logger.error("job to search for fully cloned templates is scheduled at invalid intervals"); + logger.debug("job to search for fully cloned templates is scheduled at invalid intervals;", iae); } catch (Exception e) { - s_logger.error("job to search for fully cloned templates failed for unknown reasons"); - s_logger.debug("job to search for fully cloned templates failed for unknown reasons;", e); + logger.error("job to search for fully cloned templates failed for unknown reasons"); + logger.debug("job to search for fully cloned templates failed for unknown reasons;", e); } } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index 7e6b8c19ad4d..6203d5b9a94c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -32,7 +32,8 @@ import 
org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.BackupSnapshotAnswer; @@ -125,20 +126,20 @@ public boolean execute(VmwareHostService hostService, CreateEntityDownloadURLCom @Override public void createOva(String path, String name, int archiveTimeout) { - Script commandSync = new Script(true, "sync", 0, s_logger); + Script commandSync = new Script(true, "sync", 0, logger); commandSync.execute(); - Script command = new Script(false, "tar", archiveTimeout, s_logger); + Script command = new Script(false, "tar", archiveTimeout, logger); command.setWorkDir(path); command.add("-cf", name + ".ova"); command.add(name + ".ovf"); // OVF file should be the first file in OVA archive command.add(name + "-disk0.vmdk"); - s_logger.info("Package OVA with command: " + command.toString()); + logger.info("Package OVA with command: " + command.toString()); command.execute(); } - private static final Logger s_logger = Logger.getLogger(VmwareStorageManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private final VmwareStorageMount _mountService; private final StorageLayer _storage = new JavaStorageLayer(); @@ -157,7 +158,7 @@ public VmwareStorageManagerImpl(VmwareStorageMount mountService, String nfsVersi } public void configure(Map params) { - s_logger.info("Configure VmwareStorageManagerImpl"); + logger.info("Configure VmwareStorageManagerImpl"); String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; @@ -167,7 +168,7 @@ public void configure(Map params) { public String createOvaForTemplate(TemplateObjectTO template, int archiveTimeout) { DataStoreTO storeTO = template.getDataStore(); if (!(storeTO instanceof NfsTO)) { - 
s_logger.debug("Can only handle NFS storage, while creating OVA from template"); + logger.debug("Can only handle NFS storage, while creating OVA from template"); return null; } NfsTO nfsStore = (NfsTO)storeTO; @@ -179,20 +180,20 @@ public String createOvaForTemplate(TemplateObjectTO template, int archiveTimeout try { if (installFullPath.endsWith(".ova")) { if (new File(installFullPath).exists()) { - s_logger.debug("OVA file found at: " + installFullPath); + logger.debug("OVA file found at: " + installFullPath); } else { if (new File(installFullPath + ".meta").exists()) { createOVAFromMetafile(installFullPath + ".meta", archiveTimeout); } else { String msg = "Unable to find OVA or OVA MetaFile to prepare template."; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } return installPath; } } catch (Throwable e) { - s_logger.debug("Failed to create OVA: " + e.toString()); + logger.debug("Failed to create OVA: " + e.toString()); } return null; } @@ -202,7 +203,7 @@ public String createOvaForTemplate(TemplateObjectTO template, int archiveTimeout public String createOvaForVolume(VolumeObjectTO volume, int archiveTimeout) { DataStoreTO storeTO = volume.getDataStore(); if (!(storeTO instanceof NfsTO)) { - s_logger.debug("can only handle nfs storage, when create ova from volume"); + logger.debug("can only handle nfs storage, when create ova from volume"); return null; } NfsTO nfsStore = (NfsTO)storeTO; @@ -219,12 +220,12 @@ public String createOvaForVolume(VolumeObjectTO volume, int archiveTimeout) { try { if (new File(secondaryMountPoint + File.separator + volumePath).exists()) { - s_logger.debug("ova already exists:" + volumePath); + logger.debug("ova already exists:" + volumePath); return volumePath; } else { - Script commandSync = new Script(true, "sync", 0, s_logger); + Script commandSync = new Script(true, "sync", 0, logger); commandSync.execute(); - Script command = new Script(false, "tar", archiveTimeout, s_logger); + Script command = new 
Script(false, "tar", archiveTimeout, logger); command.setWorkDir(installFullPath); command.add("-cf", volumeUuid + ".ova"); command.add(volumeUuid + ".ovf"); // OVF file should be the first file in OVA archive @@ -237,7 +238,7 @@ public String createOvaForVolume(VolumeObjectTO volume, int archiveTimeout) { } } catch (Throwable e) { - s_logger.info("Exception for createVolumeOVA"); + logger.info("Exception for createVolumeOVA"); } return null; } @@ -284,8 +285,8 @@ public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadComma VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true); if (templateMo == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Template " + templateName + " is not setup yet, setup template from secondary storage with uuid name: " + templateUuidName); + if (logger.isInfoEnabled()) { + logger.info("Template " + templateName + " is not setup yet, setup template from secondary storage with uuid name: " + templateUuidName); } ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPoolUuid()); assert (morDs != null); @@ -293,7 +294,7 @@ public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadComma copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName, cmd.getNfsVersion()); } else { - s_logger.info("Template " + templateName + " has already been setup, skip the template setup process in primary storage"); + logger.info("Template " + templateName + " has already been setup, skip the template setup process in primary storage"); } return new PrimaryStorageDownloadAnswer(templateUuidName, 0); @@ -332,8 +333,8 @@ public Answer execute(VmwareHostService hostService, BackupSnapshotCommand cmd) try { vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName()); if (vmMo == null) { - if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); } vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName()); @@ -379,7 +380,7 @@ public Answer execute(VmwareHostService hostService, BackupSnapshotCommand cmd) workerVm.detachAllDisksAndDestroy(); } } catch (Throwable e) { - s_logger.warn(String.format("Failed to destroy worker VM [%s] due to: [%s].", workerVMName, e.getMessage()), e); + logger.warn(String.format("Failed to destroy worker VM [%s] due to: [%s].", workerVMName, e.getMessage()), e); } } } catch (Throwable e) { @@ -403,14 +404,14 @@ public Answer execute(VmwareHostService hostService, CreatePrivateTemplateFromVo VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName()); if (vmMo == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter"); } vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName()); if (vmMo == null) { String msg = "Unable to find the owner VM for volume operation. 
vm: " + cmd.getVmName(); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -501,7 +502,7 @@ public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCom ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel); if (morPrimaryDs == null) { String msg = "Unable to find datastore: " + primaryStorageNameLabel; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -523,25 +524,25 @@ public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCom private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, String templateName, String templateUuid, String nfsVersion) throws Exception { - s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName); String secondaryMountPoint = _mountService.getMountPoint(secondaryStorageUrl, nfsVersion); - s_logger.info("Secondary storage mount point: " + secondaryMountPoint); + logger.info("Secondary storage mount point: " + secondaryMountPoint); String srcOVAFileName = secondaryMountPoint + "/" + templatePathAtSecondaryStorage + templateName + "." 
+ ImageFormat.OVA.getFileExtension(); String srcFileName = getOVFFilePath(srcOVAFileName); if (srcFileName == null) { - Script command = new Script("tar", 0, s_logger); + Script command = new Script("tar", 0, logger); command.add("--no-same-owner"); command.add("-xf", srcOVAFileName); command.setWorkDir(secondaryMountPoint + "/" + templatePathAtSecondaryStorage); - s_logger.info("Executing command: " + command.toString()); + logger.info("Executing command: " + command.toString()); String result = command.execute(); if (result != null) { String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -549,7 +550,7 @@ private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, srcFileName = getOVFFilePath(srcOVAFileName); if (srcFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -560,7 +561,7 @@ private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, if (vmMo == null) { String msg = "Failed to import OVA template. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName + ", templateUuid: " + templateUuid; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -570,7 +571,7 @@ private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, } else { vmMo.destroy(); String msg = "Unable to create base snapshot for template, templateName: " + templateName + ", templateUuid: " + templateUuid; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -582,14 +583,14 @@ private Ternary createTemplateFromVolume(VirtualMachineMO vm String installPath = getTemplateRelativeDirInSecStorage(accountId, templateId); String installFullPath = secondaryMountPoint + "/" + installPath; synchronized (installPath.intern()) { - Script command = new Script(false, "mkdir", _timeout, s_logger); + Script command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(installFullPath); String result = command.execute(); if (result != null) { String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -599,13 +600,13 @@ private Ternary createTemplateFromVolume(VirtualMachineMO vm Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath); if (volumeDeviceInfo == null) { String msg = "Unable to find related disk device for volume. volume path: " + volumePath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } if (!vmMo.createSnapshot(templateUniqueName, "Temporary snapshot for template creation", false, false)) { String msg = "Unable to take snapshot for creating template from volume. 
volume path: " + volumePath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -615,7 +616,7 @@ private Ternary createTemplateFromVolume(VirtualMachineMO vm clonedVm = vmMo.getRunningHost().findVmOnHyperHost(workerVmName); if (clonedVm == null) { String msg = "Unable to create dummy VM to export volume. volume path: " + volumePath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -659,64 +660,64 @@ private Ternary createTemplateFromSnapshot(long accountId, l String snapshotFullVMDKName = snapshotRoot + "/" + backupSSUuid + "/"; synchronized (installPath.intern()) { - command = new Script(false, "mkdir", _timeout, s_logger); + command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(installFullPath); result = command.execute(); if (result != null) { String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } try { if (new File(snapshotFullOVAName).exists()) { - command = new Script(false, "cp", _timeout, s_logger); + command = new Script(false, "cp", _timeout, logger); command.add(snapshotFullOVAName); command.add(installFullOVAName); result = command.execute(); if (result != null) { String msg = "unable to copy snapshot " + snapshotFullOVAName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } // untar OVA file at template directory - command = new Script("tar", 0, s_logger); + command = new Script("tar", 0, logger); command.add("--no-same-owner"); command.add("-xf", installFullOVAName); command.setWorkDir(installFullPath); - s_logger.info("Executing command: " + command.toString()); + logger.info("Executing command: " + command.toString()); result = command.execute(); if (result != null) { String msg = "unable to untar snapshot " + snapshotFullOVAName + " to " + installFullPath; - 
s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } else { // there is no ova file, only ovf originally; if (new File(snapshotFullOvfName).exists()) { - command = new Script(false, "cp", _timeout, s_logger); + command = new Script(false, "cp", _timeout, logger); command.add(snapshotFullOvfName); //command.add(installFullOvfName); command.add(installFullPath); result = command.execute(); if (result != null) { String msg = "unable to copy snapshot " + snapshotFullOvfName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } - s_logger.info("vmdkfile parent dir: " + snapshotFullVMDKName); + logger.info("vmdkfile parent dir: " + snapshotFullVMDKName); File snapshotdir = new File(snapshotFullVMDKName); // File snapshotdir = new File(snapshotRoot); File[] ssfiles = snapshotdir.listFiles(); // List filenames = new ArrayList(); for (int i = 0; i < ssfiles.length; i++) { String vmdkfile = ssfiles[i].getName(); - s_logger.info("vmdk file name: " + vmdkfile); + logger.info("vmdk file name: " + vmdkfile); if (vmdkfile.toLowerCase().startsWith(backupSSUuid) && vmdkfile.toLowerCase().endsWith(".vmdk")) { snapshotFullVMDKName += vmdkfile; templateVMDKName += vmdkfile; @@ -724,20 +725,20 @@ private Ternary createTemplateFromSnapshot(long accountId, l } } if (snapshotFullVMDKName != null) { - command = new Script(false, "cp", _timeout, s_logger); + command = new Script(false, "cp", _timeout, logger); command.add(snapshotFullVMDKName); command.add(installFullPath); result = command.execute(); - s_logger.info("Copy VMDK file: " + snapshotFullVMDKName); + logger.info("Copy VMDK file: " + snapshotFullVMDKName); if (result != null) { String msg = "unable to copy snapshot vmdk file " + snapshotFullVMDKName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } } else { String msg = "unable to find any snapshot ova/ovf files" + snapshotFullOVAName + " to " + installFullPath; - 
s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -848,20 +849,20 @@ private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, Datasto if (!ovfFile.exists()) { srcOVFFileName = getOVFFilePath(srcOVAFileName); if (srcOVFFileName == null && ovafile.exists()) { // volss: ova file exists; o/w can't do tar - Script command = new Script("tar", 0, s_logger); + Script command = new Script("tar", 0, logger); command.add("--no-same-owner"); command.add("-xf", srcOVAFileName); command.setWorkDir(secondaryMountPoint + "/" + secStorageDir + "/" + snapshotDir); - s_logger.info("Executing command: " + command.toString()); + logger.info("Executing command: " + command.toString()); String result = command.execute(); if (result != null) { String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } else { String msg = "Unable to find snapshot OVA file at: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -869,7 +870,7 @@ private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, Datasto } if (srcOVFFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -906,14 +907,14 @@ private void exportVolumeToSecondaryStorage(VirtualMachineMO vmMo, String volume synchronized (exportPath.intern()) { if (!new File(exportPath).exists()) { - Script command = new Script(false, "mkdir", _timeout, s_logger); + Script command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(exportPath); String result = command.execute(); if (result != null) { String errorMessage = String.format("Unable to prepare snapshot backup directory: [%s] due to [%s].", exportPath, result); - s_logger.error(errorMessage); + logger.error(errorMessage); throw new Exception(errorMessage); } } 
@@ -925,7 +926,7 @@ private void exportVolumeToSecondaryStorage(VirtualMachineMO vmMo, String volume Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath); if (volumeDeviceInfo == null) { String msg = "Unable to find related disk device for volume. volume path: " + volumePath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -936,7 +937,7 @@ private void exportVolumeToSecondaryStorage(VirtualMachineMO vmMo, String volume clonedVm = vmMo.getRunningHost().findVmOnHyperHost(workerVmName); if (clonedVm == null) { String msg = String.format("Unable to create dummy VM to export volume. volume path: [%s].", volumePath); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } clonedVm.exportVm(exportPath, exportName, false, false); //Note: volss: not to create ova. @@ -965,7 +966,7 @@ private Pair copyVolumeToSecStorage(VmwareHostService hostServic if (morDs == null) { String msg = "Unable to find volumes's storage pool for copy volume operation"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -978,7 +979,7 @@ private Pair copyVolumeToSecStorage(VmwareHostService hostServic if (workerVm == null) { String msg = "Unable to create worker VM to execute CopyVolumeCommand"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -1029,20 +1030,20 @@ private String createOVAFromMetafile(String metafileName, int archiveTimeout) th File ova_metafile = new File(metafileName); Properties props = null; String ovaFileName = ""; - s_logger.info("Creating OVA using MetaFile: " + metafileName); + logger.info("Creating OVA using MetaFile: " + metafileName); try (FileInputStream strm = new FileInputStream(ova_metafile);) { - s_logger.info("loading properties from ova meta file: " + metafileName); + logger.info("loading properties from ova meta file: " + metafileName); props = new Properties(); props.load(strm); ovaFileName = props.getProperty("ova.filename"); - s_logger.info("ovafilename: " + 
ovaFileName); + logger.info("ovafilename: " + ovaFileName); String ovfFileName = props.getProperty("ovf"); - s_logger.info("ovffilename: " + ovfFileName); + logger.info("ovffilename: " + ovfFileName); int diskNum = Integer.parseInt(props.getProperty("numDisks")); if (diskNum <= 0) { String msg = "VMDK disk file number is 0. Error"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } String[] disks = new String[diskNum]; @@ -1050,16 +1051,16 @@ private String createOVAFromMetafile(String metafileName, int archiveTimeout) th // String diskNameKey = "disk" + Integer.toString(i+1) + ".name"; // Fang use this String diskNameKey = "disk1.name"; disks[i] = props.getProperty(diskNameKey); - s_logger.info("diskname " + disks[i]); + logger.info("diskname " + disks[i]); } String exportDir = ova_metafile.getParent(); - s_logger.info("exportDir: " + exportDir); + logger.info("exportDir: " + exportDir); // Important! we need to sync file system before we can safely use tar to work around a linux kernel bug(or feature) - s_logger.info("Sync file system before we package OVA..., before tar "); - s_logger.info("ova: " + ovaFileName + ", ovf:" + ovfFileName + ", vmdk:" + disks[0] + "."); - Script commandSync = new Script(true, "sync", 0, s_logger); + logger.info("Sync file system before we package OVA..., before tar "); + logger.info("ova: " + ovaFileName + ", ovf:" + ovfFileName + ", vmdk:" + disks[0] + "."); + Script commandSync = new Script(true, "sync", 0, logger); commandSync.execute(); - Script command = new Script(false, "tar", archiveTimeout, s_logger); + Script command = new Script(false, "tar", archiveTimeout, logger); command.setWorkDir(exportDir); // Fang: pass this in to the method? 
command.add("-cf", ovaFileName); command.add(ovfFileName); // OVF file should be the first file in OVA archive @@ -1067,18 +1068,18 @@ private String createOVAFromMetafile(String metafileName, int archiveTimeout) th command.add(diskName); } command.execute(); - s_logger.info("Package OVA for template in dir: " + exportDir + "cmd: " + command.toString()); + logger.info("Package OVA for template in dir: " + exportDir + "cmd: " + command.toString()); // to be safe, physically test existence of the target OVA file if ((new File(exportDir + File.separator + ovaFileName)).exists()) { - s_logger.info("OVA file: " + ovaFileName + " is created and ready to extract."); + logger.info("OVA file: " + ovaFileName + " is created and ready to extract."); return ovaFileName; } else { String msg = exportDir + File.separator + ovaFileName + " is not created as expected"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } catch (Exception e) { - s_logger.error("Exception while creating OVA using Metafile", e); + logger.error("Exception while creating OVA using Metafile", e); throw e; } @@ -1195,7 +1196,7 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna if (info.getEntityName().equals(cmd.getVmName()) && org.apache.commons.lang3.StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) { if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) { - s_logger.debug("There is already a VM snapshot task running, wait for it"); + logger.debug("There is already a VM snapshot task running, wait for it"); context.getVimClient().waitForTask(taskMor); } } @@ -1209,12 +1210,12 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna if (vmMo == null) { String msg = "Unable to find VM for CreateVMSnapshotCommand"; - s_logger.info(msg); + logger.info(msg); return new CreateVMSnapshotAnswer(cmd, false, msg); } else { if 
(vmMo.getSnapshotMor(vmSnapshotName) != null) { - s_logger.info("VM snapshot " + vmSnapshotName + " already exists"); + logger.info("VM snapshot " + vmSnapshotName + " already exists"); } else if (!vmMo.createSnapshot(vmSnapshotName, vmSnapshotDesc, snapshotMemory, quiescevm)) { return new CreateVMSnapshotAnswer(cmd, false, "Unable to create snapshot due to esxi internal failed"); } @@ -1225,14 +1226,14 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna } } catch (Exception e) { String msg = e.getMessage(); - s_logger.error("failed to create snapshot for vm:" + vmName + " due to " + msg, e); + logger.error("failed to create snapshot for vm:" + vmName + " due to " + msg, e); try { if (vmMo.getSnapshotMor(vmSnapshotName) != null) { vmMo.removeSnapshot(vmSnapshotName, false); } } catch (Exception e1) { - s_logger.info("[ignored]" + "error during snapshot remove: " + e1.getLocalizedMessage()); + logger.info("[ignored]" + "error during snapshot remove: " + e1.getLocalizedMessage()); } return new CreateVMSnapshotAnswer(cmd, false, e.getMessage()); @@ -1331,7 +1332,7 @@ private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseN return morDs; } } catch (Exception ex) { - s_logger.info("[ignored]" + "error getting managed object refference: " + ex.getLocalizedMessage()); + logger.info("[ignored]" + "error getting managed object refference: " + ex.getLocalizedMessage()); } // not managed storage, so use the standard way of getting a ManagedObjectReference for a datastore @@ -1356,22 +1357,22 @@ public DeleteVMSnapshotAnswer execute(VmwareHostService hostService, DeleteVMSna if (vmMo == null) { String msg = "Unable to find VM for RevertToVMSnapshotCommand"; - s_logger.debug(msg); + logger.debug(msg); return new DeleteVMSnapshotAnswer(cmd, false, msg); } else { if (vmMo.getSnapshotMor(vmSnapshotName) == null) { - s_logger.debug("can not find the snapshot " + vmSnapshotName + ", assume it is already removed"); + 
logger.debug("can not find the snapshot " + vmSnapshotName + ", assume it is already removed"); } else { if (!vmMo.removeSnapshot(vmSnapshotName, false)) { String msg = "delete vm snapshot " + vmSnapshotName + " due to error occurred in vmware"; - s_logger.error(msg); + logger.error(msg); return new DeleteVMSnapshotAnswer(cmd, false, msg); } } - s_logger.debug("snapshot: " + vmSnapshotName + " is removed"); + logger.debug("snapshot: " + vmSnapshotName + " is removed"); // after removed snapshot, the volumes' paths have been changed for the VM, needs to report new paths to manager @@ -1381,7 +1382,7 @@ public DeleteVMSnapshotAnswer execute(VmwareHostService hostService, DeleteVMSna } } catch (Exception e) { String msg = e.getMessage(); - s_logger.error("failed to delete vm snapshot " + vmSnapshotName + " of vm " + vmName + " due to " + msg, e); + logger.error("failed to delete vm snapshot " + vmSnapshotName + " of vm " + vmName + " due to " + msg, e); return new DeleteVMSnapshotAnswer(cmd, false, msg); } @@ -1408,7 +1409,7 @@ public RevertToVMSnapshotAnswer execute(VmwareHostService hostService, RevertToV TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info")); if (info.getEntityName().equals(cmd.getVmName()) && org.apache.commons.lang3.StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) { - s_logger.debug("There is already a VM snapshot task running, wait for it"); + logger.debug("There is already a VM snapshot task running, wait for it"); context.getVimClient().waitForTask(taskMor); } } @@ -1422,7 +1423,7 @@ public RevertToVMSnapshotAnswer execute(VmwareHostService hostService, RevertToV if (vmMo == null) { String msg = "Unable to find VM for RevertToVMSnapshotCommand"; - s_logger.debug(msg); + logger.debug(msg); return new RevertToVMSnapshotAnswer(cmd, false, msg); } else { @@ -1454,7 +1455,7 @@ public RevertToVMSnapshotAnswer execute(VmwareHostService hostService, RevertToV } } catch 
(Exception e) { String msg = "revert vm " + vmName + " to snapshot " + snapshotName + " failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new RevertToVMSnapshotAnswer(cmd, false, msg); } @@ -1469,7 +1470,7 @@ private String deleteVolumeDirOnSecondaryStorage(long volumeId, String secStorag private String deleteDir(String dir) { synchronized (dir.intern()) { - Script command = new Script(false, "rm", _timeout, s_logger); + Script command = new Script(false, "rm", _timeout, logger); command.add("-rf"); command.add(dir); return command.execute(); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index 3ed5939aac55..9039c0f830cf 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -19,7 +19,8 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.cluster.ClusterManager; @@ -31,7 +32,7 @@ @Component public class VmwareContextFactory { - private static final Logger s_logger = Logger.getLogger(VmwareContextFactory.class); + protected static Logger LOGGER = LogManager.getLogger(VmwareContextFactory.class); private static volatile int s_seq = 1; private static VmwareManager s_vmwareMgr; @@ -61,8 +62,8 @@ public static VmwareContext create(String vCenterAddress, String vCenterUserName assert (vCenterPassword != null); String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - if (s_logger.isDebugEnabled()) - s_logger.debug("initialize VmwareContext. 
url: " + serviceUrl + ", username: " + vCenterUserName + ", password: " + + if (LOGGER.isDebugEnabled()) + LOGGER.debug("initialize VmwareContext. url: " + serviceUrl + ", username: " + vCenterUserName + ", password: " + StringUtils.getMaskedPasswordForDisplay(vCenterPassword)); VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); @@ -88,7 +89,7 @@ public static VmwareContext getContext(String vCenterAddress, String vCenterUser } else { // Validate current context and verify if vCenter session timeout value of the context matches the timeout value set by Admin if (!context.validate() || (context.getVimClient().getVcenterSessionTimeout() != s_vmwareMgr.getVcenterSessionTimeout())) { - s_logger.info("Validation of the context failed, dispose and create a new one"); + LOGGER.info("Validation of the context failed, dispose and create a new one"); context.close(); context = create(vCenterAddress, vCenterUserName, vCenterPassword); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 408904f1d292..bafe52b4d79d 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -73,8 +73,7 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.math.NumberUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; +import org.apache.logging.log4j.ThreadContext; import org.joda.time.Duration; import com.cloud.agent.IAgentControl; @@ -376,7 +375,6 @@ import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; public class VmwareResource extends ServerResourceBase implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer { - private 
static final Logger s_logger = Logger.getLogger(VmwareResource.class); public static final String VMDK_EXTENSION = ".vmdk"; private static final String EXECUTING_RESOURCE_COMMAND = "Executing resource command %s: [%s]."; public static final String BASEPATH = "/usr/share/cloudstack-common/vms/"; @@ -466,7 +464,7 @@ private String getCommandLogTitle(Command cmd) { public Answer executeRequest(Command cmd) { logCommand(cmd); Answer answer = null; - NDC.push(getCommandLogTitle(cmd)); + ThreadContext.push(getCommandLogTitle(cmd)); try { long cmdSequence = _cmdSequence++; Date startTime = DateUtil.currentGMTTime(); @@ -638,18 +636,18 @@ public Answer executeRequest(Command cmd) { JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); } } catch (Exception e) { - if (s_logger.isTraceEnabled()) - s_logger.trace("Unable to register JMX monitoring due to exception " + ExceptionUtil.toString(e)); + if (logger.isTraceEnabled()) + logger.trace("Unable to register JMX monitoring due to exception " + ExceptionUtil.toString(e)); } } } finally { recycleServiceContext(); - NDC.pop(); + ThreadContext.pop(); } - if (s_logger.isTraceEnabled()) - s_logger.trace("End executeRequest(), cmd: " + cmd.getClass().getSimpleName()); + if (logger.isTraceEnabled()) + logger.trace("End executeRequest(), cmd: " + cmd.getClass().getSimpleName()); return answer; } @@ -660,12 +658,12 @@ private ExecutionResult getSystemVmVersionAndChecksum(String controlIp) { result = executeInVR(controlIp, VRScripts.VERSION, null); if (!result.isSuccess()) { String errMsg = String.format("GetSystemVMVersionCmd on %s failed, message %s", controlIp, result.getDetails()); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (final Exception e) { final String msg = "GetSystemVMVersionCmd failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); throw new 
CloudRuntimeException(msg, e); } return result; @@ -695,7 +693,7 @@ private Answer execute(PatchSystemVmCommand cmd) { if (!org.apache.commons.lang3.StringUtils.isEmpty(checksum) && checksum.equals(scriptChecksum) && !cmd.isForced()) { String msg = String.format("No change in the scripts checksum, not patching systemVM %s", sysVMName); - s_logger.info(msg); + logger.info(msg); return new PatchSystemVmAnswer(cmd, msg, lines[0], lines[1]); } @@ -712,7 +710,7 @@ private Answer execute(PatchSystemVmCommand cmd) { String res = patchResult.second().replace("\n", " "); String[] output = res.split(":"); if (output.length != 2) { - s_logger.warn("Failed to get the latest script version"); + logger.warn("Failed to get the latest script version"); } else { scriptVersion = output[1].split(" ")[0]; } @@ -769,9 +767,9 @@ protected void reconfigureProcessorByHandler(EnumMap vdisk = vmMo.getDiskDevice(volumePath); if (vdisk == null) { String errorMsg = String.format("Resize volume of VM [name: %s] failed because disk device [path: %s] doesn't exist.", vmMo.getVmName(), volumePath); - s_logger.error(errorMsg); + logger.error(errorMsg); throw new Exception(errorMsg); } @@ -1039,7 +1037,7 @@ private VirtualDisk getDiskAfterResizeDiskValidations(VirtualMachineMO vmMo, Str if (vdisk.second() != null && vdisk.second().toLowerCase().contains("ide")) { String errorMsg = String.format("Re-sizing a virtual disk over an IDE controller is not supported in the VMware hypervisor. 
" + "Please re-try when virtual disk is attached to VM [name: %s] using a SCSI controller.", vmMo.getVmName()); - s_logger.error(errorMsg); + logger.error(errorMsg); throw new Exception(errorMsg); } @@ -1047,7 +1045,7 @@ private VirtualDisk getDiskAfterResizeDiskValidations(VirtualMachineMO vmMo, Str if ((VirtualDiskFlatVer2BackingInfo) disk.getBacking() != null && ((VirtualDiskFlatVer2BackingInfo) disk.getBacking()).getParent() != null) { String errorMsg = String.format("Resize of volume in VM [name: %s] is not supported because Disk device [path: %s] has Parents: [%s].", vmMo.getVmName(), volumePath, ((VirtualDiskFlatVer2BackingInfo) disk.getBacking()).getParent().getUuid()); - s_logger.error(errorMsg); + logger.error(errorMsg); throw new Exception(errorMsg); } return disk; @@ -1063,8 +1061,8 @@ private Pair getNewPathAndChainInfoInDatastoreCluster(VirtualMac String[] diskChain = matchingExistingDisk.getDiskChain(); DatastoreFile file = new DatastoreFile(diskChain[0]); if (!file.getFileBaseName().equalsIgnoreCase(path)) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + path + " -> " + file.getFileBaseName()); + if (logger.isInfoEnabled()) + logger.info("Detected disk-chain top file change on volume: " + path + " -> " + file.getFileBaseName()); path = file.getFileBaseName(); chainInfo = _gson.toJson(matchingExistingDisk); return new Pair<>(path, chainInfo); @@ -1088,7 +1086,7 @@ private Pair getNewPoolUUIDAndChainInfoInDatastoreCluster(Virtua if (diskDatastoreMofromVM != null) { String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); if (!actualPoolUuid.equalsIgnoreCase(poolUUID)) { - s_logger.warn(String.format("Volume %s found to be in a different storage pool %s", path, actualPoolUuid)); + logger.warn(String.format("Volume %s found to be in a different storage pool %s", path, actualPoolUuid)); poolUUID = actualPoolUuid; chainInfo = _gson.toJson(matchingExistingDisk); 
return new Pair<>(poolUUID, chainInfo); @@ -1150,13 +1148,13 @@ protected long[] getVPCNetworkStats(String privateIp, String publicIp, String op ExecutionResult callResult = executeInVR(privateIp, "vpc_netusage.sh", args); if (!callResult.isSuccess()) { - s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. failure due to " + callResult.getDetails()); + logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. failure due to " + callResult.getDetails()); } if (option.equals("get") || option.equals("vpn")) { String result = callResult.getDetails(); if (result == null || result.isEmpty()) { - s_logger.error(" vpc network usage get returns empty "); + logger.error(" vpc network usage get returns empty "); } long[] stats = new long[2]; if (result != null) { @@ -1178,10 +1176,10 @@ protected long[] getNetworkLbStats(String privateIp, String publicIp, Integer po String result = callResult.getDetails(); if (!Boolean.TRUE.equals(callResult.isSuccess())) { - s_logger.error(String.format("Unable to get network loadbalancer stats on DomR (%s), domR may not be ready yet. failure due to %s", privateIp, callResult.getDetails())); + logger.error(String.format("Unable to get network loadbalancer stats on DomR (%s), domR may not be ready yet. 
failure due to %s", privateIp, callResult.getDetails())); result = null; } else if (result == null || result.isEmpty()) { - s_logger.error("Get network loadbalancer stats returns empty result"); + logger.error("Get network loadbalancer stats returns empty result"); } long[] stats = new long[1]; if (result != null) { @@ -1234,7 +1232,7 @@ public ExecutionResult createFileInVR(String routerIp, String filePath, String f try { SshHelper.scpTo(routerIp, 3922, "root", keyFile, null, filePath, content.getBytes("UTF-8"), fileName, null); } catch (Exception e) { - s_logger.warn("Fail to create file " + filePath + fileName + " in VR " + routerIp, e); + logger.warn("Fail to create file " + filePath + fileName + " in VR " + routerIp, e); return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); @@ -1279,7 +1277,7 @@ public ExecutionResult cleanupCommand(NetworkElementCommand cmd) { // private int findRouterEthDeviceIndex(String domrName, String routerIp, String mac) throws Exception { File keyFile = getSystemVmKeyFile(); - s_logger.info("findRouterEthDeviceIndex. mac: " + mac); + logger.info("findRouterEthDeviceIndex. 
mac: " + mac); ArrayList skipInterfaces = new ArrayList(Arrays.asList("all", "default", "lo")); // when we dynamically plug in a new NIC into virtual router, it may take time to show up in guest OS @@ -1296,13 +1294,13 @@ private int findRouterEthDeviceIndex(String domrName, String routerIp, String ma if (!(skipInterfaces.contains(token))) { String cmd = String.format("ip address show %s | grep link/ether | sed -e 's/^[ \t]*//' | cut -d' ' -f2", token); - if (s_logger.isDebugEnabled()) - s_logger.debug("Run domr script " + cmd); + if (logger.isDebugEnabled()) + logger.debug("Run domr script " + cmd); Pair result2 = SshHelper.sshExecute(routerIp, DefaultDomRSshPort, "root", keyFile, null, // TODO need to find the dev index inside router based on IP address cmd); - if (s_logger.isDebugEnabled()) - s_logger.debug("result: " + result2.first() + ", output: " + result2.second()); + if (logger.isDebugEnabled()) + logger.debug("result: " + result2.first() + ", output: " + result2.second()); if (result2.first() && result2.second().trim().equalsIgnoreCase(mac.trim())) { return Integer.parseInt(token.substring(3)); @@ -1313,13 +1311,13 @@ private int findRouterEthDeviceIndex(String domrName, String routerIp, String ma } } - s_logger.warn("can not find intereface associated with mac: " + mac + ", guest OS may still at loading state, retry..."); + logger.warn("can not find intereface associated with mac: " + mac + ", guest OS may still at loading state, retry..."); try { Thread.currentThread(); Thread.sleep(1000); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while trying to get mac."); + logger.debug("[ignored] interrupted while trying to get mac."); } } @@ -1348,7 +1346,7 @@ protected ExecutionResult prepareNetworkElementCommand(SetupGuestNetworkCommand nic.setDeviceId(ethDeviceNum); } catch (Exception e) { String msg = "Prepare SetupGuestNetwork failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new 
ExecutionResult(false, msg); } return new ExecutionResult(true, null); @@ -1367,7 +1365,7 @@ private ExecutionResult prepareNetworkElementCommand(IpAssocVpcCommand cmd) { if (ip.isAdd()) { throw new InternalErrorException("Failed to find DomR VIF to associate/disassociate IP with."); } else { - s_logger.debug("VIF to deassociate IP with does not exist, return success"); + logger.debug("VIF to deassociate IP with does not exist, return success"); continue; } } @@ -1375,7 +1373,7 @@ private ExecutionResult prepareNetworkElementCommand(IpAssocVpcCommand cmd) { ip.setNicDevId(ethDeviceNum); } } catch (Exception e) { - s_logger.error("Prepare Ip Assoc failure on applying one ip due to exception: ", e); + logger.error("Prepare Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.toString()); } @@ -1392,7 +1390,7 @@ protected ExecutionResult prepareNetworkElementCommand(SetSourceNatCommand cmd) pubIp.setNicDevId(ethDeviceNum); } catch (Exception e) { String msg = "Prepare Ip SNAT failure due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@ -1408,15 +1406,15 @@ private ExecutionResult prepareNetworkElementCommand(SetNetworkACLCommand cmd) { nic.setDeviceId(ethDeviceNum); } catch (Exception e) { String msg = "Prepare SetNetworkACL failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new ExecutionResult(false, msg); } return new ExecutionResult(true, null); } private PlugNicAnswer execute(PlugNicCommand cmd) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource PlugNicCommand " + _gson.toJson(cmd)); + if (logger.isInfoEnabled()) { + logger.info("Executing resource PlugNicCommand " + _gson.toJson(cmd)); } try { @@ -1427,7 +1425,7 @@ private PlugNicAnswer execute(PlugNicCommand cmd) { plugNicCommandInternal(cmd.getVmName(), nicDeviceType, cmd.getNic(), 
cmd.getVMType()); return new PlugNicAnswer(cmd, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + e.toString()); } } @@ -1448,14 +1446,14 @@ private void plugNicCommandInternal(String vmName, VirtualEthernetCardType nicDe if (vmMo == null) { String msg = "Router " + vmName + " no longer exists to execute PlugNic command"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } /* if(!isVMWareToolsInstalled(vmMo)){ String errMsg = "vmware tools is not installed or not running, cannot add nic to vm " + vmName; - s_logger.debug(errMsg); + logger.debug(errMsg); return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg); } */ @@ -1481,11 +1479,11 @@ private void plugNicCommandInternal(String vmName, VirtualEthernetCardType nicDe DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first()); dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor); - s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); + logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), deviceNumber + 1, true, true); } else { - s_logger.info("Preparing NIC device on network " + networkInfo.second()); + logger.info("Preparing NIC device on network " + networkInfo.second()); nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), deviceNumber + 1, true, true); } @@ -1511,14 +1509,14 @@ private ReplugNicAnswer execute(ReplugNicCommand cmd) { if (vmMo == null) { String msg = "Router " + vmName + " no longer exists to execute ReplugNic command"; - s_logger.error(msg); + logger.error(msg); throw new 
Exception(msg); } /* if(!isVMWareToolsInstalled(vmMo)){ String errMsg = "vmware tools is not installed or not running, cannot add nic to vm " + vmName; - s_logger.debug(errMsg); + logger.debug(errMsg); return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg); } */ @@ -1543,10 +1541,10 @@ private ReplugNicAnswer execute(ReplugNicCommand cmd) { DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first()); dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor); - s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); + logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); VmwareHelper.updateDvNicDevice(nic, networkInfo.first(), dvSwitchUuid); } else { - s_logger.info("Preparing NIC device on network " + networkInfo.second()); + logger.info("Preparing NIC device on network " + networkInfo.second()); VmwareHelper.updateNicDevice(nic, networkInfo.first(), networkInfo.second()); } @@ -1555,7 +1553,7 @@ private ReplugNicAnswer execute(ReplugNicCommand cmd) { return new ReplugNicAnswer(cmd, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new ReplugNicAnswer(cmd, false, "Unable to execute ReplugNicCommand due to " + e.toString()); } } @@ -1577,14 +1575,14 @@ private UnPlugNicAnswer execute(UnPlugNicCommand cmd) { if (vmMo == null) { String msg = "VM " + vmName + " no longer exists to execute UnPlugNic command"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } /* if(!isVMWareToolsInstalled(vmMo)){ String errMsg = "vmware tools not installed or not running, cannot remove nic from vm " + vmName; - s_logger.debug(errMsg); + logger.debug(errMsg); return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + errMsg); } */ @@ -1596,7 +1594,7 @@ private UnPlugNicAnswer execute(UnPlugNicCommand cmd) { return 
new UnPlugNicAnswer(cmd, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + e.toString()); } } @@ -1678,7 +1676,7 @@ private ExecutionResult prepareNetworkElementCommand(IpAssocCommand cmd) { IpAddressTO[] ips = cmd.getIpAddresses(); String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); - String controlIp = VmwareResource.getRouterSshControlIp(cmd); + String controlIp = getRouterSshControlIp(cmd); VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(routerName); @@ -1693,7 +1691,7 @@ private ExecutionResult prepareNetworkElementCommand(IpAssocCommand cmd) { if (vmMo == null) { String msg = "Router " + routerName + " no longer exists to execute IPAssoc command"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -1710,14 +1708,14 @@ private ExecutionResult prepareNetworkElementCommand(IpAssocCommand cmd) { String publicNetworkName = HypervisorHostHelper.getPublicNetworkNamePrefix(vlanId); Pair publicNicInfo = vmMo.getNicDeviceIndex(publicNetworkName); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Find public NIC index, public network name: " + publicNetworkName + ", index: " + publicNicInfo.first()); + if (logger.isDebugEnabled()) { + logger.debug("Find public NIC index, public network name: " + publicNetworkName + ", index: " + publicNicInfo.first()); } boolean addVif = false; if (ip.isAdd() && publicNicInfo.first() == -1) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Plug new NIC to associate" + controlIp + " to " + ip.getPublicIp()); + if (logger.isDebugEnabled()) { + logger.debug("Plug new NIC to associate" + controlIp + " to " + ip.getPublicIp()); } addVif = true; } @@ -1737,14 +1735,14 @@ private ExecutionResult prepareNetworkElementCommand(IpAssocCommand cmd) { if (publicNicInfo.first() < 0) { String msg = "Failed to find DomR VIF to 
associate/disassociate IP with."; - s_logger.error(msg); + logger.error(msg); throw new InternalErrorException(msg); } ip.setNicDevId(publicNicInfo.first()); ip.setNewNic(addVif); } } catch (Throwable e) { - s_logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); + logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@ -1769,7 +1767,7 @@ private ExecutionResult cleanupNetworkElementCommand(IpAssocCommand cmd) { if (vmMo == null) { String msg = String.format("Router %s no longer exists to execute IPAssoc command ", routerName); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } final String lastIp = cmd.getAccessDetail(NetworkElementCommand.NETWORK_PUB_LAST_IP); @@ -1788,7 +1786,7 @@ private ExecutionResult cleanupNetworkElementCommand(IpAssocCommand cmd) { configureNicDevice(vmMo, nicInfo.first(), VirtualDeviceConfigSpecOperation.REMOVE, "unplugNicCommand"); } } catch (Throwable e) { - s_logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); + logger.error("Unexpected exception: " + e.toString() + " will shortcut rest of IPAssoc commands", e); return new ExecutionResult(false, e.toString()); } return new ExecutionResult(true, null); @@ -1805,8 +1803,8 @@ private Pair getVirtualDevice(VirtualMachineMO vmMo, IpA String publicNetworkName = HypervisorHostHelper.getPublicNetworkNamePrefix(vlanId); Pair publicNicInfo = vmMo.getNicDeviceIndex(publicNetworkName); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Find public NIC index, public network name: %s , index: %s", publicNetworkName, publicNicInfo.first())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Find public NIC index, public network name: %s , index: %s", publicNetworkName, publicNicInfo.first())); } return new 
Pair<>(findVirtualNicDevice(vmMo, nicTO.getMac()), publicNicInfo.first()); @@ -1835,8 +1833,8 @@ public ExecutionResult executeInVR(String routerIP, String script, String args, Pair result; //TODO: Password should be masked, cannot output to log directly - if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on VR: " + routerIP + ", script: " + script + " with args: " + args); + if (logger.isDebugEnabled()) { + logger.debug("Run command on VR: " + routerIP + ", script: " + script + " with args: " + args); } try { @@ -1844,11 +1842,11 @@ public ExecutionResult executeInVR(String routerIP, String script, String args, VRScripts.CONNECTION_TIMEOUT, VRScripts.CONNECTION_TIMEOUT, timeout); } catch (Exception e) { String msg = "Command failed due to " + VmwareHelper.getExceptionMessage(e); - s_logger.error(msg); + logger.error(msg); result = new Pair(false, msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(script + " execution result: " + result.first().toString()); + if (logger.isDebugEnabled()) { + logger.debug(script + " execution result: " + result.first().toString()); } return new ExecutionResult(result.first(), result.second()); } @@ -1858,29 +1856,29 @@ protected CheckSshAnswer execute(CheckSshCommand cmd) { String privateIp = cmd.getIp(); int cmdPort = cmd.getPort(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port, " + privateIp + ":" + cmdPort); } String errorMessage = "Can not ping System VM [%s], due to: [%s]."; try { String result = connect(cmd.getName(), privateIp, cmdPort); if (result != null) { - s_logger.error(String.format(errorMessage, vmName, result)); + logger.error(String.format(errorMessage, vmName, result)); return new CheckSshAnswer(cmd, String.format(errorMessage, vmName, result)); } } catch (Exception e) { - s_logger.error(String.format(errorMessage, vmName, e.getMessage()), e); + 
logger.error(String.format(errorMessage, vmName, e.getMessage()), e); return new CheckSshAnswer(cmd, e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port succeeded for vm " + vmName); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port succeeded for vm " + vmName); } if (VirtualMachineName.isValidRouterName(vmName)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Execute network usage setup command on " + vmName); + if (logger.isDebugEnabled()) { + logger.debug("Execute network usage setup command on " + vmName); } networkUsage(privateIp, "create", null); } @@ -1904,8 +1902,8 @@ private DiskTO[] validateDisks(DiskTO[] disks) { validatedDisks.add(vol); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Drop invalid disk option, volumeTO: " + _gson.toJson(vol)); + if (logger.isDebugEnabled()) { + logger.debug("Drop invalid disk option, volumeTO: " + _gson.toJson(vol)); } } } @@ -1962,7 +1960,7 @@ protected ScaleVmAnswer execute(ScaleVmCommand cmd) { throw new Exception("Unable to execute ScaleVmCommand"); } } catch (Exception e) { - s_logger.error(String.format("ScaleVmCommand failed due to: [%s].", VmwareHelper.getExceptionMessage(e)), e); + logger.error(String.format("ScaleVmCommand failed due to: [%s].", VmwareHelper.getExceptionMessage(e)), e); return new ScaleVmAnswer(cmd, false, String.format("Unable to execute ScaleVmCommand due to: [%s].", e.toString())); } return new ScaleVmAnswer(cmd, true, null); @@ -1996,7 +1994,7 @@ protected void ensureDiskControllers(VirtualMachineMO vmMo, Pair int availableBusNum = scsiControllerInfo.second() + 1; // method returned current max. 
bus number if (DiskControllerType.getType(scsiDiskController) != scsiControllerInfo.third()) { - s_logger.debug(String.format("Change controller type from: %s to: %s", scsiControllerInfo.third().toString(), + logger.debug(String.format("Change controller type from: %s to: %s", scsiControllerInfo.third().toString(), scsiDiskController)); vmMo.tearDownDevices(new Class[]{VirtualSCSIController.class}); vmMo.addScsiDeviceControllers(DiskControllerType.getType(scsiDiskController)); @@ -2072,7 +2070,7 @@ protected StartAnswer execute(StartCommand cmd) { if (vmInVcenter != null) { vmAlreadyExistsInVcenter = true; String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter."; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -2085,7 +2083,7 @@ protected StartAnswer execute(StartCommand cmd) { HashMap> dataStoresDetails = inferDatastoreDetailsFromDiskInfo(hyperHost, context, disks, cmd); if ((dataStoresDetails == null) || (dataStoresDetails.isEmpty())) { String msg = "Unable to locate datastore details of the volumes to be attached"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -2099,7 +2097,7 @@ protected StartAnswer execute(StartCommand cmd) { List> diskDatastores = null; if (vmMo != null) { - s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration"); + logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration"); if (getVmPowerState(vmMo) != PowerState.PowerOff) vmMo.safePowerOff(_shutdownWaitMs); @@ -2118,8 +2116,8 @@ protected StartAnswer execute(StartCommand cmd) { vmMo = hyperHost.findVmOnPeerHyperHost(vmInternalCSName); if (vmMo != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found vm " + vmInternalCSName + " at other host, relocate to " + hyperHost.getHyperHostName()); + if (logger.isInfoEnabled()) { + logger.info("Found vm " + vmInternalCSName + " at other host, relocate to " + 
hyperHost.getHyperHostName()); } takeVmFromOtherHyperHost(hyperHost, vmInternalCSName); @@ -2138,7 +2136,7 @@ protected StartAnswer execute(StartCommand cmd) { // If a VM with the same name is found in a different cluster in the DC, unregister the old VM and configure a new VM (cold-migration). VirtualMachineMO existingVmInDc = dcMo.findVm(vmInternalCSName); if (existingVmInDc != null) { - s_logger.debug("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the exisitng VM."); + logger.debug("Found VM: " + vmInternalCSName + " on a host in a different cluster. Unregistering the exisitng VM."); existingVmName = existingVmInDc.getName(); existingVmFileInfo = existingVmInDc.getFileInfo(); existingVmFileLayout = existingVmInDc.getFileLayout(); @@ -2149,7 +2147,7 @@ protected StartAnswer execute(StartCommand cmd) { if (deployAsIs) { vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); if (vmMo == null) { - s_logger.info("Cloned deploy-as-is VM " + vmInternalCSName + " is not in this host, relocating it"); + logger.info("Cloned deploy-as-is VM " + vmInternalCSName + " is not in this host, relocating it"); vmMo = takeVmFromOtherHyperHost(hyperHost, vmInternalCSName); } } else { @@ -2164,7 +2162,7 @@ protected StartAnswer execute(StartCommand cmd) { DatastoreMO dsRootVolumeIsOn = rootDiskDataStoreDetails.second(); if (dsRootVolumeIsOn == null) { String msg = "Unable to locate datastore details of root volume"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } if (rootDisk.getDetails().get(DiskTO.PROTOCOL_TYPE) != null && rootDisk.getDetails().get(DiskTO.PROTOCOL_TYPE).equalsIgnoreCase(Storage.StoragePoolType.DatastoreCluster.toString())) { @@ -2185,8 +2183,8 @@ protected StartAnswer execute(StartCommand cmd) { registerVm(vmNameOnVcenter, dsRootVolumeIsOn); vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); if (vmMo != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found registered vm " + vmInternalCSName 
+ " at host " + hyperHost.getHyperHostName()); + if (logger.isDebugEnabled()) { + logger.debug("Found registered vm " + vmInternalCSName + " at host " + hyperHost.getHyperHostName()); } } tearDownVm(vmMo); @@ -2204,7 +2202,7 @@ protected StartAnswer execute(StartCommand cmd) { } } if (deployAsIs) { - s_logger.info("Mapping VM disks to spec disks and tearing down datadisks (if any)"); + logger.info("Mapping VM disks to spec disks and tearing down datadisks (if any)"); mapSpecDisksToClonedDisksAndTearDownDatadisks(vmMo, vmInternalCSName, specDisks); } @@ -2253,7 +2251,7 @@ protected StartAnswer execute(StartCommand cmd) { vmConfigSpec.setMemoryHotAddEnabled(vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()); String hostApiVersion = ((HostMO) hyperHost).getHostAboutInfo().getApiVersion(); if (numCoresPerSocket > 1 && hostApiVersion.compareTo("5.0") < 0) { - s_logger.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. Hence CpuHotAdd will not be" + logger.warn("Dynamic scaling of CPU is not supported for Virtual Machines with multi-core vCPUs in case of ESXi hosts 4.1 and prior. 
Hence CpuHotAdd will not be" + " enabled for Virtual Machine: " + vmInternalCSName); vmConfigSpec.setCpuHotAddEnabled(false); } else { @@ -2261,11 +2259,11 @@ protected StartAnswer execute(StartCommand cmd) { } if(!vmMo.isMemoryHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()){ - s_logger.warn("hotadd of memory is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName); + logger.warn("hotadd of memory is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName); } if(!vmMo.isCpuHotAddSupported(guestOsId) && vmSpec.isEnableDynamicallyScaleVm()){ - s_logger.warn("hotadd of cpu is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName); + logger.warn("hotadd of cpu is not supported, dynamic scaling feature can not be applied to vm: " + vmInternalCSName); } configNestedHVSupport(vmMo, vmSpec, vmConfigSpec); @@ -2297,12 +2295,12 @@ protected StartAnswer execute(StartCommand cmd) { null, secDsMo.getMor(), true, true, ideUnitNumber++, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); + if (logger.isDebugEnabled()) + logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); + if (logger.isDebugEnabled()) + logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } i++; @@ -2320,13 +2318,13 @@ protected StartAnswer execute(StartCommand cmd) { Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, i + 1); 
deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); + if (logger.isDebugEnabled()) + logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); + if (logger.isDebugEnabled()) + logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } @@ -2417,8 +2415,8 @@ protected StartAnswer execute(StartCommand cmd) { if (diskChain != null && diskChain.length > 0) { DatastoreFile file = new DatastoreFile(diskChain[0]); if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); + if (logger.isInfoEnabled()) + logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); volumeTO.setPath(file.getFileBaseName()); } } @@ -2427,8 +2425,8 @@ protected StartAnswer execute(StartCommand cmd) { String actualPoolUuid = diskDatastoreMofromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); if (actualPoolUuid != null && !actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) { volumeDsDetails = new Pair<>(diskDatastoreMofromVM.getMor(), diskDatastoreMofromVM); - if (s_logger.isInfoEnabled()) - s_logger.info("Detected datastore uuid change on volume: " + volumeTO.getId() + " " + primaryStore.getUuid() + " -> " + actualPoolUuid); + if (logger.isInfoEnabled()) + logger.info("Detected datastore uuid change on volume: " + 
volumeTO.getId() + " " + primaryStore.getUuid() + " -> " + actualPoolUuid); ((PrimaryDataStoreTO)primaryStore).setUuid(actualPoolUuid); } } @@ -2448,15 +2446,15 @@ protected StartAnswer execute(StartCommand cmd) { Long maxIops = volumeTO.getIopsWriteRate() + volumeTO.getIopsReadRate(); VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(), deviceNumber, i + 1, maxIops); - s_logger.debug(LogUtils.logGsonWithoutException("The following definitions will be used to start the VM: virtual device [%s], volume [%s].", device, volumeTO)); + logger.debug(LogUtils.logGsonWithoutException("The following definitions will be used to start the VM: virtual device [%s], volume [%s].", device, volumeTO)); diskStoragePolicyId = volumeTO.getvSphereStoragePolicyId(); if (StringUtils.isNotEmpty(diskStoragePolicyId)) { PbmProfileManagerMO profMgrMo = new PbmProfileManagerMO(context); diskProfileSpec = profMgrMo.getProfileSpec(diskStoragePolicyId); deviceConfigSpecArray[i].getProfile().add(diskProfileSpec); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Adding vSphere storage profile: %s to virtual disk [%s]", diskStoragePolicyId, _gson.toJson(device))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Adding vSphere storage profile: %s to virtual disk [%s]", diskStoragePolicyId, _gson.toJson(device))); } } if (vol.getType() == Volume.Type.ROOT) { @@ -2467,8 +2465,8 @@ protected StartAnswer execute(StartCommand cmd) { deviceConfigSpecArray[i].setDevice(device); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); + if (logger.isDebugEnabled()) + logger.debug("Prepare volume at new device " + _gson.toJson(device)); i++; } else { @@ -2485,7 +2483,7 @@ protected StartAnswer execute(StartCommand cmd) { if (StringUtils.isNotBlank(guestOsId) && 
guestOsId.startsWith("darwin")) { //Mac OS VirtualDevice[] devices = vmMo.getMatchedDevices(new Class[]{VirtualUSBController.class}); if (devices.length == 0) { - s_logger.debug("No USB Controller device on VM Start. Add USB Controller device for Mac OS VM " + vmInternalCSName); + logger.debug("No USB Controller device on VM Start. Add USB Controller device for Mac OS VM " + vmInternalCSName); //For Mac OS X systems, the EHCI+UHCI controller is enabled by default and is required for USB mouse and keyboard access. VirtualDevice usbControllerDevice = VmwareHelper.prepareUSBControllerDevice(); @@ -2493,12 +2491,12 @@ protected StartAnswer execute(StartCommand cmd) { deviceConfigSpecArray[i].setDevice(usbControllerDevice); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare USB controller at new device " + _gson.toJson(deviceConfigSpecArray[i])); + if (logger.isDebugEnabled()) + logger.debug("Prepare USB controller at new device " + _gson.toJson(deviceConfigSpecArray[i])); i++; } else { - s_logger.debug("USB Controller device exists on VM Start for Mac OS VM " + vmInternalCSName); + logger.debug("USB Controller device exists on VM Start for Mac OS VM " + vmInternalCSName); } } @@ -2515,15 +2513,15 @@ protected StartAnswer execute(StartCommand cmd) { Map nicUuidToDvSwitchUuid = new HashMap(); for (NicTO nicTo : sortNicsByDeviceId(nics)) { - s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); + logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); String adapterTypeStr = deployAsIs ? 
mapAdapterType(deployAsIsInfo.getNicAdapterMap().get(nicTo.getDeviceId())) : vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER); nicDeviceType = VirtualEthernetCardType.valueOf(adapterTypeStr); - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM " + vmInternalCSName + " will be started with NIC device type: " + nicDeviceType + " on NIC device " + nicTo.getDeviceId()); + if (logger.isDebugEnabled()) { + logger.debug("VM " + vmInternalCSName + " will be started with NIC device type: " + nicDeviceType + " on NIC device " + nicTo.getDeviceId()); } boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus")); VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); @@ -2536,14 +2534,14 @@ protected StartAnswer execute(StartCommand cmd) { DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); ManagedObjectReference dvsMor = dataCenterMo.getDvSwitchMor(networkInfo.first()); dvSwitchUuid = dataCenterMo.getDvSwitchUuid(dvsMor); - s_logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); + logger.info("Preparing NIC device on dvSwitch : " + dvSwitchUuid); nic = VmwareHelper.prepareDvNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), dvSwitchUuid, nicTo.getMac(), i + 1, true, true); if (nicTo.getUuid() != null) { nicUuidToDvSwitchUuid.put(nicTo.getUuid(), dvSwitchUuid); } } else { - s_logger.info("Preparing NIC device on network " + networkInfo.second()); + logger.info("Preparing NIC device on network " + networkInfo.second()); nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), i + 1, true, true); } @@ -2557,8 +2555,8 @@ protected StartAnswer execute(StartCommand cmd) { deviceConfigSpecArray[i].setDevice(nic); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare NIC at new device " + 
_gson.toJson(deviceConfigSpecArray[i])); + if (logger.isDebugEnabled()) + logger.debug("Prepare NIC at new device " + _gson.toJson(deviceConfigSpecArray[i])); // this is really a hacking for DomR, upon DomR startup, we will reset all the NIC allocation after eth3 if (nicCount < 3) @@ -2605,8 +2603,8 @@ protected StartAnswer execute(StartCommand cmd) { if (StringUtils.isNotEmpty(vmStoragePolicyId)) { vmConfigSpec.getVmProfile().add(vmProfileSpec); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Configuring the VM %s with storage policy: %s", vmInternalCSName, vmStoragePolicyId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Configuring the VM %s with storage policy: %s", vmInternalCSName, vmStoragePolicyId)); } } // @@ -2641,7 +2639,7 @@ protected StartAnswer execute(StartCommand cmd) { // Power-on VM // if (powerOnVM(vmMo, vmInternalCSName, vmNameOnVcenter)) { - s_logger.debug(String.format("VM %s has been started successfully with hostname %s.", vmInternalCSName, vmNameOnVcenter)); + logger.debug(String.format("VM %s has been started successfully with hostname %s.", vmInternalCSName, vmNameOnVcenter)); } else { throw new Exception("Failed to start VM. vmName: " + vmInternalCSName + " with hostname " + vmNameOnVcenter); } @@ -2666,12 +2664,12 @@ protected StartAnswer execute(StartCommand cmd) { FileUtil.scpPatchFiles(controlIp, VRScripts.CONFIG_CACHE_LOCATION, DefaultDomRSshPort, pemFile, systemVmPatchFiles, BASEPATH); if (!_vrResource.isSystemVMSetup(vmInternalCSName, controlIp)) { String errMsg = "Failed to patch systemVM"; - s_logger.error(errMsg); + logger.error(errMsg); return new StartAnswer(cmd, errMsg); } } catch (Exception e) { String errMsg = "Failed to scp files to system VM. 
Patching of systemVM failed"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return new StartAnswer(cmd, String.format("%s due to: %s", errMsg, e.getMessage())); } } @@ -2700,14 +2698,14 @@ protected StartAnswer execute(StartCommand cmd) { } if (existingVmName != null && existingVmFileInfo != null) { - s_logger.debug(String.format("Since VM start failed, registering back an existing VM: [%s] that was unregistered.", existingVmName)); + logger.debug(String.format("Since VM start failed, registering back an existing VM: [%s] that was unregistered.", existingVmName)); try { DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName()); DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName())); registerVm(existingVmName, existingVmDsMo); } catch (Exception ex) { String message = String.format("Failed to register an existing VM: [%s] due to [%s].", existingVmName, VmwareHelper.getExceptionMessage(ex)); - s_logger.error(message, ex); + logger.error(message, ex); } } return startAnswer; @@ -2720,9 +2718,9 @@ private boolean powerOnVM(final VirtualMachineMO vmMo, final String vmInternalCS try { return vmMo.powerOn(); } catch (Exception e) { - s_logger.info(String.format("Got exception while power on VM %s with hostname %s", vmInternalCSName, vmNameOnVcenter), e); + logger.info(String.format("Got exception while power on VM %s with hostname %s", vmInternalCSName, vmNameOnVcenter), e); if (e.getMessage() != null && e.getMessage().contains("File system specific implementation of Ioctl[file] failed")) { - s_logger.debug(String.format("Failed to power on VM %s with hostname %s. Retrying", vmInternalCSName, vmNameOnVcenter)); + logger.debug(String.format("Failed to power on VM %s with hostname %s. 
Retrying", vmInternalCSName, vmNameOnVcenter)); } else { throw e; } @@ -2746,7 +2744,7 @@ private void configureIso(VmwareHypervisorHost hyperHost, VirtualMachineMO vmMo, if (iso.getPath() != null && !iso.getPath().isEmpty()) { DataStoreTO imageStore = iso.getDataStore(); if (!(imageStore instanceof NfsTO)) { - s_logger.debug("unsupported protocol"); + logger.debug("unsupported protocol"); throw new Exception("unsupported protocol"); } NfsTO nfsImageStore = (NfsTO) imageStore; @@ -2760,12 +2758,12 @@ private void configureIso(VmwareHypervisorHost hyperHost, VirtualMachineMO vmMo, VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); + if (logger.isDebugEnabled()) + logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); + if (logger.isDebugEnabled()) + logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } } @@ -2886,7 +2884,7 @@ private void setDeployAsIsProperties(VirtualMachineMO vmMo, DeployAsIsInfoTO dep if (deployAsIsInfo != null && MapUtils.isNotEmpty(deployAsIsInfo.getProperties())) { Map properties = deployAsIsInfo.getProperties(); VmConfigInfo vAppConfig = vmMo.getConfigInfo().getVAppConfig(); - s_logger.info("Copying OVF properties to the values the user provided"); + logger.info("Copying OVF properties to the values the user provided"); setVAppPropertiesToConfigSpec(vAppConfig, properties, vmConfigSpec, hyperHost); } } @@ -2896,7 +2894,7 @@ 
private void setDeployAsIsProperties(VirtualMachineMO vmMo, DeployAsIsInfoTO dep */ private void mapSpecDisksToClonedDisksAndTearDownDatadisks(VirtualMachineMO vmMo, String vmInternalCSName, DiskTO[] specDisks) { try { - s_logger.debug("Mapping spec disks information to cloned VM disks for VM " + vmInternalCSName); + logger.debug("Mapping spec disks information to cloned VM disks for VM " + vmInternalCSName); if (vmMo != null && ArrayUtils.isNotEmpty(specDisks)) { List vmDisks = vmMo.getVirtualDisksOrderedByKey(); @@ -2912,7 +2910,7 @@ private void mapSpecDisksToClonedDisksAndTearDownDatadisks(VirtualMachineMO vmMo if (dataVolume instanceof VolumeObjectTO) { VolumeObjectTO volumeObjectTO = (VolumeObjectTO) dataVolume; if (!volumeObjectTO.getSize().equals(vmDisk.getCapacityInBytes())) { - s_logger.info("Mapped disk size is not the same as the cloned VM disk size: " + + logger.info("Mapped disk size is not the same as the cloned VM disk size: " + volumeObjectTO.getSize() + " - " + vmDisk.getCapacityInBytes()); } VirtualDeviceBackingInfo backingInfo = vmDisk.getBacking(); @@ -2925,29 +2923,29 @@ private void mapSpecDisksToClonedDisksAndTearDownDatadisks(VirtualMachineMO vmMo String relativePath = fileNameParts[1].split("/")[1].replace(".vmdk", ""); String vmSpecDatastoreUuid = volumeObjectTO.getDataStore().getUuid().replaceAll("-", ""); if (!datastoreUuid.equals(vmSpecDatastoreUuid)) { - s_logger.info("Mapped disk datastore UUID is not the same as the cloned VM datastore UUID: " + + logger.info("Mapped disk datastore UUID is not the same as the cloned VM datastore UUID: " + datastoreUuid + " - " + vmSpecDatastoreUuid); } volumeObjectTO.setPath(relativePath); specDisk.setPath(relativePath); rootDisks.add(vmDisk); } else { - s_logger.error("Empty backing filename for volume " + volumeObjectTO.getName()); + logger.error("Empty backing filename for volume " + volumeObjectTO.getName()); } } else { - s_logger.error("Could not get volume backing info for volume " + 
volumeObjectTO.getName()); + logger.error("Could not get volume backing info for volume " + volumeObjectTO.getName()); } } } vmDisks.removeAll(rootDisks); if (CollectionUtils.isNotEmpty(vmDisks)) { - s_logger.info("Tearing down datadisks for deploy-as-is VM"); + logger.info("Tearing down datadisks for deploy-as-is VM"); tearDownVMDisks(vmMo, vmDisks); } } } catch (Exception e) { String msg = "Error mapping deploy-as-is VM disks from cloned VM " + vmInternalCSName; - s_logger.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(e); } } @@ -2967,8 +2965,8 @@ private void setBootOptions(VirtualMachineTO vmSpec, String bootMode, VirtualMac if (bootOptions == null) { bootOptions = new VirtualMachineBootOptions(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("configuring VM '%s' to enter hardware setup",vmSpec.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("configuring VM '%s' to enter hardware setup",vmSpec.getName())); } bootOptions.setEnterBIOSSetup(vmSpec.isEnterHardwareSetup()); } @@ -3023,7 +3021,7 @@ protected List copyVAppConfigPropertySectionFromOVF(VmConfigIn if (ovfProperties.containsKey(info.getId())) { String value = ovfProperties.get(info.getId()); info.setValue(value); - s_logger.info("Setting OVF property ID = " + info.getId() + " VALUE = " + value); + logger.info("Setting OVF property ID = " + info.getId() + " VALUE = " + value); } spec.setInfo(info); spec.setOperation(useEdit ? ArrayUpdateOperation.EDIT : ArrayUpdateOperation.ADD); @@ -3041,7 +3039,7 @@ protected List copyVAppConfigProductSectionFromOVF(VmConfigInfo for (VAppProductInfo info : productFromOvf) { VAppProductSpec spec = new VAppProductSpec(); spec.setInfo(info); - s_logger.info("Procuct info KEY " + info.getKey()); + logger.info("Procuct info KEY " + info.getKey()); spec.setOperation(useEdit ? 
ArrayUpdateOperation.EDIT : ArrayUpdateOperation.ADD); specs.add(spec); } @@ -3094,7 +3092,7 @@ private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, V final String[] diskChain = diskInfo.getDiskChain(); if (diskChain != null && diskChain.length > 1) { - s_logger.warn("Disk chain length for the VM is greater than one, this is not supported"); + logger.warn("Disk chain length for the VM is greater than one, this is not supported"); throw new CloudRuntimeException("Unsupported VM disk chain length: " + diskChain.length); } @@ -3104,7 +3102,7 @@ private void resizeRootDiskOnVMStart(VirtualMachineMO vmMo, DiskTO rootDiskTO, V resizingSupported = true; } if (!resizingSupported) { - s_logger.warn("Resizing of root disk is only support for scsi device/bus, the provide VM's disk device bus name is " + diskInfo.getDiskDeviceBusName()); + logger.warn("Resizing of root disk is only support for scsi device/bus, the provide VM's disk device bus name is " + diskInfo.getDiskDeviceBusName()); throw new CloudRuntimeException("Unsupported VM root disk device bus: " + diskInfo.getDiskDeviceBusName()); } @@ -3168,7 +3166,7 @@ protected void configureVideoCard(VirtualMachineMO vmMo, VirtualMachineTO vmSpec long svgaVmramSize = Long.parseLong(value); setNewVRamSizeVmVideoCard(vmMo, svgaVmramSize, vmConfigSpec); } catch (NumberFormatException e) { - s_logger.error("Unexpected value, cannot parse " + value + " to long due to: " + e.getMessage()); + logger.error("Unexpected value, cannot parse " + value + " to long due to: " + e.getMessage()); } } } @@ -3199,7 +3197,7 @@ protected void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSi */ protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) { if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize) { - s_logger.info("Video card memory was set " + 
toHumanReadableSize(videoCard.getVideoRamSizeInKB().longValue()) + " instead of " + toHumanReadableSize(svgaVmramSize)); + logger.info("Video card memory was set " + toHumanReadableSize(videoCard.getVideoRamSizeInKB().longValue()) + " instead of " + toHumanReadableSize(svgaVmramSize)); configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec); } } @@ -3283,7 +3281,7 @@ private String[] syncDiskChain(DatacenterMO dcMo, VirtualMachineMO vmMo, DiskTO for (int i = 0; i < disks.length; i++) { DatastoreFile file = new DatastoreFile(disks[i]); if (!isManaged && file.getDir() != null && file.getDir().isEmpty()) { - s_logger.info("Perform run-time datastore folder upgrade. sync " + disks[i] + " to VM folder"); + logger.info("Perform run-time datastore folder upgrade. sync " + disks[i] + " to VM folder"); disks[i] = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmMo.getName(), dsMo, file.getFileBaseName(), VmwareManager.s_vmwareSearchExcludeFolder.value()); } } @@ -3311,7 +3309,7 @@ private String[] syncDiskChain(DatacenterMO dcMo, VirtualMachineMO vmMo, DiskTO } } if (!dsMo.fileExists(datastoreDiskPath)) { - s_logger.warn("Volume " + volumeTO.getId() + " does not seem to exist on datastore, out of sync? path: " + datastoreDiskPath); + logger.warn("Volume " + volumeTO.getId() + " does not seem to exist on datastore, out of sync? 
path: " + datastoreDiskPath); } return new String[]{datastoreDiskPath}; @@ -3337,8 +3335,8 @@ protected void configNestedHVSupport(VirtualMachineMO vmMo, VirtualMachineTO vmS VmwareContext context = vmMo.getContext(); if ("true".equals(vmSpec.getDetails().get(VmDetailConstants.NESTED_VIRTUALIZATION_FLAG))) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Nested Virtualization enabled in configuration, checking hypervisor capability"); + if (logger.isDebugEnabled()) + logger.debug("Nested Virtualization enabled in configuration, checking hypervisor capability"); ManagedObjectReference hostMor = vmMo.getRunningHost().getMor(); ManagedObjectReference computeMor = context.getVimClient().getMoRefProp(hostMor, "parent"); @@ -3347,12 +3345,12 @@ protected void configNestedHVSupport(VirtualMachineMO vmMo, VirtualMachineTO vmS Boolean nestedHvSupported = hostCapability.isNestedHVSupported(); if (nestedHvSupported == null) { // nestedHvEnabled property is supported only since VMware 5.1. It's not defined for earlier versions. 
- s_logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); + logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); } else if (nestedHvSupported.booleanValue()) { - s_logger.debug("Hypervisor supports nested virtualization, enabling for VM " + vmSpec.getName()); + logger.debug("Hypervisor supports nested virtualization, enabling for VM " + vmSpec.getName()); vmConfigSpec.setNestedHVEnabled(true); } else { - s_logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); + logger.warn("Hypervisor doesn't support nested virtualization, unable to set config for VM " + vmSpec.getName()); vmConfigSpec.setNestedHVEnabled(false); } } @@ -3409,7 +3407,7 @@ private static void configCustomExtraOption(List extraOptions, Virt } } - private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec) throws Exception { + private void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec) throws Exception { /** * We need to configure the port on the DV switch after the host is * connected. 
So make this happen between the configure and start of @@ -3419,7 +3417,7 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi for (NicTO nicTo : sortNicsByDeviceId(vmSpec.getNics())) { if (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch) { // We need to create a port with a unique vlan and pass the key to the nic device - s_logger.trace("Nic " + nicTo.toString() + " is connected to an NVP logicalswitch"); + logger.trace("Nic " + nicTo.toString() + " is connected to an NVP logicalswitch"); VirtualDevice nicVirtualDevice = vmMo.getNicDeviceByIndex(nicIndex); if (nicVirtualDevice == null) { throw new Exception("Failed to find a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad @@ -3433,7 +3431,7 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi String portGroupKey = port.getPortgroupKey(); String dvSwitchUuid = port.getSwitchUuid(); - s_logger.debug("NIC " + nicTo.toString() + " is connected to dvSwitch " + dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); + logger.debug("NIC " + nicTo.toString() + " is connected to dvSwitch " + dvSwitchUuid + " pg " + portGroupKey + " port " + portKey); ManagedObjectReference dvSwitchManager = vmMo.getContext().getVimClient().getServiceContent().getDvSwitchManager(); ManagedObjectReference dvSwitch = vmMo.getContext().getVimClient().getService().queryDvsByUuid(dvSwitchManager, dvSwitchUuid); @@ -3453,7 +3451,7 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi } VMwareDVSPortSetting settings = (VMwareDVSPortSetting) dvPort.getConfig().getSetting(); VmwareDistributedVirtualSwitchVlanIdSpec vlanId = (VmwareDistributedVirtualSwitchVlanIdSpec) settings.getVlan(); - s_logger.trace("Found port " + dvPort.getKey() + " with vlan " + vlanId.getVlanId()); + logger.trace("Found port " + dvPort.getKey() + " with vlan " + vlanId.getVlanId()); if (vlanId.getVlanId() > 0 && vlanId.getVlanId() < 4095) { 
usedVlans.add(vlanId.getVlanId()); } @@ -3469,7 +3467,7 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi VmwareDistributedVirtualSwitchVlanIdSpec vlanId = (VmwareDistributedVirtualSwitchVlanIdSpec) settings.getVlan(); BoolPolicy blocked = settings.getBlocked(); if (blocked.isValue() == Boolean.TRUE) { - s_logger.trace("Port is blocked, set a vlanid and unblock"); + logger.trace("Port is blocked, set a vlanid and unblock"); DVPortConfigSpec dvPortConfigSpec = new DVPortConfigSpec(); VMwareDVSPortSetting edittedSettings = new VMwareDVSPortSetting(); // Unblock @@ -3496,9 +3494,9 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi if (!vmMo.getContext().getVimClient().waitForTask(task)) { throw new Exception("Failed to configure the dvSwitch port for nic " + nicTo.toString()); } - s_logger.debug("NIC " + nicTo.toString() + " connected to vlan " + i); + logger.debug("NIC " + nicTo.toString() + " connected to vlan " + i); } else { - s_logger.trace("Port already configured and set to vlan " + vlanId.getVlanId()); + logger.trace("Port already configured and set to vlan " + vlanId.getVlanId()); } } else if (backing instanceof VirtualEthernetCardNetworkBackingInfo) { // This NIC is connected to a Virtual Switch @@ -3507,7 +3505,7 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour //OK, connected to OpaqueNetwork } else { - s_logger.error("nic device backing is of type " + backing.getClass().getName()); + logger.error("nic device backing is of type " + backing.getClass().getName()); throw new Exception("Incompatible backing for a VirtualDevice for nic " + nicIndex); //FIXME Generic exceptions are bad } } @@ -3524,7 +3522,7 @@ private VirtualMachineDiskInfo getMatchingExistingDiskWithVolumeDetails(VirtualM VirtualMachineDiskInfo diskInfo = 
diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); if (diskInfo != null) { - s_logger.info("Found existing disk info from volume path: " + volumePath); + logger.info("Found existing disk info from volume path: " + volumePath); return diskInfo; } else { if (chainInfo != null) { @@ -3536,7 +3534,7 @@ private VirtualMachineDiskInfo getMatchingExistingDiskWithVolumeDetails(VirtualM DatastoreFile file = new DatastoreFile(diskPath); diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName); if (diskInfo != null) { - s_logger.info("Found existing disk from chain info: " + diskPath); + logger.info("Found existing disk from chain info: " + diskPath); return diskInfo; } } @@ -3545,7 +3543,7 @@ private VirtualMachineDiskInfo getMatchingExistingDiskWithVolumeDetails(VirtualM if (diskInfo == null) { diskInfo = diskInfoBuilder.getDiskInfoByDeviceBusName(infoInChain.getDiskDeviceBusName()); if (diskInfo != null) { - s_logger.info("Found existing disk from chain device bus information: " + infoInChain.getDiskDeviceBusName()); + logger.info("Found existing disk from chain device bus information: " + infoInChain.getDiskDeviceBusName()); return diskInfo; } } @@ -3595,7 +3593,7 @@ private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo m if (deployAsIs && matchingExistingDisk != null) { String currentBusName = matchingExistingDisk.getDiskDeviceBusName(); if (currentBusName != null) { - s_logger.info("Chose disk controller based on existing information: " + currentBusName); + logger.info("Chose disk controller based on existing information: " + currentBusName); if (currentBusName.startsWith("ide")) { controllerType = DiskControllerType.ide; } else if (currentBusName.startsWith("scsi")) { @@ -3610,11 +3608,11 @@ private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo m } if (vol.getType() == Volume.Type.ROOT) { - s_logger.info("Chose disk controller for vol " + vol.getType() + 
" -> " + controllerInfo.first() + logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.first() + ", based on root disk controller settings at global configuration setting."); return controllerInfo.first(); } else { - s_logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.second() + logger.info("Chose disk controller for vol " + vol.getType() + " -> " + controllerInfo.second() + ", based on default data disk controller setting i.e. Operating system recommended."); // Need to bring in global configuration setting & template level setting. return controllerInfo.second(); } @@ -3649,13 +3647,13 @@ private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO v DatastoreFile originalFile = new DatastoreFile(volumeTO.getPath()); if (!file.getFileBaseName().equalsIgnoreCase(originalFile.getFileBaseName())) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]); + if (logger.isInfoEnabled()) + logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]); } } else { if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); + if (logger.isInfoEnabled()) + logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName()); } } @@ -3688,13 +3686,13 @@ private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO v private void checkAndDeleteDatastoreFile(String filePath, List skipDatastores, DatastoreMO dsMo, DatacenterMO dcMo) throws Exception { if (dsMo != null && dcMo != null && (skipDatastores == null || 
!skipDatastores.contains(dsMo.getName()))) { - s_logger.debug("Deleting file: " + filePath); + logger.debug("Deleting file: " + filePath); dsMo.deleteFile(filePath, dcMo.getMor(), true); } } private void deleteUnregisteredVmFiles(VirtualMachineFileLayoutEx vmFileLayout, DatacenterMO dcMo, boolean deleteDisks, List skipDatastores) throws Exception { - s_logger.debug("Deleting files associated with an existing VM that was unregistered"); + logger.debug("Deleting files associated with an existing VM that was unregistered"); DatastoreFile vmFolder = null; try { List fileInfo = vmFileLayout.getFile(); @@ -3723,7 +3721,7 @@ else if (file.getType().equals("config")) } } catch (Exception e) { String message = "Failed to delete files associated with an existing VM that was unregistered due to " + VmwareHelper.getExceptionMessage(e); - s_logger.warn(message, e); + logger.warn(message, e); } } @@ -3825,7 +3823,7 @@ private DatastoreMO getDataStoreWhereDiskExists(VmwareHypervisorHost hyperHost, VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); if (diskInfo != null) { - s_logger.info("Found existing disk info from volume path: " + volume.getPath()); + logger.info("Found existing disk info from volume path: " + volume.getPath()); return dsMo; } else { String chainInfo = volume.getChainInfo(); @@ -3838,7 +3836,7 @@ private DatastoreMO getDataStoreWhereDiskExists(VmwareHypervisorHost hyperHost, DatastoreFile file = new DatastoreFile(diskPath); diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName); if (diskInfo != null) { - s_logger.info("Found existing disk from chain info: " + diskPath); + logger.info("Found existing disk from chain info: " + diskPath); return dsMo; } } @@ -3908,7 +3906,7 @@ private HashMap> inferDatastor if (morDatastore == null) { String msg = "Failed to get the mounted datastore for the volume's pool " + poolUuid; - s_logger.error(msg); + logger.error(msg); 
throw new Exception(msg); } @@ -3964,7 +3962,7 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) { // TODO consider the spread of functionality between BroadcastDomainType and NetUtils return NetUtils.getPrimaryPvlanFromUri(nicTo.getBroadcastUri()); } else { - s_logger.warn("BroadcastType is not claimed as VLAN or PVLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan); + logger.warn("BroadcastType is not claimed as VLAN or PVLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan); return defaultVlan; } } else if (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch) { @@ -3974,12 +3972,12 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) { URI broadcastUri = nicTo.getBroadcastUri(); if (broadcastUri != null) { String vlanId = BroadcastDomainType.getValue(broadcastUri); - s_logger.debug("Using VLAN [" + vlanId + "] from broadcast uri [" + broadcastUri + "]"); + logger.debug("Using VLAN [" + vlanId + "] from broadcast uri [" + broadcastUri + "]"); return vlanId; } } - s_logger.warn("Unrecognized broadcast type in VmwareResource, type: " + nicTo.getBroadcastType().toString() + ". Use vlan info from labeling: " + defaultVlan); + logger.warn("Unrecognized broadcast type in VmwareResource, type: " + nicTo.getBroadcastType().toString() + ". 
Use vlan info from labeling: " + defaultVlan); return defaultVlan; } @@ -3994,7 +3992,7 @@ private Pair prepareNetworkFromNicInfo(HostMO ho String namePrefix = getNetworkNamePrefix(nicTo); Pair networkInfo = null; - s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); + logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); if (VirtualSwitchType.StandardVirtualSwitch == switchType) { networkInfo = HypervisorHostHelper.prepareNetwork(switchName, namePrefix, hostMo, @@ -4102,13 +4100,13 @@ private VirtualMachineMO takeVmFromOtherHyperHost(VmwareHypervisorHost hyperHost ManagedObjectReference morTargetPhysicalHost = hyperHost.findMigrationTarget(vmMo); if (morTargetPhysicalHost == null) { String msg = "VM " + vmName + " is on other host and we have no resource available to migrate and start it here"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } if (!vmMo.relocate(morTargetPhysicalHost)) { String msg = "VM " + vmName + " is on other host and we failed to relocate it here"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -4163,7 +4161,7 @@ protected Answer execute(ReadyCommand cmd) { return new ReadyAnswer(cmd, "Host is not in connect state"); } } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new ReadyAnswer(cmd, VmwareHelper.getExceptionMessage(e)); } } @@ -4177,16 +4175,16 @@ protected Answer execute(GetHostStatsCommand cmd) { try { HostStatsEntry entry = getHyperHostStats(hyperHost); if (entry != null) { - s_logger.debug(String.format("Host stats response from hypervisor is: [%s].", _gson.toJson(entry))); + logger.debug(String.format("Host stats response from hypervisor is: [%s].", _gson.toJson(entry))); entry.setHostId(cmd.getHostId()); answer = new GetHostStatsAnswer(cmd, entry); } } catch (Exception e) { - 
s_logger.error(createLogMessageException(e, cmd), e); + logger.error(createLogMessageException(e, cmd), e); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("GetHostStats Answer: " + _gson.toJson(answer)); + if (logger.isTraceEnabled()) { + logger.trace("GetHostStats Answer: " + _gson.toJson(answer)); } return answer; @@ -4217,7 +4215,7 @@ protected Answer execute(GetVmStatsCommand cmd) { createLogMessageException(e, cmd); } - s_logger.debug(String.format("VM Stats Map is: [%s].", _gson.toJson(vmStatsMap))); + logger.debug(String.format("VM Stats Map is: [%s].", _gson.toJson(vmStatsMap))); return new GetVmStatsAnswer(cmd, vmStatsMap); } @@ -4325,7 +4323,7 @@ protected Answer execute(GetVmDiskStatsCommand cmd) { } } } catch (Exception e) { - s_logger.error(String.format("Unable to execute PerfQuerySpec due to: [%s]. The window interval is enabled in vCenter?", VmwareHelper.getExceptionMessage(e)), e); + logger.error(String.format("Unable to execute PerfQuerySpec due to: [%s]. The window interval is enabled in vCenter?", VmwareHelper.getExceptionMessage(e)), e); } } @@ -4336,12 +4334,12 @@ protected Answer execute(GetVmDiskStatsCommand cmd) { } } - s_logger.debug(String.format("VM Disks Maps is: [%s].", _gson.toJson(vmStatsMap))); + logger.debug(String.format("VM Disks Maps is: [%s].", _gson.toJson(vmStatsMap))); if (MapUtils.isNotEmpty(vmStatsMap)) { return new GetVmDiskStatsAnswer(cmd, "", cmd.getHostName(), vmStatsMap); } } catch (Exception e) { - s_logger.error(String.format("Unable to execute GetVmDiskStatsCommand due to [%s].", VmwareHelper.getExceptionMessage(e)), e); + logger.error(String.format("Unable to execute GetVmDiskStatsCommand due to [%s].", VmwareHelper.getExceptionMessage(e)), e); } return new GetVmDiskStatsAnswer(cmd, null, null, null); } @@ -4387,10 +4385,10 @@ protected GetVolumeStatsAnswer execute(GetVolumeStatsCommand cmd) { } } } - s_logger.debug(String.format("Volume Stats Entry is: [%s].", _gson.toJson(statEntry))); + 
logger.debug(String.format("Volume Stats Entry is: [%s].", _gson.toJson(statEntry))); return new GetVolumeStatsAnswer(cmd, "", statEntry); } catch (Exception e) { - s_logger.error(String.format("VOLSTAT GetVolumeStatsCommand failed due to [%s].", VmwareHelper.getExceptionMessage(e)), e); + logger.error(String.format("VOLSTAT GetVolumeStatsCommand failed due to [%s].", VmwareHelper.getExceptionMessage(e)), e); } return new GetVolumeStatsAnswer(cmd, "", null); @@ -4419,11 +4417,11 @@ protected Answer execute(StopCommand cmd) { if (cmd.checkBeforeCleanup()) { if (getVmPowerState(vmMo) != PowerState.PowerOff) { String msg = "StopCommand is sent for cleanup and VM " + cmd.getVmName() + " is current running. ignore it."; - s_logger.warn(msg); + logger.warn(msg); return new StopAnswer(cmd, msg, false); } else { String msg = "StopCommand is sent for cleanup and VM " + cmd.getVmName() + " is indeed stopped already."; - s_logger.info(msg); + logger.info(msg); return new StopAnswer(cmd, msg, true); } } @@ -4442,19 +4440,19 @@ protected Answer execute(StopCommand cmd) { } if (!success) { msg = "Have problem in powering off VM " + cmd.getVmName() + ", let the process continue"; - s_logger.warn(msg); + logger.warn(msg); } return new StopAnswer(cmd, msg, true); } String msg = "VM " + cmd.getVmName() + " is already in stopped state"; - s_logger.info(msg); + logger.info(msg); return new StopAnswer(cmd, msg, true); } finally { } } else { String msg = "VM " + cmd.getVmName() + " is no longer on the expected host in vSphere"; - s_logger.info(msg); + logger.info(msg); return new StopAnswer(cmd, msg, true); } } catch (Exception e) { @@ -4487,7 +4485,7 @@ protected Answer execute(RebootCommand cmd) { if (vmMo != null) { if (vmMo.isToolsInstallerMounted()) { toolsInstallerMounted = true; - s_logger.trace("Detected mounted vmware tools installer for :[" + cmd.getVmName() + "]"); + logger.trace("Detected mounted vmware tools installer for :[" + cmd.getVmName() + "]"); } try { if 
(canSetEnableSetupConfig(vmMo,cmd.getVirtualMachine())) { @@ -4497,9 +4495,9 @@ protected Answer execute(RebootCommand cmd) { return new RebootAnswer(cmd, "Failed to configure VM to boot into hardware setup menu: " + vmMo.getName(), false); } } catch (ToolsUnavailableFaultMsg e) { - s_logger.warn("VMware tools is not installed at guest OS, we will perform hard reset for reboot"); + logger.warn("VMware tools is not installed at guest OS, we will perform hard reset for reboot"); } catch (Exception e) { - s_logger.warn("We are not able to perform gracefull guest reboot due to " + VmwareHelper.getExceptionMessage(e)); + logger.warn("We are not able to perform gracefull guest reboot due to " + VmwareHelper.getExceptionMessage(e)); } // continue to try with hard-reset @@ -4508,11 +4506,11 @@ protected Answer execute(RebootCommand cmd) { } String msg = "Reboot failed in vSphere. vm: " + cmd.getVmName(); - s_logger.warn(msg); + logger.warn(msg); return new RebootAnswer(cmd, msg, false); } else { String msg = "Unable to find the VM in vSphere to reboot. 
vm: " + cmd.getVmName(); - s_logger.warn(msg); + logger.warn(msg); return new RebootAnswer(cmd, msg, false); } } catch (Exception e) { @@ -4521,9 +4519,9 @@ protected Answer execute(RebootCommand cmd) { if (toolsInstallerMounted) { try { vmMo.mountToolsInstaller(); - s_logger.debug(String.format("Successfully re-mounted vmware tools installer for :[%s].", cmd.getVmName())); + logger.debug(String.format("Successfully re-mounted vmware tools installer for :[%s].", cmd.getVmName())); } catch (Exception e) { - s_logger.error(String.format("Unabled to re-mount vmware tools installer for: [%s].", cmd.getVmName()), e); + logger.error(String.format("Unabled to re-mount vmware tools installer for: [%s].", cmd.getVmName()), e); } } } @@ -4539,8 +4537,8 @@ private boolean canSetEnableSetupConfig(VirtualMachineMO vmMo, VirtualMachineTO if (virtualMachine.isEnterHardwareSetup()) { VirtualMachineBootOptions bootOptions = new VirtualMachineBootOptions(); VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("configuring VM '%s' to reboot into hardware setup menu.",virtualMachine.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("configuring VM '%s' to reboot into hardware setup menu.",virtualMachine.getName())); } bootOptions.setEnterBIOSSetup(virtualMachine.isEnterHardwareSetup()); vmConfigSpec.setBootOptions(bootOptions); @@ -4549,7 +4547,7 @@ private boolean canSetEnableSetupConfig(VirtualMachineMO vmMo, VirtualMachineTO return false; } } catch (Exception e) { - s_logger.error(String.format("failed to reconfigure VM '%s' to boot into hardware setup menu",virtualMachine.getName()),e); + logger.error(String.format("failed to reconfigure VM '%s' to boot into hardware setup menu",virtualMachine.getName()),e); return false; } } @@ -4570,7 +4568,7 @@ protected Answer execute(CheckVirtualMachineCommand cmd) { powerState = getVmPowerState(vmMo); return new 
CheckVirtualMachineAnswer(cmd, powerState, vncPort); } else { - s_logger.warn("Can not find vm " + vmName + " to execute CheckVirtualMachineCommand"); + logger.warn("Can not find vm " + vmName + " to execute CheckVirtualMachineCommand"); return new CheckVirtualMachineAnswer(cmd, powerState, vncPort); } @@ -4590,13 +4588,13 @@ protected Answer execute(PrepareForMigrationCommand cmd) { // find VM through datacenter (VM is not at the target host yet) VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName); if (vmMo == null) { - s_logger.info("VM " + vmName + " was not found in the cluster of host " + hyperHost.getHyperHostName() + ". Looking for the VM in datacenter."); + logger.info("VM " + vmName + " was not found in the cluster of host " + hyperHost.getHyperHostName() + ". Looking for the VM in datacenter."); ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), dcMor); vmMo = dcMo.findVm(vmName); if (vmMo == null) { String msg = "VM " + vmName + " does not exist in VMware datacenter"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -4635,13 +4633,13 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { try { VirtualMachineMO vmMo = getVirtualMachineMO(vmName, hyperHost); if (vmMo == null) { - s_logger.info("VM " + vmName + " was not found in the cluster of host " + hyperHost.getHyperHostName() + ". Looking for the VM in datacenter."); + logger.info("VM " + vmName + " was not found in the cluster of host " + hyperHost.getHyperHostName() + ". 
Looking for the VM in datacenter."); ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), dcMor); vmMo = dcMo.findVm(vmName); if (vmMo == null) { String msg = "VM " + vmName + " does not exist in VMware datacenter"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -4650,10 +4648,10 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { if (e instanceof Exception) { return new Answer(cmd, (Exception) e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("problem", e); + if (logger.isDebugEnabled()) { + logger.debug("problem", e); } - s_logger.error(e.getLocalizedMessage()); + logger.error(e.getLocalizedMessage()); return new Answer(cmd, false, "unknown problem: " + e.getLocalizedMessage()); } } @@ -4677,9 +4675,9 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy if (cmd instanceof MigrateVolumeCommand) { // Else device keys will be found in relocateVirtualMachine MigrateVolumeCommand mcmd = (MigrateVolumeCommand) cmd; addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId()); - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { for (Integer diskId: volumeDeviceKey.keySet()) { - s_logger.trace(String.format("Disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId))); + logger.trace(String.format("Disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId))); } } } @@ -4687,7 +4685,7 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy return createAnswerForCmd(vmMo, volumeToList, cmd, volumeDeviceKey); } catch (Exception e) { String msg = "Change data store for VM " + vmMo.getVmName() + " failed"; - s_logger.error(msg + ": " + e.getLocalizedMessage()); + logger.error(msg + ": " + e.getLocalizedMessage()); throw new CloudRuntimeException(msg, e); } } @@ -4697,8 +4695,8 @@ Answer 
createAnswerForCmd(VirtualMachineMO vmMo, List volumeObje VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); VirtualDisk[] disks = vmMo.getAllDiskDevice(); Answer answer; - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("creating answer for %s", cmd.getClass().getSimpleName())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("creating answer for %s", cmd.getClass().getSimpleName())); } if (cmd instanceof MigrateVolumeCommand) { if (disks.length == 1) { @@ -4714,8 +4712,8 @@ Answer createAnswerForCmd(VirtualMachineMO vmMo, List volumeObje } private void addVolumeDiskmapping(VirtualMachineMO vmMo, Map volumeDeviceKey, String volumePath, long volumeId) throws Exception { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath)); } Pair diskInfo = getVirtualDiskInfo(vmMo, volumePath + VMDK_EXTENSION); String vmdkAbsFile = VmwareHelper.getAbsoluteVmdkFile(diskInfo.first()); @@ -4730,13 +4728,13 @@ private ManagedObjectReference getTargetDatastoreMOReference(String destinationP VmwareHypervisorHost hyperHost) { ManagedObjectReference morDs; try { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("finding datastore %s", destinationPool)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("finding datastore %s", destinationPool)); } morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, destinationPool); } catch (Exception e) { String msg = "exception while finding data store " + destinationPool; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage()); } return morDs; @@ -4748,7 +4746,7 @@ private ManagedObjectReference getDataCenterMOReference(String vmName, VmwareHyp morDc = 
hyperHost.getHyperHostDatacenter(); } catch (Exception e) { String msg = "exception while finding VMware datacenter to search for VM " + vmName; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage()); } return morDc; @@ -4761,7 +4759,7 @@ private VirtualMachineMO getVirtualMachineMO(String vmName, VmwareHypervisorHost vmMo = hyperHost.findVmOnPeerHyperHost(vmName); } catch (Exception e) { String msg = "exception while searching for VM " + vmName + " in VMware datacenter"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg + ": " + e.getLocalizedMessage()); } return vmMo; @@ -4777,7 +4775,7 @@ protected Answer execute(MigrateCommand cmd) { VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName); if (vmMo == null) { String msg = "VM " + vmName + " does not exist in VMware datacenter"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -4808,7 +4806,7 @@ protected Answer execute(MigrateWithStorageCommand cmd) { return new MigrateWithStorageAnswer(cmd, volumeToList); } catch (Throwable e) { String msg = "MigrateWithStorageCommand failed due to " + VmwareHelper.getExceptionMessage(e); - s_logger.warn(msg, e); + logger.warn(msg, e); return new MigrateWithStorageAnswer(cmd, (Exception)e); } } @@ -4840,7 +4838,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { morDestinationDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(dsHost, targetDsName); if(morDestinationDS == null) { String msg = "Unable to find the target datastore: " + targetDsName + " on host: " + dsHost.getHyperHostName(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } destinationDsMo = new DatastoreMO(hyperHost.getContext(), morDestinationDS); @@ -4856,7 +4854,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { // OfflineVmwareMigration: more robust would be to find the store given the volume as it might have been 
moved out of band or due to error // example: DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName())); - s_logger.info("Create worker VM " + vmName); + logger.info("Create worker VM " + vmName); // OfflineVmwareMigration: 2. create the worker with access to the data(store) vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, HypervisorHostHelper.getMinimumHostHardwareVersion(hyperHost, hyperHostInTargetCluster)); @@ -4870,14 +4868,14 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { String vmdkFileName = path + VMDK_EXTENSION; vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(sourceDsMo, vmdkFileName); if (!sourceDsMo.fileExists(vmdkDataStorePath)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path)); } vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(sourceDsMo, path, vmdkFileName); } if (!sourceDsMo.folderExists(String.format("[%s]", sourceDsMo.getName()), path) || !sourceDsMo.fileExists(vmdkDataStorePath)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, vmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, vmName)); } vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(sourceDsMo, vmName, vmdkFileName); } @@ -4885,8 +4883,8 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { vmdkDataStorePath = sourceDsMo.searchFileInSubFolders(vmdkFileName, true, null); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("attaching %s to %s for migration", vmdkDataStorePath, vmMo.getVmName())); + if 
(logger.isDebugEnabled()) { + logger.debug(String.format("attaching %s to %s for migration", vmdkDataStorePath, vmMo.getVmName())); } vmMo.attachDisk(new String[]{vmdkDataStorePath}, morSourceDS); } @@ -4896,23 +4894,23 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { vmMo = hyperHost.findVmOnPeerHyperHost(vmName); if (vmMo == null) { String msg = "VM " + vmName + " does not exist in VMware datacenter"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { VirtualDisk[] disks = vmMo.getAllDiskDevice(); String format = "disk %d is attached as %s"; for (VirtualDisk disk : disks) { - s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk))); + logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk))); } } // OfflineVmwareMigration: 5. create a relocate spec and perform Pair vdisk = vmMo.getDiskDevice(path); if (vdisk == null) { - if (s_logger.isTraceEnabled()) - s_logger.trace("migrate volume done (failed)"); + if (logger.isTraceEnabled()) + logger.trace("migrate volume done (failed)"); throw new CloudRuntimeException("No such disk device: " + path); } @@ -4926,26 +4924,26 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { answer = migrateAndAnswer(vmMo, cmd.getTargetPool().getUuid(), hyperHost, cmd); } catch (Exception e) { String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage()); - s_logger.error(msg, e); + logger.error(msg, e); answer = new Answer(cmd, false, msg); } finally { try { // OfflineVmwareMigration: worker *may* have been renamed vmName = vmMo.getVmName(); - s_logger.info("Dettaching disks before destroying worker VM '" + vmName + "' after volume migration"); + logger.info("Dettaching disks before destroying worker VM '" + vmName + "' after volume migration"); VirtualDisk[] disks = vmMo.getAllDiskDevice(); String format = "disk %d was 
migrated to %s"; for (VirtualDisk disk : disks) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk))); + if (logger.isTraceEnabled()) { + logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk))); } vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(destinationDsMo, vmMo.getVmdkFileBaseName(disk) + VMDK_EXTENSION); vmMo.detachDisk(vmdkDataStorePath, false); } - s_logger.info("Destroy worker VM '" + vmName + "' after volume migration"); + logger.info("Destroy worker VM '" + vmName + "' after volume migration"); vmMo.destroy(); } catch (Throwable e) { - s_logger.info("Failed to destroy worker VM: " + vmName); + logger.info("Failed to destroy worker VM: " + vmName); } } if (answer instanceof MigrateVolumeAnswer) { @@ -4958,12 +4956,12 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { if (!destinationDsMo.fileExists(vmdkDataStorePath)) { String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath); - s_logger.error(msg); + logger.error(msg); answer = new Answer(cmd, false, msg); } } catch (Exception e) { String msg = String.format("Migration of volume '%s' failed due to %s", cmd.getVolumePath(), e.getLocalizedMessage()); - s_logger.error(msg, e); + logger.error(msg, e); answer = new Answer(cmd, false, msg); } } @@ -5006,7 +5004,7 @@ private Answer execute(MigrateVolumeCommand cmd) { if (vmMo == null) { String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } vmName = vmMo.getName(); @@ -5014,7 +5012,7 @@ private Answer execute(MigrateVolumeCommand cmd) { if (morDs == null) { String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() + " to execute 
MigrateVolumeCommand"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -5028,8 +5026,8 @@ private Answer execute(MigrateVolumeCommand cmd) { String[] diskChain = matchingExistingDisk.getDiskChain(); DatastoreFile file = new DatastoreFile(diskChain[0]); if (!file.getFileBaseName().equalsIgnoreCase(volumePath)) { - if (s_logger.isInfoEnabled()) - s_logger.info("Detected disk-chain top file change on volume: " + volumePath + " -> " + file.getFileBaseName()); + if (logger.isInfoEnabled()) + logger.info("Detected disk-chain top file change on volume: " + volumePath + " -> " + file.getFileBaseName()); volumePath = file.getFileBaseName(); } } @@ -5068,16 +5066,16 @@ private Answer execute(MigrateVolumeCommand cmd) { if (!vmMo.changeDatastore(relocateSpec)) { throw new Exception("Change datastore operation failed during volume migration"); } else { - s_logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName); + logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName); } // Consolidate VM disks. // In case of a linked clone VM, if VM's disks are not consolidated, // further volume operations on the ROOT volume such as volume snapshot etc. will result in DB inconsistencies. 
if (!vmMo.consolidateVmDisks()) { - s_logger.warn("VM disk consolidation failed after storage migration."); + logger.warn("VM disk consolidation failed after storage migration."); } else { - s_logger.debug("Successfully consolidated disks of VM " + vmName + "."); + logger.debug("Successfully consolidated disks of VM " + vmName + "."); } // Update and return volume path and chain info because that could have changed after migration @@ -5095,7 +5093,7 @@ private Answer execute(MigrateVolumeCommand cmd) { return answer; } catch (Exception e) { String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new MigrateVolumeAnswer(cmd, false, msg, null); } } @@ -5161,7 +5159,7 @@ protected Answer execute(ModifyTargetsCommand cmd) { hostMOs.add(hostMO); } } catch (Exception ex) { - s_logger.error(ex.getMessage(), ex); + logger.error(ex.getMessage(), ex); throw new CloudRuntimeException(ex.getMessage(), ex); } @@ -5307,7 +5305,7 @@ private void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove try { _storageProcessor.handleTargets(add, targetTypeToRemove, isRemoveAsync, targets, hosts); } catch (Exception ex) { - s_logger.warn(ex.getMessage()); + logger.warn(ex.getMessage()); } } } @@ -5331,14 +5329,14 @@ protected Answer execute(DeleteStoragePoolCommand cmd) { } } catch (Throwable e) { if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); invalidateServiceContext(); } StorageFilerTO pool = cmd.getPool(); String msg = String.format("DeleteStoragePoolCommand (pool: [%s], path: [%s]) failed due to [%s].", pool.getHost(), pool.getPath(), VmwareHelper.getExceptionMessage(e)); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -5358,7 +5356,7 @@ protected AttachIsoAnswer 
execute(AttachIsoCommand cmd) { VirtualMachineMO vmMo = HypervisorHostHelper.findVmOnHypervisorHostOrPeer(hyperHost, cmd.getVmName()); if (vmMo == null) { String msg = "Unable to find VM in vSphere to execute AttachIsoCommand, vmName: " + cmd.getVmName(); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -5366,7 +5364,7 @@ protected AttachIsoAnswer execute(AttachIsoCommand cmd) { if (storeUrl == null) { if (!cmd.getIsoPath().equalsIgnoreCase(TemplateManager.VMWARE_TOOLS_ISO)) { String msg = "ISO store root url is not found in AttachIsoCommand"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } else { if (cmd.isAttach()) { @@ -5391,7 +5389,7 @@ protected AttachIsoAnswer execute(AttachIsoCommand cmd) { if (!isoPath.startsWith(storeUrl)) { assert (false); String msg = "ISO path does not start with the secondary storage root"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -5416,12 +5414,12 @@ protected AttachIsoAnswer execute(AttachIsoCommand cmd) { } catch (Throwable e) { if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); invalidateServiceContext(); } String message = String.format("AttachIsoCommand(%s) failed due to [%s].", cmd.isAttach()? 
"attach" : "detach", VmwareHelper.getExceptionMessage(e)); - s_logger.error(message, e); + logger.error(message, e); return new AttachIsoAnswer(cmd, false, message); } } @@ -5451,12 +5449,12 @@ public synchronized ManagedObjectReference prepareSecondaryDatastoreOnSpecificHo return morDatastore; } - private static String getSecondaryDatastoreUUID(String storeUrl) { + private String getSecondaryDatastoreUUID(String storeUrl) { String uuid = null; try { uuid = UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString(); } catch (UnsupportedEncodingException e) { - s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error."); + logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error."); } return uuid; } @@ -5609,11 +5607,11 @@ protected Answer execute(GetStorageStatsCommand cmd) { long used = capacity - free; - s_logger.debug(String.format("Datastore summary info: [storageId: %s, ], localPath: %s, poolType: %s, capacity: %s, free: %s, used: %s].", cmd.getStorageId(), + logger.debug(String.format("Datastore summary info: [storageId: %s, ], localPath: %s, poolType: %s, capacity: %s, free: %s, used: %s].", cmd.getStorageId(), cmd.getLocalPath(), cmd.getPooltype(), toHumanReadableSize(capacity), toHumanReadableSize(free), toHumanReadableSize(used))); if (capacity <= 0) { - s_logger.warn("Something is wrong with vSphere NFS datastore, rebooting ESX(ESXi) host should help"); + logger.warn("Something is wrong with vSphere NFS datastore, rebooting ESX(ESXi) host should help"); } return new GetStorageStatsAnswer(cmd, capacity, used); @@ -5621,17 +5619,17 @@ protected Answer execute(GetStorageStatsCommand cmd) { String msg = String.format("Could not find datastore for GetStorageStatsCommand: [storageId: %s, localPath: %s, poolType: %s].", cmd.getStorageId(), cmd.getLocalPath(), cmd.getPooltype()); - s_logger.error(msg); + logger.error(msg); return new GetStorageStatsAnswer(cmd, msg); } 
} catch (Throwable e) { if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); invalidateServiceContext(); } String msg = String.format("Unable to execute GetStorageStatsCommand(storageId : [%s], localPath: [%s], poolType: [%s]) due to [%s]", cmd.getStorageId(), cmd.getLocalPath(), cmd.getPooltype(), VmwareHelper.getExceptionMessage(e)); - s_logger.error(msg, e); + logger.error(msg, e); return new GetStorageStatsAnswer(cmd, msg); } } @@ -5645,8 +5643,8 @@ protected Answer execute(GetVncPortCommand cmd) { VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getName()); if (vmMo == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the owner VM for GetVncPortCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find the owner VM for GetVncPortCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter"); } vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getName()); @@ -5658,8 +5656,8 @@ protected Answer execute(GetVncPortCommand cmd) { Pair portInfo = vmMo.getVncPort(mgr.getManagementPortGroupByHost((HostMO) hyperHost)); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Found vnc port info. vm: " + cmd.getName() + " host: " + portInfo.first() + ", vnc port: " + portInfo.second()); + if (logger.isTraceEnabled()) { + logger.trace("Found vnc port info. vm: " + cmd.getName() + " host: " + portInfo.first() + ", vnc port: " + portInfo.second()); } return new GetVncPortAnswer(cmd, portInfo.first(), portInfo.second()); } catch (Throwable e) { @@ -5684,7 +5682,7 @@ protected Answer execute(PingTestCommand cmd) { if (result.first()) return new Answer(cmd); } catch (Exception e) { - s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. 
failure due to " + VmwareHelper.getExceptionMessage(e), e); + logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new Answer(cmd, false, "PingTestCommand failed"); } else { @@ -5708,7 +5706,7 @@ protected Answer execute(PingTestCommand cmd) { } } } catch (Exception e) { - s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to " + VmwareHelper.getExceptionMessage(e), e); + logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new Answer(cmd, false, "PingTestCommand failed"); @@ -5720,7 +5718,7 @@ protected Answer execute(CheckOnHostCommand cmd) { } protected Answer execute(ModifySshKeysCommand cmd) { - s_logger.debug(String.format("Executing resource command %s.", cmd.getClass().getSimpleName())); + logger.debug(String.format("Executing resource command %s.", cmd.getClass().getSimpleName())); return new Answer(cmd); } @@ -5755,17 +5753,17 @@ protected Answer execute(GetVmIpAddressCommand cmd) { } } else { details += "VM " + vmName + " no longer exists on vSphere host: " + hyperHost.getHyperHostName(); - s_logger.info(details); + logger.info(details); } } catch (Throwable e) { createLogMessageException(e, cmd); details = String.format("%s. 
Encountered exception: [%s].", details, VmwareHelper.getExceptionMessage(e)); - s_logger.error(details); + logger.error(details); } answer = new Answer(cmd, result, details); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Returning GetVmIpAddressAnswer: " + _gson.toJson(answer)); + if (logger.isTraceEnabled()) { + logger.trace("Returning GetVmIpAddressAnswer: " + _gson.toJson(answer)); } return answer; } @@ -5804,15 +5802,15 @@ protected Answer execute(UnregisterVMCommand cmd) { } return new Answer(cmd, true, "unregister succeeded"); } catch (Exception e) { - s_logger.warn("We are not able to unregister VM " + VmwareHelper.getExceptionMessage(e)); + logger.warn("We are not able to unregister VM " + VmwareHelper.getExceptionMessage(e)); } String msg = "Expunge failed in vSphere. vm: " + cmd.getVmName(); - s_logger.warn(msg); + logger.warn(msg); return new Answer(cmd, false, msg); } else { String msg = "Unable to find the VM in vSphere to unregister, assume it is already removed. VM: " + cmd.getVmName(); - s_logger.warn(msg); + logger.warn(msg); return new Answer(cmd, true, msg); } } catch (Exception e) { @@ -5841,7 +5839,7 @@ protected Answer execute(UnregisterNicCommand cmd) { return new Answer(cmd, true, "Nothing to do"); } - s_logger.debug("Cleaning up portgroup " + cmd.getNicUuid() + " on switch " + _guestTrafficInfo.getVirtualSwitchName()); + logger.debug("Cleaning up portgroup " + cmd.getNicUuid() + " on switch " + _guestTrafficInfo.getVirtualSwitchName()); VmwareContext context = getServiceContext(); VmwareHypervisorHost host = getHyperHost(context); ManagedObjectReference clusterMO = host.getHyperHostCluster(); @@ -5856,7 +5854,7 @@ protected Answer execute(UnregisterNicCommand cmd) { for (ManagedObjectReference hostMOR : hosts) { HostMO hostMo = new HostMO(context, hostMOR); hostMo.deletePortGroup(cmd.getNicUuid().toString()); - s_logger.debug("Removed portgroup " + cmd.getNicUuid() + " from host " + hostMo.getHostName()); + logger.debug("Removed 
portgroup " + cmd.getNicUuid() + " from host " + hostMo.getHostName()); } return new Answer(cmd, true, "Unregistered resources for NIC " + cmd.getNicUuid()); } catch (Exception e) { @@ -5876,13 +5874,13 @@ public void cleanupNetwork(DatacenterMO dcMO, NetworkDetails netDetails) { } } } catch(Throwable e) { - s_logger.warn("Unable to cleanup network due to exception: " + e.getMessage(), e); + logger.warn("Unable to cleanup network due to exception: " + e.getMessage(), e); } } private void cleanupPortGroup(DatacenterMO dcMO, String portGroupName) throws Exception { if (StringUtils.isBlank(portGroupName)) { - s_logger.debug("Unspecified network port group, couldn't cleanup"); + logger.debug("Unspecified network port group, couldn't cleanup"); return; } @@ -5905,7 +5903,7 @@ private boolean areVMsOnNetwork(DatacenterMO dcMO, NetworkDetails netDetails) th NetworkMO networkMo = new NetworkMO(host.getContext(), netDetails.getNetworkMor()); List vms = networkMo.getVMsOnNetwork(); if (!CollectionUtils.isEmpty(vms)) { - s_logger.debug("Network port group: " + netDetails.getName() + " is in use"); + logger.debug("Network port group: " + netDetails.getName() + " is in use"); return true; } } @@ -5945,7 +5943,7 @@ public PingCommand getCurrentStatus(long id) { return null; } } catch (Exception e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); return null; } return new PingRoutingCommand(getType(), id, syncHostVmStates()); @@ -5964,7 +5962,7 @@ private void gcAndKillHungWorkerVMs() { if (hyperHost.isHyperHostConnected()) { mgr.gcLeftOverVMs(context); - s_logger.info("Scan hung worker VM to recycle"); + logger.info("Scan hung worker VM to recycle"); int workerKey = ((HostMO) hyperHost).getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_WORKER); int workerTagKey = ((HostMO) hyperHost).getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_WORKER_TAG); @@ -6000,7 +5998,7 @@ private void gcAndKillHungWorkerVMs() { 
recycle = mgr.needRecycle(workerTag); if (recycle) { - s_logger.info("Recycle pending worker VM: " + vmMo.getName()); + logger.info("Recycle pending worker VM: " + vmMo.getName()); vmMo.cancelPendingTasks(); vmMo.powerOff(); @@ -6011,12 +6009,12 @@ private void gcAndKillHungWorkerVMs() { } } } else { - s_logger.error("Host is no longer connected."); + logger.error("Host is no longer connected."); } } catch (Throwable e) { if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); invalidateServiceContext(); } } @@ -6036,7 +6034,7 @@ public StartupCommand[] initialize() { VmwareHypervisorHost hyperHost = getHyperHost(context); assert (hyperHost instanceof HostMO); if (!((HostMO) hyperHost).isHyperHostConnected()) { - s_logger.info("Host " + hyperHost.getHyperHostName() + " is not in connected state"); + logger.info("Host " + hyperHost.getHyperHostName() + " is not in connected state"); return null; } @@ -6047,7 +6045,7 @@ public StartupCommand[] initialize() { } catch (Exception e) { String msg = "VmwareResource intialize() failed due to : " + VmwareHelper.getExceptionMessage(e); - s_logger.error(msg); + logger.error(msg); invalidateServiceContext(); return null; } @@ -6105,16 +6103,16 @@ private List initializeLocalStorage() { cmd.setPod(_pod); cmd.setCluster(_cluster); - s_logger.info("Add local storage startup command: " + _gson.toJson(cmd)); + logger.info("Add local storage startup command: " + _gson.toJson(cmd)); storageCmds.add(cmd); } } else { - s_logger.info("Cluster host does not support local storage, skip it"); + logger.info("Cluster host does not support local storage, skip it"); } } catch (Exception e) { String msg = "initializing local storage failed due to : " + VmwareHelper.getExceptionMessage(e); - s_logger.error(msg); + logger.error(msg); invalidateServiceContext(); throw new 
CloudRuntimeException(msg); } @@ -6134,14 +6132,14 @@ protected void fillHostInfo(StartupRoutingCommand cmd) { fillHostNetworkInfo(serviceContext, cmd); fillHostDetailsInfo(serviceContext, details); } catch (RuntimeFaultFaultMsg e) { - s_logger.error("RuntimeFault while retrieving host info: " + e.toString(), e); + logger.error("RuntimeFault while retrieving host info: " + e.toString(), e); throw new CloudRuntimeException("RuntimeFault while retrieving host info"); } catch (RemoteException e) { - s_logger.error("RemoteException while retrieving host info: " + e.toString(), e); + logger.error("RemoteException while retrieving host info: " + e.toString(), e); invalidateServiceContext(); throw new CloudRuntimeException("RemoteException while retrieving host info"); } catch (Exception e) { - s_logger.error("Exception while retrieving host info: " + e.toString(), e); + logger.error("Exception while retrieving host info: " + e.toString(), e); invalidateServiceContext(); throw new CloudRuntimeException("Exception while retrieving host info: " + e.toString()); } @@ -6175,7 +6173,7 @@ private String getIqn() { } } } catch (Exception ex) { - s_logger.info("Could not locate an IQN for this host."); + logger.info("Could not locate an IQN for this host."); } return null; @@ -6186,8 +6184,8 @@ private void fillHostHardwareInfo(VmwareContext serviceContext, StartupRoutingCo VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); VmwareHypervisorHostResourceSummary summary = hyperHost.getHyperHostResourceSummary(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Startup report on host hardware info. " + _gson.toJson(summary)); + if (logger.isInfoEnabled()) { + logger.info("Startup report on host hardware info. 
" + _gson.toJson(summary)); } cmd.setCaps("hvm"); @@ -6211,8 +6209,8 @@ private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCom throw new Exception("No ESX(i) host found"); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Startup report on host network info. " + _gson.toJson(summary)); + if (logger.isInfoEnabled()) { + logger.info("Startup report on host network info. " + _gson.toJson(summary)); } cmd.setPrivateIpAddress(summary.getHostIp()); @@ -6225,7 +6223,7 @@ private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCom } catch (Throwable e) { String msg = "querying host network info failed due to " + VmwareHelper.getExceptionMessage(e); - s_logger.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(msg); } } @@ -6284,7 +6282,7 @@ protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervi } while (val != startVal); if (vncPort == 0) { - s_logger.info("we've run out of range for ports between 5900-5964 for the cluster, we will try port range at 59000-60000"); + logger.info("we've run out of range for ports between 5900-5964 for the cluster, we will try port range at 59000-60000"); Pair additionalRange = mgr.getAddiionalVncPortRange(); maxVncPorts = additionalRange.second(); @@ -6304,8 +6302,8 @@ protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervi throw new Exception("Unable to find an available VNC port on host"); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Configure VNC port for VM " + vmName + ", port: " + vncPort + ", host: " + vmOwnerHost.getHyperHostName()); + if (logger.isInfoEnabled()) { + logger.info("Configure VNC port for VM " + vmName + ", port: " + vncPort + ", host: " + vmOwnerHost.getHyperHostName()); } return VmwareHelper.composeVncOptions(optionsToMerge, true, vncPassword, vncPort, keyboardLayout); @@ -6314,29 +6312,29 @@ protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervi 
mgr.endExclusiveOperation(); } catch (Throwable e) { assert (false); - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); } } } private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArchitecture, String guestOs, String cloudGuestOs) { if (cpuArchitecture == null) { - s_logger.warn("CPU arch is not set, default to i386. guest os: " + guestOs); + logger.warn("CPU arch is not set, default to i386. guest os: " + guestOs); cpuArchitecture = "i386"; } if (cloudGuestOs == null) { - s_logger.warn("Guest OS mapping name is not set for guest os: " + guestOs); + logger.warn("Guest OS mapping name is not set for guest os: " + guestOs); } VirtualMachineGuestOsIdentifier identifier = null; try { if (cloudGuestOs != null) { identifier = VirtualMachineGuestOsIdentifier.fromValue(cloudGuestOs); - s_logger.debug("Using mapping name : " + identifier.toString()); + logger.debug("Using mapping name : " + identifier.toString()); } } catch (IllegalArgumentException e) { - s_logger.warn("Unable to find Guest OS Identifier in VMware for mapping name: " + cloudGuestOs + ". Continuing with defaults."); + logger.warn("Unable to find Guest OS Identifier in VMware for mapping name: " + cloudGuestOs + ". 
Continuing with defaults."); } if (identifier != null) { return identifier; @@ -6353,7 +6351,7 @@ private HashMap getHostVmStateReport() throws Ex int key = ((HostMO) hyperHost).getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_VM_INTERNAL_NAME); if (key == 0) { - s_logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!"); + logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!"); } String instanceNameCustomField = "value[" + key + "]"; @@ -6405,7 +6403,7 @@ private HashMap getVmStates() throws Exception { int key = ((HostMO) hyperHost).getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_VM_INTERNAL_NAME); if (key == 0) { - s_logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!"); + logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!"); } String instanceNameCustomField = "value[" + key + "]"; @@ -6497,7 +6495,7 @@ private HashMap getVmStats(List vmNames) throws Ex int key = ((HostMO) hyperHost).getCustomFieldKey("VirtualMachine", CustomFieldConstants.CLOUD_VM_INTERNAL_NAME); if (key == 0) { - s_logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!"); + logger.warn("Custom field " + CustomFieldConstants.CLOUD_VM_INTERNAL_NAME + " is not registered ?!"); } String instanceNameCustomField = "value[" + key + "]"; @@ -6633,7 +6631,7 @@ private HashMap getVmStats(List vmNames) throws Ex } } } catch (Exception e) { - s_logger.error(String.format("Unable to execute PerfQuerySpec due to: [%s]. The window interval is enabled in vCenter?", VmwareHelper.getExceptionMessage(e)), e); + logger.error(String.format("Unable to execute PerfQuerySpec due to: [%s]. 
The window interval is enabled in vCenter?", VmwareHelper.getExceptionMessage(e)), e); } } @@ -6694,7 +6692,7 @@ protected long[] getNetworkStats(String privateIP, String publicIp) { stats[1] += Long.parseLong(splitResult[i++]); } } catch (Throwable e) { - s_logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e); + logger.warn("Unable to parse return from script return of network usage command: " + e.toString(), e); } } return stats; @@ -6708,7 +6706,7 @@ protected String connect(final String vmName, final String ipAddress, final int // VM patching/rebooting time that may need int retry = _retry; while (System.currentTimeMillis() - startTick <= _opsTimeout || --retry > 0) { - s_logger.info("Trying to connect to " + ipAddress); + logger.info("Trying to connect to " + ipAddress); try (SocketChannel sch = SocketChannel.open();) { sch.configureBlocking(true); sch.socket().setSoTimeout(5000); @@ -6717,7 +6715,7 @@ protected String connect(final String vmName, final String ipAddress, final int sch.connect(addr); return null; } catch (IOException e) { - s_logger.info("Could not connect to " + ipAddress + " due to " + e.toString()); + logger.info("Could not connect to " + ipAddress + " due to " + e.toString()); if (e instanceof ConnectException) { // if connection is refused because of VM is being started, // we give it more sleep time @@ -6725,7 +6723,7 @@ protected String connect(final String vmName, final String ipAddress, final int try { Thread.sleep(5000); } catch (InterruptedException ex) { - s_logger.debug("[ignored] interrupted while waiting to retry connect after failure.", e); + logger.debug("[ignored] interrupted while waiting to retry connect after failure.", e); } } } @@ -6733,11 +6731,11 @@ protected String connect(final String vmName, final String ipAddress, final int try { Thread.sleep(1000); } catch (InterruptedException ex) { - s_logger.debug("[ignored] interrupted while waiting to retry connect."); + 
logger.debug("[ignored] interrupted while waiting to retry connect."); } } - s_logger.info("Unable to logon to " + ipAddress); + logger.info("Unable to logon to " + ipAddress); return "Unable to connect"; } @@ -6776,10 +6774,10 @@ private static HostStatsEntry getHyperHostStats(VmwareHypervisorHost hyperHost) return entry; } - private static String getRouterSshControlIp(NetworkElementCommand cmd) { + private String getRouterSshControlIp(NetworkElementCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); - if (s_logger.isDebugEnabled()) - s_logger.debug("Use router's private IP for SSH control. IP : " + routerIp); + if (logger.isDebugEnabled()) + logger.debug("Use router's private IP for SSH control. IP : " + routerIp); return routerIp; } @@ -6860,7 +6858,7 @@ else if (value != null && value.equalsIgnoreCase("ide")) if (intObj != null) _portsPerDvPortGroup = intObj.intValue(); - s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + logger.info("VmwareResource network configuration info." 
+ " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over " + _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName()); @@ -6891,12 +6889,12 @@ else if (value != null && value.equalsIgnoreCase("ide")) throw new ConfigurationException("Unable to configure VirtualRoutingResource"); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Successfully configured VmwareResource."); + if (logger.isTraceEnabled()) { + logger.trace("Successfully configured VmwareResource."); } return true; } catch (Exception e) { - s_logger.error("Unexpected Exception ", e); + logger.error("Unexpected Exception ", e); throw new ConfigurationException("Failed to configure VmwareResource due to unexpect exception."); } finally { recycleServiceContext(); @@ -6944,24 +6942,24 @@ public VmwareContext getServiceContext(Command cmd) { // Before re-using the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls. if (context.getPoolKey().equals(poolKey)) { if (context.validate()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ThreadLocal context is still valid, just reuse"); + if (logger.isTraceEnabled()) { + logger.trace("ThreadLocal context is still valid, just reuse"); } return context; } else { - s_logger.info("Validation of the context failed, dispose and use a new one"); + logger.info("Validation of the context failed, dispose and use a new one"); invalidateServiceContext(context); } } else { // Exisitng ThreadLocal context corresponds to a different vCenter API session. Why has it not been recycled? - s_logger.warn("ThreadLocal VMware context: " + poolKey + " doesn't correspond to the right vCenter. 
Expected VMware context: " + context.getPoolKey()); + logger.warn("ThreadLocal VMware context: " + poolKey + " doesn't correspond to the right vCenter. Expected VMware context: " + context.getPoolKey()); } } try { context = VmwareContextFactory.getContext(_vCenterAddress, _username, _password); s_serviceContext.set(context); } catch (Exception e) { - s_logger.error("Unable to connect to vSphere server: " + _vCenterAddress, e); + logger.error("Unable to connect to vSphere server: " + _vCenterAddress, e); throw new CloudRuntimeException("Unable to connect to vSphere server: " + _vCenterAddress); } return context; @@ -6976,17 +6974,17 @@ public void invalidateServiceContext(VmwareContext context) { context.close(); } - private static void recycleServiceContext() { + private void recycleServiceContext() { VmwareContext context = s_serviceContext.get(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reset threadlocal context to null"); + if (logger.isTraceEnabled()) { + logger.trace("Reset threadlocal context to null"); } s_serviceContext.set(null); if (context != null) { assert (context.getPool() != null); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Recycling threadlocal context to pool"); + if (logger.isTraceEnabled()) { + logger.trace("Recycling threadlocal context to pool"); } context.getPool().registerContext(context); } @@ -7054,16 +7052,16 @@ public Answer execute(DestroyCommand cmd) { VirtualMachineMO vmMo = findVmOnDatacenter(context, hyperHost, vol); if (vmMo != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Destroy template volume " + vol.getPath()); + if (logger.isInfoEnabled()) { + logger.info("Destroy template volume " + vol.getPath()); } if (vmMo.isTemplate()) { vmMo.markAsVirtualMachine(hyperHost.getHyperHostOwnerResourcePool(), hyperHost.getMor()); } vmMo.destroy(); } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Template volume " + vol.getPath() + " is not found, no need to delete."); + if (logger.isInfoEnabled()) 
{ + logger.info("Template volume " + vol.getPath() + " is not found, no need to delete."); } } return new Answer(cmd, true, "Success"); @@ -7088,7 +7086,7 @@ protected VirtualMachineMO findVmOnDatacenter(VmwareContext context, VmwareHyper DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); if (dcMo.getMor() == null) { String msg = "Unable to find VMware DC"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } return dcMo.findVm(vol.getPath()); @@ -7101,7 +7099,7 @@ protected File getSystemVmKeyFile() { return s_systemVmKeyFile; } - private static void syncFetchSystemVmKeyFile() { + private void syncFetchSystemVmKeyFile() { synchronized (s_syncLockObjectFetchKeyFile) { if (s_systemVmKeyFile == null) { s_systemVmKeyFile = fetchSystemVmKeyFile(); @@ -7109,9 +7107,9 @@ private static void syncFetchSystemVmKeyFile() { } } - private static File fetchSystemVmKeyFile() { + private File fetchSystemVmKeyFile() { String filePath = s_relativePathSystemVmKeyFileInstallDir; - s_logger.debug("Looking for file [" + filePath + "] in the classpath."); + logger.debug("Looking for file [" + filePath + "] in the classpath."); URL url = Script.class.getClassLoader().getResource(filePath); File keyFile = null; if (url != null) { @@ -7120,10 +7118,10 @@ private static File fetchSystemVmKeyFile() { if (keyFile == null || !keyFile.exists()) { filePath = s_defaultPathSystemVmKeyFile; keyFile = new File(filePath); - s_logger.debug("Looking for file [" + filePath + "] in the classpath."); + logger.debug("Looking for file [" + filePath + "] in the classpath."); } if (!keyFile.exists()) { - s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); + logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } @@ -7164,7 +7162,7 @@ private Answer execute(GetUnmanagedInstancesCommand cmd) { } } } catch (Exception e) { - 
s_logger.info("GetUnmanagedInstancesCommand failed due to " + VmwareHelper.getExceptionMessage(e)); + logger.info("GetUnmanagedInstancesCommand failed due to " + VmwareHelper.getExceptionMessage(e)); } return new GetUnmanagedInstancesAnswer(cmd, "", unmanagedInstances); } @@ -7175,7 +7173,7 @@ private Answer execute(PrepareUnmanageVMInstanceCommand cmd) { String instanceName = cmd.getInstanceName(); try { - s_logger.debug(String.format("Verify if VMware instance: [%s] is available before unmanaging VM.", cmd.getInstanceName())); + logger.debug(String.format("Verify if VMware instance: [%s] is available before unmanaging VM.", cmd.getInstanceName())); ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dataCenterMo = new DatacenterMO(getServiceContext(), dcMor); @@ -7184,7 +7182,7 @@ private Answer execute(PrepareUnmanageVMInstanceCommand cmd) { return new PrepareUnmanageVMInstanceAnswer(cmd, false, String.format("Cannot find VM with name [%s] in datacenter [%s].", instanceName, dataCenterMo.getName())); } } catch (Exception e) { - s_logger.error("Error trying to verify if VM to unmanage exists", e); + logger.error("Error trying to verify if VM to unmanage exists", e); return new PrepareUnmanageVMInstanceAnswer(cmd, false, "Error: " + e.getMessage()); } @@ -7235,12 +7233,12 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h vmMo = sourceHyperHost.findVmOnHyperHost(vmName); if (vmMo == null) { String msg = String.format("VM: %s does not exist on host: %s", vmName, sourceHyperHost.getHyperHostName()); - s_logger.warn(msg); + logger.warn(msg); // find VM through source host (VM is not at the target host yet) vmMo = dcMo.findVm(vmName); if (vmMo == null) { msg = String.format("VM: %s does not exist on datacenter: %s", vmName, dcMo.getName()); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } // VM host has changed @@ -7256,7 +7254,7 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h 
morDatastore = getTargetDatastoreMOReference(poolUuid, dsHost); if (morDatastore == null) { String msg = String.format("Unable to find the target datastore: %s on host: %s to execute migration", poolUuid, dsHost.getHyperHostName()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } relocateSpec.setDatastore(morDatastore); @@ -7266,13 +7264,13 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h for (Pair entry : volToFiler) { VolumeTO volume = entry.first(); StorageFilerTO filerTo = entry.second(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Preparing spec for volume: %s to migrate it to datastore: %s", volume.getName(), filerTo.getUuid())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Preparing spec for volume: %s to migrate it to datastore: %s", volume.getName(), filerTo.getUuid())); } ManagedObjectReference morVolumeDatastore = getTargetDatastoreMOReference(filerTo.getUuid(), dsHost); if (morVolumeDatastore == null) { String msg = String.format("Unable to find the target datastore: %s in datacenter: %s to execute migration", filerTo.getUuid(), dcMo.getName()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -7345,7 +7343,7 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h if (!vmMo.changeDatastore(relocateSpec)) { throw new Exception("Change datastore operation failed during storage migration"); } else { - s_logger.debug(String.format("Successfully migrated storage of VM: %s to target datastore(s)", vmName)); + logger.debug(String.format("Successfully migrated storage of VM: %s to target datastore(s)", vmName)); } // Migrate VM to target host. 
if (targetHyperHost != null) { @@ -7353,7 +7351,7 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h if (!vmMo.migrate(morPool, targetHyperHost.getMor())) { throw new Exception("VM migration to target host failed during storage migration"); } else { - s_logger.debug(String.format("Successfully migrated VM: %s from host %s to %s", vmName , sourceHyperHost.getHyperHostName(), targetHyperHost.getHyperHostName())); + logger.debug(String.format("Successfully migrated VM: %s from host %s to %s", vmName , sourceHyperHost.getHyperHostName(), targetHyperHost.getHyperHostName())); } } } else { @@ -7369,16 +7367,16 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h if (targetHyperHost != null) { msg = String.format("%s from host %s to %s", msg, sourceHyperHost.getHyperHostName(), targetHyperHost.getHyperHostName()); } - s_logger.debug(msg); + logger.debug(msg); } } // Consolidate VM disks. // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies. if (!vmMo.consolidateVmDisks()) { - s_logger.warn("VM disk consolidation failed after storage migration. Yet proceeding with VM migration."); + logger.warn("VM disk consolidation failed after storage migration. 
Yet proceeding with VM migration."); } else { - s_logger.debug(String.format("Successfully consolidated disks of VM: %s", vmName)); + logger.debug(String.format("Successfully consolidated disks of VM: %s", vmName)); } if (MapUtils.isNotEmpty(volumeDeviceKey)) { @@ -7407,21 +7405,21 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h } } catch (Throwable e) { if (e instanceof RemoteException) { - s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context"); + logger.warn("Encountered remote exception at vCenter, invalidating VMware session context"); invalidateServiceContext(); } throw e; } finally { // Cleanup datastores mounted on source host for (String mountedDatastore : mountedDatastoresAtSource) { - s_logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName()); + logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName()); try { sourceHyperHost.unmountDatastore(mountedDatastore); } catch (Exception unmountEx) { - s_logger.warn("Failed to unmount datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName() + ". Seems the datastore is still being used by " + sourceHyperHost.getHyperHostName() + + logger.warn("Failed to unmount datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName() + ". Seems the datastore is still being used by " + sourceHyperHost.getHyperHostName() + ". Please unmount manually to cleanup."); } - s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName()); + logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName()); } } @@ -7434,7 +7432,7 @@ private String getMountedDatastoreName(VmwareHypervisorHost sourceHyperHost, Str // If host version is below 5.1 then simultaneous change of VM's datastore and host is not supported. 
// So since only the datastore will be changed first, ensure the target datastore is mounted on source host. if (sourceHostApiVersion.compareTo("5.1") < 0) { - s_logger.debug(String.format("Host: %s version is %s, vMotion without shared storage cannot be done. Check source host has target datastore mounted or can be mounted", sourceHyperHost.getHyperHostName(), sourceHostApiVersion)); + logger.debug(String.format("Host: %s version is %s, vMotion without shared storage cannot be done. Check source host has target datastore mounted or can be mounted", sourceHyperHost.getHyperHostName(), sourceHostApiVersion)); ManagedObjectReference morVolumeDatastoreAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(sourceHyperHost, filerTo.getUuid()); String volumeDatastoreName = filerTo.getUuid().replace("-", ""); String volumeDatastoreHost = filerTo.getHost(); @@ -7449,20 +7447,20 @@ private String getMountedDatastoreName(VmwareHypervisorHost sourceHyperHost, Str throw new Exception("Unable to mount NFS datastore " + volumeDatastoreHost + ":/" + volumeDatastorePath + " on host: " + sourceHyperHost.getHyperHostName()); } mountedDatastoreName = volumeDatastoreName; - s_logger.debug("Mounted NFS datastore " + volumeDatastoreHost + ":/" + volumeDatastorePath + " on host: " + sourceHyperHost.getHyperHostName()); + logger.debug("Mounted NFS datastore " + volumeDatastoreHost + ":/" + volumeDatastorePath + " on host: " + sourceHyperHost.getHyperHostName()); } } // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration. 
if (filerTo.getType().equals(StoragePoolType.VMFS)) { if (morVolumeDatastoreAtSource == null) { - s_logger.warn("Host: " + sourceHyperHost.getHyperHostName() + " version is below 5.1, target VMFS datastore(s) need to be manually mounted on host for successful storage migration."); + logger.warn("Host: " + sourceHyperHost.getHyperHostName() + " version is below 5.1, target VMFS datastore(s) need to be manually mounted on host for successful storage migration."); throw new Exception("Target VMFS datastore: " + volumeDatastorePath + " is not mounted on host: " + sourceHyperHost.getHyperHostName()); } DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morVolumeDatastoreAtSource); String srcHostValue = sourceHyperHost.getMor().getValue(); if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) { - s_logger.warn("Host " + sourceHyperHost.getHyperHostName() + " version is below 5.1, target VMFS datastore(s) need to be accessible to host for a successful storage migration."); + logger.warn("Host " + sourceHyperHost.getHyperHostName() + " version is below 5.1, target VMFS datastore(s) need to be accessible to host for a successful storage migration."); throw new Exception("Target VMFS datastore: " + volumeDatastorePath + " is not accessible on host: " + sourceHyperHost.getHyperHostName()); } } @@ -7491,13 +7489,13 @@ public String acquireVirtualMachineVncTicket(String vmInternalCSName) throws Exc private GetVmVncTicketAnswer execute(GetVmVncTicketCommand cmd) { String vmInternalName = cmd.getVmInternalName(); - s_logger.info("Getting VNC ticket for VM " + vmInternalName); + logger.info("Getting VNC ticket for VM " + vmInternalName); try { String ticket = acquireVirtualMachineVncTicket(vmInternalName); boolean result = StringUtils.isNotBlank(ticket); return new GetVmVncTicketAnswer(ticket, result, result ? 
"" : "Empty ticket obtained"); } catch (Exception e) { - s_logger.error("Error getting VNC ticket for VM " + vmInternalName, e); + logger.error("Error getting VNC ticket for VM " + vmInternalName, e); return new GetVmVncTicketAnswer(null, false, e.getLocalizedMessage()); } } @@ -7505,7 +7503,7 @@ private GetVmVncTicketAnswer execute(GetVmVncTicketCommand cmd) { protected CheckGuestOsMappingAnswer execute(CheckGuestOsMappingCommand cmd) { String guestOsName = cmd.getGuestOsName(); String guestOsMappingName = cmd.getGuestOsHypervisorMappingName(); - s_logger.info("Checking guest os mapping name: " + guestOsMappingName + " for the guest os: " + guestOsName + " in the hypervisor"); + logger.info("Checking guest os mapping name: " + guestOsMappingName + " for the guest os: " + guestOsName + " in the hypervisor"); try { VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); @@ -7513,14 +7511,14 @@ protected CheckGuestOsMappingAnswer execute(CheckGuestOsMappingCommand cmd) { if (guestOsDescriptor == null) { return new CheckGuestOsMappingAnswer(cmd, "Guest os mapping name: " + guestOsMappingName + " not found in the hypervisor"); } - s_logger.debug("Matching hypervisor guest os - id: " + guestOsDescriptor.getId() + ", full name: " + guestOsDescriptor.getFullName() + ", family: " + guestOsDescriptor.getFamily()); + logger.debug("Matching hypervisor guest os - id: " + guestOsDescriptor.getId() + ", full name: " + guestOsDescriptor.getFullName() + ", family: " + guestOsDescriptor.getFamily()); if (guestOsDescriptor.getFullName().equalsIgnoreCase(guestOsName)) { - s_logger.debug("Hypervisor guest os name in the descriptor matches with os name: " + guestOsName); + logger.debug("Hypervisor guest os name in the descriptor matches with os name: " + guestOsName); } - s_logger.info("Hypervisor guest os name in the descriptor matches with os mapping: " + guestOsMappingName + " from user"); + logger.info("Hypervisor guest os name in the 
descriptor matches with os mapping: " + guestOsMappingName + " from user"); return new CheckGuestOsMappingAnswer(cmd); } catch (Exception e) { - s_logger.error("Failed to check the hypervisor guest os mapping name: " + guestOsMappingName, e); + logger.error("Failed to check the hypervisor guest os mapping name: " + guestOsMappingName, e); return new CheckGuestOsMappingAnswer(cmd, e.getLocalizedMessage()); } } @@ -7588,7 +7586,7 @@ protected ListDataStoreObjectsAnswer execute(ListDataStoreObjectsCommand cmd) { return new ListDataStoreObjectsAnswer(false, count, names, paths, absPaths, isDirs, sizes, modifiedList); } String errorMsg = String.format("Failed to list files at path [%s] due to: [%s].", path, e.getMessage()); - s_logger.error(errorMsg, e); + logger.error(errorMsg, e); } return null; @@ -7596,7 +7594,7 @@ protected ListDataStoreObjectsAnswer execute(ListDataStoreObjectsCommand cmd) { protected GetHypervisorGuestOsNamesAnswer execute(GetHypervisorGuestOsNamesCommand cmd) { String keyword = cmd.getKeyword(); - s_logger.info("Getting guest os names in the hypervisor"); + logger.info("Getting guest os names in the hypervisor"); try { VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); @@ -7620,7 +7618,7 @@ protected GetHypervisorGuestOsNamesAnswer execute(GetHypervisorGuestOsNamesComma } return new GetHypervisorGuestOsNamesAnswer(cmd, hypervisorGuestOsNames); } catch (Exception e) { - s_logger.error("Failed to get the hypervisor guest names due to: " + e.getLocalizedMessage(), e); + logger.error("Failed to get the hypervisor guest names due to: " + e.getLocalizedMessage(), e); return new GetHypervisorGuestOsNamesAnswer(cmd, e.getLocalizedMessage()); } } @@ -7641,7 +7639,7 @@ private Answer execute(PrepareForBackupRestorationCommand command) { if (vmMo == null) { String msg = "VM " + vmName + " no longer exists to execute PrepareForBackupRestorationCommand command"; - s_logger.error(msg); + logger.error(msg); 
throw new Exception(msg); } @@ -7649,7 +7647,7 @@ private Answer execute(PrepareForBackupRestorationCommand command) { return new Answer(command, true, "success"); } catch (Exception e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); return new Answer(command, false, "Unable to execute PrepareForBackupRestorationCommand due to " + e.toString()); } } @@ -7657,7 +7655,7 @@ private Answer execute(PrepareForBackupRestorationCommand command) { private Integer getVmwareWindowTimeInterval() { Integer windowInterval = VmwareManager.VMWARE_STATS_TIME_WINDOW.value(); if (windowInterval == null || windowInterval < 20) { - s_logger.error(String.format("The window interval can't be [%s]. Therefore we will use the default value of [%s] seconds.", windowInterval, VmwareManager.VMWARE_STATS_TIME_WINDOW.defaultValue())); + logger.error(String.format("The window interval can't be [%s]. Therefore we will use the default value of [%s] seconds.", windowInterval, VmwareManager.VMWARE_STATS_TIME_WINDOW.defaultValue())); windowInterval = Integer.valueOf(VmwareManager.VMWARE_STATS_TIME_WINDOW.defaultValue()); } return windowInterval; @@ -7666,21 +7664,21 @@ private Integer getVmwareWindowTimeInterval() { @Override public String createLogMessageException(Throwable e, Command command) { if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context."); + logger.warn("Encounter remote exception to vCenter, invalidate VMware session context."); invalidateServiceContext(); } String message = String.format("%s failed due to [%s].", command.getClass().getSimpleName(), VmwareHelper.getExceptionMessage(e)); - s_logger.error(message, e); + logger.error(message, e); return message; } private void logCommand(Command cmd) { try { - s_logger.debug(String.format(EXECUTING_RESOURCE_COMMAND, cmd.getClass().getSimpleName(), _gson.toJson(cmd))); + logger.debug(String.format(EXECUTING_RESOURCE_COMMAND, 
cmd.getClass().getSimpleName(), _gson.toJson(cmd))); } catch (Exception e) { - s_logger.error(String.format("Failed to log command %s due to: [%s].", cmd.getClass().getSimpleName(), e.getMessage()), e); + logger.error(String.format("Failed to log command %s due to: [%s].", cmd.getClass().getSimpleName(), e.getMessage()), e); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java index 136e44261b50..beac489acdb3 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.dc.ClusterDetailsDao; @@ -65,7 +64,6 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { @Inject PortProfileDao _ppDao; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalLoadBalancerDeviceManagerImpl.class); @DB //public CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String username, String password, ServerResource resource, String vsmName) { @@ -107,7 +105,7 @@ CiscoNexusVSMDeviceVO addCiscoNexusVSM(long clusterId, String ipaddress, String netconfClient = new NetconfHelper(ipaddress, username, password); } catch (CloudRuntimeException e) { String msg = "Failed to connect to Nexus VSM " + ipaddress + " with credentials of user " + username; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -203,7 +201,7 @@ public boolean deleteCiscoNexusVSM(final long vsmId) throws ResourceInUseExcepti if (hosts != null && hosts.size() > 0) { for (Host host : hosts) { if (host.getType() == Host.Type.Routing) { - s_logger.info("Non-empty cluster with id" + 
clusterId + "still has a host that uses this VSM. Please empty the cluster first"); + logger.info("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); throw new ResourceInUseException("Non-empty cluster with id" + clusterId + "still has a host that uses this VSM. Please empty the cluster first"); } @@ -267,7 +265,7 @@ public CiscoNexusVSMDeviceVO getCiscoVSMbyVSMId(long vsmId) { public CiscoNexusVSMDeviceVO getCiscoVSMbyClusId(long clusterId) { ClusterVSMMapVO mapVO = _clusterVSMDao.findByClusterId(clusterId); if (mapVO == null) { - s_logger.info("Couldn't find a VSM associated with the specified cluster Id"); + logger.info("Couldn't find a VSM associated with the specified cluster Id"); return null; } // Else, pull out the VSM associated with the VSM id in mapVO. diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java index 4fff02247831..2e7e41534301 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.network.CiscoNexusVSMDeviceVO; @@ -32,7 +31,6 @@ @Component @DB public class CiscoNexusVSMDeviceDaoImpl extends GenericDaoBase implements CiscoNexusVSMDeviceDao { - protected static final Logger s_logger = Logger.getLogger(CiscoNexusVSMDeviceDaoImpl.class); final SearchBuilder mgmtVlanIdSearch; final SearchBuilder domainIdSearch; final SearchBuilder nameSearch; diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/network/element/CiscoNexusVSMElement.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/network/element/CiscoNexusVSMElement.java index 
f67b2e73bd16..2503e0ac7fde 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/network/element/CiscoNexusVSMElement.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/network/element/CiscoNexusVSMElement.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.api.commands.DeleteCiscoNexusVSMCmd; import com.cloud.api.commands.DisableCiscoNexusVSMCmd; @@ -69,7 +68,6 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl implements CiscoNexusVSMElementService, NetworkElement, Manager { - private static final Logger s_logger = Logger.getLogger(CiscoNexusVSMElement.class); @Inject CiscoNexusVSMDeviceDao _vsmDao; @@ -146,7 +144,7 @@ public boolean deleteCiscoNexusVSM(DeleteCiscoNexusVSMCmd cmd) { try { result = deleteCiscoNexusVSM(cmd.getCiscoNexusVSMDeviceId()); } catch (ResourceInUseException e) { - s_logger.info("VSM could not be deleted"); + logger.info("VSM could not be deleted"); // TODO: Throw a better exception here. 
throw new CloudRuntimeException("Failed to delete specified VSM"); } @@ -265,7 +263,7 @@ public Pair validateAndAddVsm(final String vsmIp, final String vs netconfClient.disconnect(); } catch (CloudRuntimeException e) { String msg = "Invalid credentials supplied for user " + vsmUser + " for Cisco Nexus 1000v VSM at " + vsmIp; - s_logger.error(msg); + logger.error(msg); _clusterDao.remove(clusterId); throw new CloudRuntimeException(msg); } @@ -275,7 +273,7 @@ public Pair validateAndAddVsm(final String vsmIp, final String vs if (vsm != null) { List clusterList = _clusterVSMDao.listByVSMId(vsm.getId()); if (clusterList != null && !clusterList.isEmpty()) { - s_logger.error("Failed to add cluster: specified Nexus VSM is already associated with another cluster"); + logger.error("Failed to add cluster: specified Nexus VSM is already associated with another cluster"); ResourceInUseException ex = new ResourceInUseException("Failed to add cluster: specified Nexus VSM is already associated with another cluster with specified Id"); // get clusterUuid to report error @@ -320,7 +318,7 @@ public CiscoNexusVSMDeviceVO doInTransaction(TransactionStatus status) { msg += "vsmpassword: Password of user account with admin privileges over Cisco Nexus 1000v dvSwitch. "; } } - s_logger.error(msg); + logger.error(msg); // Cleaning up the cluster record as addCluster operation failed because of invalid credentials of Nexus dvSwitch. 
_clusterDao.remove(clusterId); throw new CloudRuntimeException(msg); diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/PremiumSecondaryStorageResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/PremiumSecondaryStorageResource.java index e2aff4ce932e..f5344119dede 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/PremiumSecondaryStorageResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/PremiumSecondaryStorageResource.java @@ -21,7 +21,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; import org.apache.cloudstack.storage.resource.SecondaryStorageResourceHandler; @@ -32,7 +31,6 @@ public class PremiumSecondaryStorageResource extends NfsSecondaryStorageResource { - private static final Logger s_logger = Logger.getLogger(PremiumSecondaryStorageResource.class); private Map _handlers = new HashMap(); @@ -44,13 +42,13 @@ public Answer executeRequest(Command cmd) { if (hypervisor != null) { Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(hypervisor); if (hypervisorType == null) { - s_logger.error("Unsupported hypervisor type in command context, hypervisor: " + hypervisor); + logger.error("Unsupported hypervisor type in command context, hypervisor: " + hypervisor); return defaultAction(cmd); } SecondaryStorageResourceHandler handler = getHandler(hypervisorType); if (handler == null) { - s_logger.error("No handler can be found for hypervisor type in command context, hypervisor: " + hypervisor); + logger.error("No handler can be found for hypervisor type in command context, hypervisor: " + hypervisor); return defaultAction(cmd); } @@ -66,8 +64,8 @@ public Answer defaultAction(Command cmd) { public void ensureOutgoingRuleForAddress(String address) { if (address == null || address.isEmpty() || 
address.startsWith("0.0.0.0")) { - if (s_logger.isInfoEnabled()) - s_logger.info("Drop invalid dynamic route/firewall entry " + address); + if (logger.isInfoEnabled()) + logger.info("Drop invalid dynamic route/firewall entry " + address); return; } @@ -80,8 +78,8 @@ public void ensureOutgoingRuleForAddress(String address) { } if (needToSetRule) { - if (s_logger.isInfoEnabled()) - s_logger.info("Add dynamic route/firewall entry for " + address); + if (logger.isInfoEnabled()) + logger.info("Add dynamic route/firewall entry for " + address); allowOutgoingOnPrivate(address); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java index 6e19ba67bf50..2fa3ccc568e9 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java @@ -16,14 +16,15 @@ // under the License. 
package com.cloud.storage.resource; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.hypervisor.vmware.util.VmwareContextPool; public class VmwareSecondaryStorageContextFactory { - private static final Logger s_logger = Logger.getLogger(VmwareSecondaryStorageContextFactory.class); + protected static Logger LOGGER = LogManager.getLogger(VmwareSecondaryStorageContextFactory.class); private static volatile int s_seq = 1; @@ -60,7 +61,7 @@ public static VmwareContext getContext(String vCenterAddress, String vCenterUser } else { // Validate current context and verify if vCenter session timeout value of the context matches the timeout value set by Admin if (!context.validate() || (context.getVimClient().getVcenterSessionTimeout() != s_vCenterSessionTimeout)) { - s_logger.info("Validation of the context faild. dispose and create a new one"); + LOGGER.info("Validation of the context failed. 
dispose and create a new one"); context.close(); context = create(vCenterAddress, vCenterUserName, vCenterPassword); } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java index 68947ef69042..ece6176547c0 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java @@ -20,8 +20,8 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.resource.SecondaryStorageResourceHandler; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.BackupSnapshotCommand; @@ -50,9 +50,10 @@ import com.cloud.utils.StringUtils; import com.google.gson.Gson; import com.vmware.vim25.ManagedObjectReference; +import org.apache.logging.log4j.ThreadContext; public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageResourceHandler, VmwareHostService, VmwareStorageMount { - private static final Logger s_logger = Logger.getLogger(VmwareSecondaryStorageResourceHandler.class); + protected Logger logger = LogManager.getLogger(getClass()); private final PremiumSecondaryStorageResource _resource; private final VmwareStorageManager _storageMgr; @@ -94,7 +95,7 @@ public Answer executeRequest(Command cmd) { try { Answer answer; - NDC.push(getCommandLogTitle(cmd)); + ThreadContext.push(getCommandLogTitle(cmd)); logCommand(cmd); if (cmd instanceof PrimaryStorageDownloadCommand) { @@ -130,23 +131,23 @@ public Answer executeRequest(Command cmd) { answer.setContextParam("checkpoint2", cmd.getContextParam("checkpoint2")); } - 
if (s_logger.isDebugEnabled()) - s_logger.debug("Command execution answer: " + _gson.toJson(answer)); + if (logger.isDebugEnabled()) + logger.debug("Command execution answer: " + _gson.toJson(answer)); return answer; } finally { - if (s_logger.isDebugEnabled()) - s_logger.debug("Done executing " + _gson.toJson(cmd)); + if (logger.isDebugEnabled()) + logger.debug("Done executing " + _gson.toJson(cmd)); recycleServiceContext(); - NDC.pop(); + ThreadContext.pop(); } } private void logCommand(Command cmd) { try { - s_logger.debug(String.format("Executing command: [%s].", _gson.toJson(cmd))); + logger.debug(String.format("Executing command: [%s].", _gson.toJson(cmd))); } catch (Exception e) { - s_logger.debug(String.format("Executing command: [%s].", cmd.getClass().getSimpleName())); + logger.debug(String.format("Executing command: [%s].", cmd.getClass().getSimpleName())); } } @@ -186,13 +187,13 @@ private Answer execute(CreateVolumeFromSnapshotCommand cmd) { public VmwareContext getServiceContext(Command cmd) { String guid = cmd.getContextParam("guid"); if (guid == null || guid.isEmpty()) { - s_logger.error("Invalid command context parameter guid"); + logger.error("Invalid command context parameter guid"); return null; } String username = cmd.getContextParam("username"); if (username == null || username.isEmpty()) { - s_logger.error("Invalid command context parameter username"); + logger.error("Invalid command context parameter username"); return null; } @@ -201,14 +202,14 @@ public VmwareContext getServiceContext(Command cmd) { // validate command guid parameter String[] tokens = guid.split("@"); if (tokens == null || tokens.length != 2) { - s_logger.error("Invalid content in command context parameter guid"); + logger.error("Invalid content in command context parameter guid"); return null; } String vCenterAddress = tokens[1]; String[] hostTokens = tokens[0].split(":"); if (hostTokens == null || hostTokens.length != 2) { - s_logger.error("Invalid content in command 
context parameter guid"); + logger.error("Invalid content in command context parameter guid"); return null; } @@ -223,7 +224,7 @@ public VmwareContext getServiceContext(Command cmd) { context = null; } if (context == null) { - s_logger.info("Open new VmwareContext. vCenter: " + vCenterAddress + ", user: " + username + ", password: " + StringUtils.getMaskedPasswordForDisplay(password)); + logger.info("Open new VmwareContext. vCenter: " + vCenterAddress + ", user: " + username + ", password: " + StringUtils.getMaskedPasswordForDisplay(password)); VmwareSecondaryStorageContextFactory.setVcenterSessionTimeout(vCenterSessionTimeout); context = VmwareSecondaryStorageContextFactory.getContext(vCenterAddress, username, password); } @@ -235,7 +236,7 @@ public VmwareContext getServiceContext(Command cmd) { currentContext.set(context); return context; } catch (Exception e) { - s_logger.error("Unexpected exception " + e.toString(), e); + logger.error("Unexpected exception " + e.toString(), e); return null; } } @@ -266,7 +267,7 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context, Command cmd) { ManagedObjectReference morHyperHost = new ManagedObjectReference(); String[] hostTokens = tokens[0].split(":"); if (hostTokens == null || hostTokens.length != 2) { - s_logger.error("Invalid content in command context parameter guid"); + logger.error("Invalid content in command context parameter guid"); return null; } @@ -289,10 +290,10 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context, Command cmd) { : cmd.getContextParam("serviceconsole")); _resource.ensureOutgoingRuleForAddress(netSummary.getHostIp()); - s_logger.info("Setup firewall rule for host: " + netSummary.getHostIp()); + logger.info("Setup firewall rule for host: " + netSummary.getHostIp()); } } catch (Throwable e) { - s_logger.warn("Unable to retrive host network information due to exception " + e.toString() + ", host: " + hostTokens[0] + "-" + hostTokens[1]); + logger.warn("Unable to retrive host 
network information due to exception " + e.toString() + ", host: " + hostTokens[0] + "-" + hostTokens[1]); } return hostMo; @@ -320,7 +321,7 @@ public String getMountPoint(String storageUrl, String nfsVersion) { @Override public String createLogMessageException(Throwable e, Command command) { String message = String.format("%s failed due to [%s].", command.getClass().getSimpleName(), VmwareHelper.getExceptionMessage(e)); - s_logger.error(message, e); + logger.error(message, e); return message; } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java index b6b92f67ec59..ab9754a7c9e0 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageLayoutHelper.java @@ -22,7 +22,8 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.vmware.mo.DatacenterMO; import com.cloud.hypervisor.vmware.mo.DatastoreFile; @@ -37,7 +38,7 @@ * */ public class VmwareStorageLayoutHelper implements Configurable { - private static final Logger s_logger = Logger.getLogger(VmwareStorageLayoutHelper.class); + protected static Logger LOGGER = LogManager.getLogger(VmwareStorageLayoutHelper.class); static final ConfigKey VsphereLinkedCloneExtensions = new ConfigKey("Hidden", String.class, "vsphere.linked.clone.extensions", "delta.vmdk,sesparse.vmdk", @@ -169,7 +170,7 @@ public static String syncVolumeToVmDefaultFolder(DatacenterMO dcMo, String vmNam assert (ds != null); if (!ds.folderExists(String.format("[%s]", ds.getName()), vmName)) { - s_logger.info("VM folder does not exist on target 
datastore, we will create one. vm: " + vmName + ", datastore: " + ds.getName()); + LOGGER.info("VM folder does not exist on target datastore, we will create one. vm: " + vmName + ", datastore: " + ds.getName()); ds.makeDirectory(String.format("[%s] %s", ds.getName(), vmName), dcMo.getMor()); } @@ -190,7 +191,7 @@ public static String syncVolumeToVmDefaultFolder(DatacenterMO dcMo, String vmNam for (int i=1; i" + vmdkFullCloneModePair[i]); + LOGGER.info("sync " + vmdkFullCloneModeLegacyPair[i] + "->" + vmdkFullCloneModePair[i]); ds.moveDatastoreFile(vmdkFullCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkFullCloneModePair[i], dcMo.getMor(), true); } @@ -198,14 +199,14 @@ public static String syncVolumeToVmDefaultFolder(DatacenterMO dcMo, String vmNam for (int i=1; i" + vmdkLinkedCloneModePair[i]); + LOGGER.info("sync " + vmdkLinkedCloneModeLegacyPair[i] + "->" + vmdkLinkedCloneModePair[i]); ds.moveDatastoreFile(vmdkLinkedCloneModeLegacyPair[i], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[i], dcMo.getMor(), true); } } if (ds.fileExists(vmdkLinkedCloneModeLegacyPair[0])) { - s_logger.info("sync " + vmdkLinkedCloneModeLegacyPair[0] + "->" + vmdkLinkedCloneModePair[0]); + LOGGER.info("sync " + vmdkLinkedCloneModeLegacyPair[0] + "->" + vmdkLinkedCloneModePair[0]); ds.moveDatastoreFile(vmdkLinkedCloneModeLegacyPair[0], dcMo.getMor(), ds.getMor(), vmdkLinkedCloneModePair[0], dcMo.getMor(), true); } @@ -240,14 +241,14 @@ public static void syncVolumeToRootFolder(DatacenterMO dcMo, DatastoreMO ds, Str if (ds.fileExists(companionFilePath)) { String targetPath = getDatastorePathBaseFolderFromVmdkFileName(ds, String.format("%s-%s",vmdkName, linkedCloneExtension)); - s_logger.info("Fixup folder-synchronization. move " + companionFilePath + " -> " + targetPath); + LOGGER.info("Fixup folder-synchronization. 
move " + companionFilePath + " -> " + targetPath); ds.moveDatastoreFile(companionFilePath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true); } } // move the identity VMDK file the last String targetPath = getDatastorePathBaseFolderFromVmdkFileName(ds, vmdkName + ".vmdk"); - s_logger.info("Fixup folder-synchronization. move " + fileDsFullPath + " -> " + targetPath); + LOGGER.info("Fixup folder-synchronization. move " + fileDsFullPath + " -> " + targetPath); ds.moveDatastoreFile(fileDsFullPath, dcMo.getMor(), ds.getMor(), targetPath, dcMo.getMor(), true); try { @@ -266,9 +267,9 @@ public static void syncVolumeToRootFolder(DatacenterMO dcMo, DatastoreMO ds, Str + "in specific versions of VMWare. Users using VMFS or VMWare versions greater than 6.7 have not reported this error. If the operation performed is a volume detach, " + "it was successful. If you want to know why this error occurs in VMWare, please contact VMWare's technical support.", vmName, e.getMessage(), link); - s_logger.warn(message, e); + LOGGER.warn(message, e); } else { - s_logger.error(String.format("Failed to sync volume [%s] of VM [%s] due to: [%s].", vmdkName, vmName, e.getMessage()), e); + LOGGER.error(String.format("Failed to sync volume [%s] of VM [%s] due to: [%s].", vmdkName, vmName, e.getMessage()), e); throw e; } } @@ -279,13 +280,13 @@ public static void moveVolumeToRootFolder(DatacenterMO dcMo, List detach for (String fileFullDsPath : detachedDisks) { DatastoreFile file = new DatastoreFile(fileFullDsPath); - s_logger.info("Check if we need to move " + fileFullDsPath + " to its root location"); + LOGGER.info("Check if we need to move " + fileFullDsPath + " to its root location"); DatastoreMO dsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(file.getDatastoreName())); if (dsMo.getMor() != null && !dsMo.getDatastoreType().equalsIgnoreCase("VVOL")) { HypervisorHostHelper.createBaseFolderInDatastore(dsMo, dsMo.getDataCenterMor()); DatastoreFile targetFile = new 
DatastoreFile(file.getDatastoreName(), HypervisorHostHelper.VSPHERE_DATASTORE_BASE_FOLDER, file.getFileName()); if (!targetFile.getPath().equalsIgnoreCase(file.getPath())) { - s_logger.info("Move " + file.getPath() + " -> " + targetFile.getPath()); + LOGGER.info("Move " + file.getPath() + " -> " + targetFile.getPath()); dsMo.moveDatastoreFile(file.getPath(), dcMo.getMor(), dsMo.getMor(), targetFile.getPath(), dcMo.getMor(), true); List vSphereFileExtensions = new ArrayList<>(Arrays.asList(VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"))); @@ -295,13 +296,13 @@ public static void moveVolumeToRootFolder(DatacenterMO dcMo, List detach String pairSrcFilePath = file.getCompanionPath(String.format("%s-%s", file.getFileBaseName(), linkedCloneExtension)); String pairTargetFilePath = targetFile.getCompanionPath(String.format("%s-%s", file.getFileBaseName(), linkedCloneExtension)); if (dsMo.fileExists(pairSrcFilePath)) { - s_logger.info("Move " + pairSrcFilePath + " -> " + pairTargetFilePath); + LOGGER.info("Move " + pairSrcFilePath + " -> " + pairTargetFilePath); dsMo.moveDatastoreFile(pairSrcFilePath, dcMo.getMor(), dsMo.getMor(), pairTargetFilePath, dcMo.getMor(), true); } } } } else { - s_logger.warn("Datastore for " + fileFullDsPath + " no longer exists, we have to skip"); + LOGGER.warn("Datastore for " + fileFullDsPath + " no longer exists, we have to skip"); } } } @@ -371,7 +372,7 @@ public static void deleteVolumeVmdkFiles(DatastoreMO dsMo, String volumeName, Da if (fileFullPath != null) { dsMo.deleteFile(fileFullPath, dcMo.getMor(), true, excludeFolders); } else { - s_logger.warn("Unable to locate VMDK file: " + fileName); + LOGGER.warn("Unable to locate VMDK file: " + fileName); } List vSphereFileExtensions = new ArrayList<>(Arrays.asList(VsphereLinkedCloneExtensions.value().trim().split("\\s*,\\s*"))); @@ -383,7 +384,7 @@ public static void deleteVolumeVmdkFiles(DatastoreMO dsMo, String volumeName, Da if (fileFullPath != null) { 
dsMo.deleteFile(fileFullPath, dcMo.getMor(), true, excludeFolders); } else { - s_logger.warn("Unable to locate VMDK file: " + String.format("%s-%s", volumeName, linkedCloneExtension)); + LOGGER.warn("Unable to locate VMDK file: " + String.format("%s-%s", volumeName, linkedCloneExtension)); } } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java index 57522a678f89..d81fd028b101 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -58,7 +58,8 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -151,7 +152,7 @@ public String getName() { } } - private static final Logger s_logger = Logger.getLogger(VmwareStorageProcessor.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int DEFAULT_NFS_PORT = 2049; private static final int SECONDS_TO_WAIT_FOR_DATASTORE = 120; @@ -181,7 +182,7 @@ public VmwareStorageProcessor(VmwareHostService hostService, boolean fullCloneFl @Override public SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand cmd) { - s_logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for VmwareStorageProcessor"); + logger.info("'SnapshotAndCopyAnswer snapshotAndCopy(SnapshotAndCopyCommand)' not currently used for VmwareStorageProcessor"); return new SnapshotAndCopyAnswer(); } @@ -303,7 +304,7 @@ public ResignatureAnswer resignature(ResignatureCommand cmd) 
{ return answer; } catch (Exception ex) { - s_logger.error(String.format("Command %s failed due to: [%s].", cmd.getClass().getSimpleName(), ex.getMessage()), ex); + logger.error(String.format("Command %s failed due to: [%s].", cmd.getClass().getSimpleName(), ex.getMessage()), ex); throw new CloudRuntimeException(ex.getMessage()); } @@ -314,7 +315,7 @@ private List getExtentsMatching(List details) throws Exception { - s_logger.debug(String.format("Executing clean up in DataStore: [%s].", dsMo.getName())); + logger.debug(String.format("Executing clean up in DataStore: [%s].", dsMo.getName())); boolean expandDatastore = Boolean.parseBoolean(details.get(DiskTO.EXPAND_DATASTORE)); // A volume on the storage system holding a template uses a minimum hypervisor snapshot reserve value. @@ -492,7 +493,7 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy boolean createSnapshot, String nfsVersion, String configuration) throws Exception { String secondaryMountPoint = mountService.getMountPoint(secondaryStorageUrl, nfsVersion); - s_logger.info(String.format("Init copy of template [name: %s, path in secondary storage: %s, configuration: %s] in secondary storage [url: %s, mount point: %s] to primary storage.", + logger.info(String.format("Init copy of template [name: %s, path in secondary storage: %s, configuration: %s] in secondary storage [url: %s, mount point: %s] to primary storage.", templateName, templatePathAtSecondaryStorage, configuration, secondaryStorageUrl, secondaryMountPoint)); String srcOVAFileName = @@ -501,15 +502,15 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy String srcFileName = getOVFFilePath(srcOVAFileName); if (srcFileName == null) { - Script command = new Script("tar", 0, s_logger); + Script command = new Script("tar", 0, logger); command.add("--no-same-owner"); command.add("-xf", srcOVAFileName); command.setWorkDir(secondaryMountPoint + "/" + templatePathAtSecondaryStorage); - s_logger.info("Executing command: " + 
command.toString()); + logger.info("Executing command: " + command.toString()); String result = command.execute(); if (result != null) { String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -517,7 +518,7 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy srcFileName = getOVFFilePath(srcOVAFileName); if (srcFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -526,19 +527,19 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy } VmConfigInfo vAppConfig; - s_logger.debug(String.format("Deploying OVF template %s with configuration %s.", templateName, configuration)); + logger.debug(String.format("Deploying OVF template %s with configuration %s.", templateName, configuration)); hyperHost.importVmFromOVF(srcFileName, templateUuid, datastoreMo, "thin", configuration); VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(templateUuid); if (vmMo == null) { String msg = "Failed to import OVA template. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName + ", templateUuid: " + templateUuid; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } else { vAppConfig = vmMo.getConfigInfo().getVAppConfig(); if (vAppConfig != null) { - s_logger.info("Found vApp configuration"); + logger.info("Found vApp configuration"); } } @@ -561,7 +562,7 @@ private Pair copyTemplateFromSecondaryToPrimary(VmwareHy String msg = "Unable to create base snapshot for template, templateName: " + templateName + ", templateUuid: " + templateUuid; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -665,8 +666,8 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { dsMo = new DatastoreMO(context, morDs); if (templateMo == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Template " + templateInfo.second() + " is not setup yet. Set up template from secondary storage with uuid name: " + templateUuidName); + if (logger.isInfoEnabled()) { + logger.info("Template " + templateInfo.second() + " is not setup yet. 
Set up template from secondary storage with uuid name: " + templateUuidName); } if (managed) { @@ -694,7 +695,7 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { templateUuidName, true, _nfsVersion, configurationId); } } else { - s_logger.info("Template " + templateInfo.second() + " has already been setup, skip the template setup process in primary storage"); + logger.info("Template " + templateInfo.second() + " has already been setup, skip the template setup process in primary storage"); } TemplateObjectTO newTemplate = new TemplateObjectTO(); @@ -727,7 +728,7 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) { removeVmfsDatastore(cmd, hyperHost, VmwareResource.getDatastoreName(managedStoragePoolName), storageHost, storagePort, trimIqn(managedStoragePoolName)); } catch (Exception ex) { - s_logger.error("Unable to remove the following datastore: " + VmwareResource.getDatastoreName(managedStoragePoolName), ex); + logger.error("Unable to remove the following datastore: " + VmwareResource.getDatastoreName(managedStoragePoolName), ex); } } } @@ -747,17 +748,17 @@ private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dc if (morBaseSnapshot == null) { String msg = "Unable to find template base snapshot, invalid template"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } - s_logger.info("creating linked clone from template"); + logger.info("creating linked clone from template"); if (!vmTemplate.createLinkedClone(vmdkName, morBaseSnapshot, dcMo.getVmFolder(), morPool, morDatastore)) { String msg = "Unable to clone from the template"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -767,12 +768,12 @@ private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dc private boolean createVMFullClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkName, ManagedObjectReference morDatastore, ManagedObjectReference morPool, 
ProvisioningType diskProvisioningType) throws Exception { - s_logger.info("creating full clone from template"); + logger.info("creating full clone from template"); if (!vmTemplate.createFullClone(vmdkName, dcMo.getVmFolder(), morPool, morDatastore, diskProvisioningType)) { String msg = "Unable to create full clone from the template"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -809,13 +810,13 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { VirtualMachineMO existingVm = dcMo.findVm(vmName); if (volume.getDeviceId().equals(0L)) { if (existingVm != null) { - s_logger.info(String.format("Found existing VM wth name [%s] before cloning from template, destroying it", vmName)); + logger.info(String.format("Found existing VM with name [%s] before cloning from template, destroying it", vmName)); existingVm.detachAllDisksAndDestroy(); } - s_logger.info("ROOT Volume from deploy-as-is template, cloning template"); + logger.info("ROOT Volume from deploy-as-is template, cloning template"); cloneVMFromTemplate(hyperHost, template.getPath(), vmName, primaryStore.getUuid()); } else { - s_logger.info("ROOT Volume from deploy-as-is template, volume already created at this point"); + logger.info("ROOT Volume from deploy-as-is template, volume already created at this point"); } } else { if (srcStore == null) { @@ -833,15 +834,15 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); String volumeDatastorePath = vmdkFilePair[0]; synchronized (this) { - s_logger.info("Delete file if exists in datastore to clear the way for creating the volume. file: " + volumeDatastorePath); + logger.info("Delete file if exists in datastore to clear the way for creating the volume. 
file: " + volumeDatastorePath); VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vmdkName, dcMo, searchExcludedFolders); vmMo.createDisk(volumeDatastorePath, (long)(volume.getSize() / (1024L * 1024L)), morDatastore, -1, null); vmMo.detachDisk(volumeDatastorePath, false); } } finally { - s_logger.info("Destroy dummy VM after volume creation"); + logger.info("Destroy dummy VM after volume creation"); if (vmMo != null) { - s_logger.warn("Unable to destroy a null VM ManagedObjectReference"); + logger.warn("Unable to destroy a null VM ManagedObjectReference"); vmMo.detachAllDisksAndDestroy(); } } @@ -849,7 +850,7 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) { String templatePath = template.getPath(); VirtualMachineMO vmTemplate = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templatePath), true); if (vmTemplate == null) { - s_logger.warn("Template host in vSphere is not in connected state, request template reload"); + logger.warn("Template host in vSphere is not in connected state, request template reload"); return new CopyCmdAnswer("Template host in vSphere is not in connected state, request template reload"); } if (dsMo.getDatastoreType().equalsIgnoreCase("VVOL")) { @@ -909,7 +910,7 @@ private String cloneVMforVvols(VmwareContext context, VmwareHypervisorHost hyper assert (vmMo != null); String vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); if (volume.getVolumeType() == Volume.Type.DATADISK) { - s_logger.info("detach disks from volume-wrapper VM " + vmName); + logger.info("detach disks from volume-wrapper VM " + vmName); vmMo.detachAllDisksAndDestroy(); } return vmdkFileBaseName; @@ -939,7 +940,7 @@ private String createVMAndFolderWithVMName(VmwareContext context, VmwareHypervis assert (vmMo != null); String vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); - s_logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName); + logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName); String[] 
vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag); String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag); @@ -947,7 +948,7 @@ private String createVMAndFolderWithVMName(VmwareContext context, VmwareHypervis dsMo.moveDatastoreFile(vmwareLayoutFilePair[i], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[i], dcMo.getMor(), true); } - s_logger.info("detach disks from volume-wrapper VM and destroy" + vmdkName); + logger.info("detach disks from volume-wrapper VM and destroy" + vmdkName); vmMo.detachAllDisksAndDestroy(); String srcFile = dsMo.getDatastorePath(vmdkName, true); @@ -1021,7 +1022,7 @@ private String deleteVolumeDirOnSecondaryStorage(String volumeDir, String secSto private String deleteDir(String dir) { synchronized (dir.intern()) { - Script command = new Script(false, "rm", _timeout, s_logger); + Script command = new Script(false, "rm", _timeout, logger); command.add("-rf"); command.add(dir); return command.execute(); @@ -1081,7 +1082,7 @@ private Pair copyVolumeToSecStorage(VmwareHostService hostServic if (morDs == null) { String msg = "Unable to find volumes's storage pool for copy volume operation"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -1094,7 +1095,7 @@ private Pair copyVolumeToSecStorage(VmwareHostService hostServic if (workerVm == null) { String msg = "Unable to create worker VM to execute CopyVolumeCommand"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -1188,14 +1189,14 @@ private Ternary createTemplateFromVolume(VmwareContext conte String secondaryMountPoint = mountService.getMountPoint(secStorageUrl, nfsVersion); String installFullPath = secondaryMountPoint + "/" + installPath; synchronized (installPath.intern()) { - Script 
command = new Script(false, "mkdir", _timeout, s_logger); + Script command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(installFullPath); String result = command.execute(); if (result != null) { String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -1205,7 +1206,7 @@ private Ternary createTemplateFromVolume(VmwareContext conte Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath); if (volumeDeviceInfo == null) { String msg = "Unable to find related disk device for volume. volume path: " + volumePath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -1245,7 +1246,7 @@ private Ternary createTemplateFromVolume(VmwareContext conte } finally { if (clonedVm != null) { - s_logger.debug(String.format("Destroying cloned VM: %s with its disks", clonedVm.getName())); + logger.debug(String.format("Destroying cloned VM: %s with its disks", clonedVm.getName())); clonedVm.destroy(); } } @@ -1283,8 +1284,8 @@ public Answer createTemplateFromVolume(CopyCommand cmd) { } else { vmMo = hyperHost.findVmOnHyperHost(volume.getVmName()); if (vmMo == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + + if (logger.isDebugEnabled()) { + logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter"); } vmMo = hyperHost.findVmOnPeerHyperHost(volume.getVmName()); @@ -1299,7 +1300,7 @@ public Answer createTemplateFromVolume(CopyCommand cmd) { if (vmMo == null) { String msg = "Unable to find the owner VM for volume operation. 
vm: " + volume.getVmName(); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -1323,7 +1324,7 @@ public Answer createTemplateFromVolume(CopyCommand cmd) { workerVmMo.detachAllDisksAndDestroy(); } } catch (Throwable e) { - s_logger.error("Failed to destroy worker VM created for detached volume"); + logger.error("Failed to destroy worker VM created for detached volume"); } } } @@ -1380,68 +1381,68 @@ private Ternary createTemplateFromSnapshot(String installPat String snapshotFullVMDKName = snapshotRoot + "/" + backupSSUuid + "/"; synchronized (installPath.intern()) { - command = new Script(false, "mkdir", _timeout, s_logger); + command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(installFullPath); result = command.execute(); if (result != null) { String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } try { if (new File(snapshotFullOVAName).exists()) { - command = new Script(false, "cp", wait, s_logger); + command = new Script(false, "cp", wait, logger); command.add(snapshotFullOVAName); command.add(installFullOVAName); result = command.execute(); if (result != null) { String msg = "unable to copy snapshot " + snapshotFullOVAName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } // untar OVA file at template directory - command = new Script("tar", wait, s_logger); + command = new Script("tar", wait, logger); command.add("--no-same-owner"); command.add("-xf", installFullOVAName); command.setWorkDir(installFullPath); - s_logger.info("Executing command: " + command.toString()); + logger.info("Executing command: " + command.toString()); result = command.execute(); if (result != null) { String msg = "unable to untar snapshot " + snapshotFullOVAName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); 
throw new Exception(msg); } } else { // there is no ova file, only ovf originally; if (new File(snapshotFullOvfName).exists()) { - command = new Script(false, "cp", wait, s_logger); + command = new Script(false, "cp", wait, logger); command.add(snapshotFullOvfName); //command.add(installFullOvfName); command.add(installFullPath); result = command.execute(); if (result != null) { String msg = "unable to copy snapshot " + snapshotFullOvfName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } - s_logger.info("vmdkfile parent dir: " + snapshotRoot); + logger.info("vmdkfile parent dir: " + snapshotRoot); File snapshotdir = new File(snapshotRoot); File[] ssfiles = snapshotdir.listFiles(); if (ssfiles == null) { String msg = "unable to find snapshot vmdk files in " + snapshotRoot; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } // List filenames = new ArrayList(); for (int i = 0; i < ssfiles.length; i++) { String vmdkfile = ssfiles[i].getName(); - s_logger.info("vmdk file name: " + vmdkfile); + logger.info("vmdk file name: " + vmdkfile); if (vmdkfile.toLowerCase().startsWith(backupSSUuid) && vmdkfile.toLowerCase().endsWith(".vmdk")) { snapshotFullVMDKName = snapshotRoot + File.separator + vmdkfile; templateVMDKName += vmdkfile; @@ -1449,20 +1450,20 @@ private Ternary createTemplateFromSnapshot(String installPat } } if (snapshotFullVMDKName != null) { - command = new Script(false, "cp", wait, s_logger); + command = new Script(false, "cp", wait, logger); command.add(snapshotFullVMDKName); command.add(installFullPath); result = command.execute(); - s_logger.info("Copy VMDK file: " + snapshotFullVMDKName); + logger.info("Copy VMDK file: " + snapshotFullVMDKName); if (result != null) { String msg = "unable to copy snapshot vmdk file " + snapshotFullVMDKName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } } else { String msg = "unable to find any 
snapshot ova/ovf files" + snapshotFullOVAName + " to " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -1592,7 +1593,7 @@ private void takeDownManagedStorageCopyTemplateFromSnapshot(CopyCommand cmd) thr private void createTemplateFolder(String installPath, String installFullPath, NfsTO nfsSvr) { synchronized (installPath.intern()) { - Script command = new Script(false, "mkdir", _timeout, s_logger); + Script command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(installFullPath); @@ -1603,7 +1604,7 @@ private void createTemplateFolder(String installPath, String installFullPath, Nf String secStorageUrl = nfsSvr.getUrl(); String msg = "unable to prepare template directory: " + installPath + "; storage: " + secStorageUrl + "; error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -1640,7 +1641,7 @@ private String getTemplateVmdkName(String installFullPath, String exportName) { if (templateFiles == null) { String msg = "Unable to find template files in " + installFullPath; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -1693,7 +1694,7 @@ private Answer handleManagedStorageCreateTemplateFromSnapshot(CopyCommand cmd, T catch (Exception ex) { String errMsg = "Problem creating a template from a snapshot for managed storage: " + ex.getMessage(); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg, ex); } @@ -1702,7 +1703,7 @@ private Answer handleManagedStorageCreateTemplateFromSnapshot(CopyCommand cmd, T takeDownManagedStorageCopyTemplateFromSnapshot(cmd); } catch (Exception ex) { - s_logger.warn("Unable to remove one or more static targets"); + logger.warn("Unable to remove one or more static targets"); } } } @@ -1757,13 +1758,13 @@ private Pair exportVolumeToSecondaryStorage(VmwareContext cont synchronized (exportPath.intern()) { if (!new 
File(exportPath).exists()) { - Script command = new Script(false, "mkdir", _timeout, s_logger); + Script command = new Script(false, "mkdir", _timeout, logger); command.add("-p"); command.add(exportPath); String result = command.execute(); if (result != null) { String errorMessage = String.format("Unable to prepare snapshot backup directory: [%s] due to [%s].", exportPath, result); - s_logger.error(errorMessage); + logger.error(errorMessage); throw new Exception(errorMessage); } } @@ -1775,7 +1776,7 @@ private Pair exportVolumeToSecondaryStorage(VmwareContext cont Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath); if (volumeDeviceInfo == null) { String msg = "Unable to find related disk device for volume. volume path: " + volumePath; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -1799,7 +1800,7 @@ private Pair exportVolumeToSecondaryStorage(VmwareContext cont return new Pair<>(diskDevice, disks); } finally { if (clonedVm != null) { - s_logger.debug(String.format("Destroying cloned VM: %s with its disks", clonedVm.getName())); + logger.debug(String.format("Destroying cloned VM: %s with its disks", clonedVm.getName())); clonedVm.destroy(); } } @@ -1858,8 +1859,8 @@ public Answer backupSnapshot(CopyCommand cmd) { if(vmName != null) { vmMo = hyperHost.findVmOnHyperHost(vmName); if (vmMo == null) { - if(s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); + if(logger.isDebugEnabled()) { + logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); } vmMo = hyperHost.findVmOnPeerHyperHost(vmName); } @@ -1876,11 +1877,11 @@ public Answer backupSnapshot(CopyCommand cmd) { String datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumePath + ".vmdk"); vmMo.attachDisk(new String[] { datastoreVolumePath 
}, morDs); } else { - s_logger.info("Using owner VM " + vmName + " for snapshot operation"); + logger.info("Using owner VM " + vmName + " for snapshot operation"); hasOwnerVm = true; } - s_logger.debug(String.format("Executing backup snapshot with UUID [%s] to secondary storage.", snapshotUuid)); + logger.debug(String.format("Executing backup snapshot with UUID [%s] to secondary storage.", snapshotUuid)); backupResult = backupSnapshotToSecondaryStorage(context, vmMo, hyperHost, destSnapshot.getPath(), srcSnapshot.getVolume().getPath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid, hostService.getWorkerName(context, cmd, 1, null), _nfsVersion); @@ -1925,18 +1926,18 @@ public Answer backupSnapshot(CopyCommand cmd) { // TODO: this post operation fixup is not atomic and not safe when management server stops // in the middle if (backupResult != null && hasOwnerVm) { - s_logger.info("Check if we have disk consolidation after snapshot operation"); + logger.info("Check if we have disk consolidation after snapshot operation"); boolean chainConsolidated = false; for (String vmdkDsFilePath : backupResult.third()) { - s_logger.info("Validate disk chain file:" + vmdkDsFilePath); + logger.info("Validate disk chain file:" + vmdkDsFilePath); if (vmMo.getDiskDevice(vmdkDsFilePath) == null) { - s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected"); + logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected"); chainConsolidated = true; break; } else { - s_logger.info("" + vmdkDsFilePath + " is found still in chain"); + logger.info("" + vmdkDsFilePath + " is found still in chain"); } } @@ -1945,10 +1946,10 @@ public Answer backupSnapshot(CopyCommand cmd) { try { topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second()); } catch (Exception e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); } - s_logger.info("Disk has been consolidated, top VMDK is now: 
" + topVmdkFilePath); + logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath); if (topVmdkFilePath != null) { DatastoreFile file = new DatastoreFile(topVmdkFilePath); @@ -1958,12 +1959,12 @@ public Answer backupSnapshot(CopyCommand cmd) { vol.setPath(file.getFileBaseName()); snapshotInfo.setVolume(vol); } else { - s_logger.error("Disk has been consolidated, but top VMDK is not found ?!"); + logger.error("Disk has been consolidated, but top VMDK is not found ?!"); } } } } else { - s_logger.info("No snapshots created to be deleted!"); + logger.info("No snapshots created to be deleted!"); } } @@ -1972,7 +1973,7 @@ public Answer backupSnapshot(CopyCommand cmd) { workerVm.detachAllDisksAndDestroy(); } } catch (Throwable e) { - s_logger.warn(String.format("Failed to destroy worker VM [%s] due to: [%s]", workerVMName, e.getMessage()), e); + logger.warn(String.format("Failed to destroy worker VM [%s] due to: [%s]", workerVMName, e.getMessage()), e); } } @@ -2018,7 +2019,7 @@ private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean if (vmMo == null) { String msg = "Unable to find the VM to execute AttachCommand, vmName: " + vmName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -2047,7 +2048,7 @@ private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean if (morDs == null) { String msg = "Unable to find the mounted datastore to execute AttachCommand, vmName: " + vmName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -2146,7 +2147,7 @@ private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean return answer; } catch (Throwable e) { String msg = String.format("Failed to %s volume!", isAttach? 
"attach" : "detach"); - s_logger.error(msg, e); + logger.error(msg, e); hostService.createLogMessageException(e, cmd); // Sending empty error message - too many duplicate errors in UI return new AttachAnswer(""); @@ -2168,7 +2169,7 @@ private VirtualMachineDiskInfo getMatchingExistingDisk(VmwareHypervisorHost hype VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); if (diskInfo != null) { - s_logger.info("Found existing disk info from volume path: " + volume.getPath()); + logger.info("Found existing disk info from volume path: " + volume.getPath()); return diskInfo; } else { String chainInfo = volume.getChainInfo(); @@ -2181,7 +2182,7 @@ private VirtualMachineDiskInfo getMatchingExistingDisk(VmwareHypervisorHost hype DatastoreFile file = new DatastoreFile(diskPath); diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName); if (diskInfo != null) { - s_logger.info("Found existing disk from chain info: " + diskPath); + logger.info("Found existing disk from chain info: " + diskPath); return diskInfo; } } @@ -2190,7 +2191,7 @@ private VirtualMachineDiskInfo getMatchingExistingDisk(VmwareHypervisorHost hype if (diskInfo == null) { diskInfo = diskInfoBuilder.getDiskInfoByDeviceBusName(infoInChain.getDiskDeviceBusName()); if (diskInfo != null) { - s_logger.info("Found existing disk from chain device bus information: " + infoInChain.getDiskDeviceBusName()); + logger.info("Found existing disk from chain device bus information: " + infoInChain.getDiskDeviceBusName()); return diskInfo; } } @@ -2213,7 +2214,7 @@ private DatastoreMO getDiskDatastoreMofromVM(VmwareHypervisorHost hyperHost, Vmw VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName); if (diskInfo != null) { - s_logger.info("Found existing disk info from volume path: " + volume.getPath()); + logger.info("Found existing disk info from volume path: " + 
volume.getPath()); return dsMo; } else { String chainInfo = volume.getChainInfo(); @@ -2226,7 +2227,7 @@ private DatastoreMO getDiskDatastoreMofromVM(VmwareHypervisorHost hyperHost, Vmw DatastoreFile file = new DatastoreFile(diskPath); diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(file.getFileBaseName(), dsName); if (diskInfo != null) { - s_logger.info("Found existing disk from chain info: " + diskPath); + logger.info("Found existing disk from chain info: " + diskPath); return dsMo; } } @@ -2279,12 +2280,12 @@ private boolean expandVirtualDisk(VirtualMachineMO vmMo, String datastoreVolumeP return false; } - private static String getSecondaryDatastoreUUID(String storeUrl) { + private String getSecondaryDatastoreUUID(String storeUrl) { String uuid = null; try{ uuid=UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString(); }catch(UnsupportedEncodingException e){ - s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error." ); + logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error." 
); } return uuid; } @@ -2310,7 +2311,7 @@ private Answer attachIso(DiskTO disk, boolean isAttach, String vmName, boolean f VirtualMachineMO vmMo = HypervisorHostHelper.findVmOnHypervisorHostOrPeer(hyperHost, vmName); if (vmMo == null) { String msg = "Unable to find VM in vSphere to execute AttachIsoCommand, vmName: " + vmName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } TemplateObjectTO iso = (TemplateObjectTO)disk.getData(); @@ -2322,7 +2323,7 @@ private Answer attachIso(DiskTO disk, boolean isAttach, String vmName, boolean f if (storeUrl == null) { if (!iso.getName().equalsIgnoreCase(TemplateManager.VMWARE_TOOLS_ISO)) { String msg = "ISO store root url is not found in AttachIsoCommand"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } else { if (isAttach) { @@ -2346,7 +2347,7 @@ private Answer attachIso(DiskTO disk, boolean isAttach, String vmName, boolean f if (!isoPath.startsWith(storeUrl)) { assert (false); String msg = "ISO path does not start with the secondary storage root"; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -2369,12 +2370,12 @@ private Answer attachIso(DiskTO disk, boolean isAttach, String vmName, boolean f return new AttachAnswer(disk); } catch (Throwable e) { if (e instanceof RemoteException) { - s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); + logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); hostService.invalidateServiceContext(null); } String message = String.format("AttachIsoCommand(%s) failed due to: [%s]. Also check if your guest os is a supported version", isAttach? 
"attach" : "detach", VmwareHelper.getExceptionMessage(e)); - s_logger.error(message, e); + logger.error(message, e); return new AttachAnswer(message); } } @@ -2391,7 +2392,7 @@ public Answer dettachVolume(DettachCommand cmd) { @Override public Answer createVolume(CreateObjectCommand cmd) { - s_logger.debug(LogUtils.logGsonWithoutException("Executing CreateObjectCommand cmd: [%s].", cmd)); + logger.debug(LogUtils.logGsonWithoutException("Executing CreateObjectCommand cmd: [%s].", cmd)); VolumeObjectTO volume = (VolumeObjectTO)cmd.getData(); DataStoreTO primaryStore = volume.getDataStore(); String vSphereStoragePolicyId = volume.getvSphereStoragePolicyId(); @@ -2421,10 +2422,10 @@ public Answer createVolume(CreateObjectCommand cmd) { newVol.setPath(file.getFileBaseName()); newVol.setSize(volume.getSize()); } catch (Exception e) { - s_logger.error(String.format("Create disk using vStorageObject manager failed due to [%s], retrying using worker VM.", e.getMessage()), e); + logger.error(String.format("Create disk using vStorageObject manager failed due to [%s], retrying using worker VM.", e.getMessage()), e); String dummyVmName = hostService.getWorkerName(context, cmd, 0, dsMo); try { - s_logger.info(String.format("Creating worker VM [%s].", dummyVmName)); + logger.info(String.format("Creating worker VM [%s].", dummyVmName)); vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName, null); if (vmMo == null) { throw new CloudRuntimeException("Unable to create a dummy VM for volume creation."); @@ -2436,7 +2437,7 @@ public Answer createVolume(CreateObjectCommand cmd) { vmMo.detachDisk(volumeDatastorePath, false); } catch (Exception e1) { - s_logger.error(String.format("Deleting file [%s] due to [%s].", volumeDatastorePath, e1.getMessage()), e1); + logger.error(String.format("Deleting file [%s] due to [%s].", volumeDatastorePath, e1.getMessage()), e1); VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid, dcMo, 
VmwareManager.s_vmwareSearchExcludeFolder.value()); throw new CloudRuntimeException(String.format("Unable to create volume due to [%s].", e1.getMessage())); } @@ -2447,7 +2448,7 @@ public Answer createVolume(CreateObjectCommand cmd) { newVol.setSize(volume.getSize()); return new CreateObjectAnswer(newVol); } finally { - s_logger.info("Destroying dummy VM after volume creation."); + logger.info("Destroying dummy VM after volume creation."); if (vmMo != null) { vmMo.detachAllDisksAndDestroy(); } @@ -2502,7 +2503,7 @@ public Answer deleteVolume(DeleteCommand cmd) { if (morDs == null) { String msg = "Unable to find datastore based on volume mount point " + store.getUuid(); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -2528,11 +2529,11 @@ public Answer deleteVolume(DeleteCommand cmd) { boolean deployAsIs = vol.isDeployAsIs(); if (vmMo != null) { - if (s_logger.isInfoEnabled()) { + if (logger.isInfoEnabled()) { if (deployAsIs) { - s_logger.info("Destroying root volume " + vol.getPath() + " of deploy-as-is VM " + vmName); + logger.info("Destroying root volume " + vol.getPath() + " of deploy-as-is VM " + vmName); } else { - s_logger.info("Destroy root volume and VM itself. vmName " + vmName); + logger.info("Destroy root volume and VM itself. 
vmName " + vmName); } } @@ -2580,15 +2581,15 @@ public Answer deleteVolume(DeleteCommand cmd) { } } } else if (deployAsIs) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Destroying root volume " + vol.getPath() + " of already removed deploy-as-is VM " + vmName); + if (logger.isInfoEnabled()) { + logger.info("Destroying root volume " + vol.getPath() + " of already removed deploy-as-is VM " + vmName); } // The disks of the deploy-as-is VM have been detached from the VM and moved to root folder String deployAsIsRootDiskPath = dsMo.searchFileInSubFolders(vol.getPath() + VmwareResource.VMDK_EXTENSION, true, null); if (StringUtils.isNotBlank(deployAsIsRootDiskPath)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Removing disk " + deployAsIsRootDiskPath); + if (logger.isInfoEnabled()) { + logger.info("Removing disk " + deployAsIsRootDiskPath); } dsMo.deleteFile(deployAsIsRootDiskPath, morDc, true); String deltaFilePath = dsMo.searchFileInSubFolders(vol.getPath() + "-delta" + VmwareResource.VMDK_EXTENSION, @@ -2600,8 +2601,8 @@ public Answer deleteVolume(DeleteCommand cmd) { } /* - if (s_logger.isInfoEnabled()) { - s_logger.info("Destroy volume by original name: " + vol.getPath() + ".vmdk"); + if (logger.isInfoEnabled()) { + logger.info("Destroy volume by original name: " + vol.getPath() + ".vmdk"); } VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); @@ -2610,8 +2611,8 @@ public Answer deleteVolume(DeleteCommand cmd) { return new Answer(cmd, true, ""); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Destroy root volume directly from datastore"); + if (logger.isInfoEnabled()) { + logger.info("Destroy root volume directly from datastore"); } } @@ -2634,7 +2635,7 @@ private ManagedObjectReference prepareManagedDatastore(VmwareContext context, Vm String storageHost, int storagePort, String chapInitiatorUsername, String chapInitiatorSecret, String chapTargetUsername, String chapTargetSecret) throws Exception 
{ if (storagePort == DEFAULT_NFS_PORT) { - s_logger.info("creating the NFS datastore with the following configuration - storageHost: " + storageHost + ", storagePort: " + storagePort + + logger.info("creating the NFS datastore with the following configuration - storageHost: " + storageHost + ", storagePort: " + storagePort + ", exportpath: " + iScsiName + "and diskUuid : " + diskUuid); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); ClusterMO cluster = new ClusterMO(context, morCluster); @@ -3021,13 +3022,13 @@ private void mountVmfsDatastore2(DatastoreMO dsMO, List hosts) throws Ex hostStorageSystemMO.mountVmfsVolume(getDatastoreUuid(dsMO, hostMO)); } catch (InvalidStateFaultMsg ex) { - s_logger.trace("'" + ex.getClass().getName() + "' exception thrown: " + ex.getMessage()); + logger.trace("'" + ex.getClass().getName() + "' exception thrown: " + ex.getMessage()); List currentHosts = new ArrayList<>(1); currentHosts.add(hostMO); - s_logger.trace("Waiting for host " + hostMO.getHostName() + " to mount datastore " + dsMO.getName()); + logger.trace("Waiting for host " + hostMO.getHostName() + " to mount datastore " + dsMO.getName()); waitForAllHostsToMountDatastore2(currentHosts, dsMO); } @@ -3201,7 +3202,7 @@ public void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove t } } catch (Exception ex) { - s_logger.warn(ex.getMessage()); + logger.warn(ex.getMessage()); } })); } @@ -3285,7 +3286,7 @@ private void handleRemove(List targetsToRemove, rescanAllHosts(hosts, true, false); } catch (Exception ex) { - s_logger.warn(ex.getMessage()); + logger.warn(ex.getMessage()); } } @@ -3422,13 +3423,13 @@ private void rescanAllHosts(List lstHosts, boolean rescanHba, boolean re } } - private static String trimIqn(String iqn) { + private String trimIqn(String iqn) { String[] tmp = iqn.split("/"); if (tmp.length != 3) { String msg = "Wrong format for iSCSI path: " + iqn + ". 
It should be formatted as '/targetIQN/LUN'."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -3545,7 +3546,7 @@ private void removeManagedTargetsFromCluster(List managedDatastoreNames) rescanAllHosts(context, hosts, true, false); } catch (Exception ex) { - s_logger.warn(ex.getMessage()); + logger.warn(ex.getMessage()); } })); } @@ -3614,27 +3615,27 @@ private Long restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, Datasto if (!ovfFile.exists()) { srcOVFFileName = getOVFFilePath(srcOVAFileName); if (srcOVFFileName == null && ovafile.exists()) { // volss: ova file exists; o/w can't do tar - Script command = new Script("tar", wait, s_logger); + Script command = new Script("tar", wait, logger); command.add("--no-same-owner"); command.add("-xf", srcOVAFileName); command.setWorkDir(secondaryMountPoint + "/" + secStorageDir + "/" + snapshotDir); - s_logger.info("Executing command: " + command.toString()); + logger.info("Executing command: " + command.toString()); String result = command.execute(); if (result != null) { String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } srcOVFFileName = getOVFFilePath(srcOVAFileName); } else if (srcOVFFileName == null) { String msg = "Unable to find snapshot OVA file at: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } if (srcOVFFileName == null) { String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -3688,7 +3689,7 @@ public Answer createVolumeFromSnapshot(CopyCommand cmd) { ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel); if (morPrimaryDs == null) { String msg = "Unable to find datastore: " + primaryStorageNameLabel; - s_logger.error(msg); + 
logger.error(msg); throw new Exception(msg); } @@ -3733,12 +3734,12 @@ public Answer forgetObject(ForgetObjectCmd cmd) { return new Answer(cmd, false, "not implememented yet"); } - private static String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, String storeIdentifier, String templateName) { + private String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, String storeIdentifier, String templateName) { String templateUuid; try { templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes("UTF-8")).toString(); } catch(UnsupportedEncodingException e){ - s_logger.warn("unexpected encoding error, using default Charset: " + e.getLocalizedMessage()); + logger.warn("unexpected encoding error, using default Charset: " + e.getLocalizedMessage()); templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes(Charset.defaultCharset())) .toString(); } @@ -3752,17 +3753,17 @@ private String getLegacyVmDataDiskController() throws Exception { void setNfsVersion(String nfsVersion){ this._nfsVersion = nfsVersion; - s_logger.debug("VmwareProcessor instance now using NFS version: " + nfsVersion); + logger.debug("VmwareProcessor instance now using NFS version: " + nfsVersion); } void setFullCloneFlag(boolean value){ this._fullCloneFlag = value; - s_logger.debug("VmwareProcessor instance - create full clone = " + (value ? "TRUE" : "FALSE")); + logger.debug("VmwareProcessor instance - create full clone = " + (value ? "TRUE" : "FALSE")); } void setDiskProvisioningStrictness(boolean value){ this._diskProvisioningStrictness = value; - s_logger.debug("VmwareProcessor instance - diskProvisioningStrictness = " + (value ? "TRUE" : "FALSE")); + logger.debug("VmwareProcessor instance - diskProvisioningStrictness = " + (value ? 
"TRUE" : "FALSE")); } @Override @@ -3780,7 +3781,7 @@ public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyC ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel); if (morPrimaryDs == null) { String msg = "Unable to find datastore: " + primaryStorageNameLabel; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } @@ -3821,7 +3822,7 @@ public VirtualMachineMO cloneVMFromTemplate(VmwareHypervisorHost hyperHost, Stri if (morDatastore == null) { throw new CloudRuntimeException("Unable to find datastore in vSphere"); } - s_logger.info("Cloning VM " + cloneName + " from template " + templateName + " into datastore " + templatePrimaryStoreUuid); + logger.info("Cloning VM " + cloneName + " from template " + templateName + " into datastore " + templatePrimaryStoreUuid); if (!_fullCloneFlag) { createVMLinkedClone(templateMo, dcMo, cloneName, morDatastore, morPool, null); } else { @@ -3834,7 +3835,7 @@ public VirtualMachineMO cloneVMFromTemplate(VmwareHypervisorHost hyperHost, Stri return vm; } catch (Throwable e) { String msg = "Error cloning VM from template in primary storage: %s" + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(msg, e); } } @@ -3854,8 +3855,8 @@ public Pair getSyncedVolume(VirtualMachineMO vmMo, VmwareConte DatastoreFile file = new DatastoreFile(diskChain[0]); String volumePath = volumeTO.getPath(); if (!file.getFileBaseName().equalsIgnoreCase(volumePath)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumePath + " -> " + file.getFileBaseName()); + if (logger.isInfoEnabled()) { + logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumePath + " -> " + file.getFileBaseName()); } volumePathChangeObserved = true; volumePath = file.getFileBaseName(); @@ -3867,7 
+3868,7 @@ public Pair getSyncedVolume(VirtualMachineMO vmMo, VmwareConte if (diskDatastoreMoFromVM != null) { String actualPoolUuid = diskDatastoreMoFromVM.getCustomFieldValue(CustomFieldConstants.CLOUD_UUID); if (!actualPoolUuid.equalsIgnoreCase(primaryStore.getUuid())) { - s_logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumePath, actualPoolUuid)); + logger.warn(String.format("Volume %s found to be in a different storage pool %s", volumePath, actualPoolUuid)); datastoreChangeObserved = true; volumeTO.setDataStoreUuid(actualPoolUuid); volumeTO.setChainInfo(_gson.toJson(matchingExistingDisk)); @@ -3891,7 +3892,7 @@ public Answer syncVolumePath(SyncVolumePathCommand cmd) { vmMo = hyperHost.findVmOnPeerHyperHost(vmName); if (vmMo == null) { String msg = "Unable to find the VM to execute SyncVolumePathCommand, vmName: " + vmName; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java index e56f41ea8212..0067508cb000 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -54,7 +53,6 @@ public class VmwareStorageSubsystemCommandHandler extends StorageSubsystemCommandHandlerBase { - private static final Logger s_logger = Logger.getLogger(VmwareStorageSubsystemCommandHandler.class); private 
VmwareStorageManager storageManager; private PremiumSecondaryStorageResource storageResource; private String _nfsVersion; @@ -98,7 +96,7 @@ public boolean reconfigureStorageProcessor(EnumMap getHostIdForVmAndHostGuidInTargetCluster(VirtualMachi @Override public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) { - s_logger.debug(this.getClass() + " can handle the request because the hosts have VMware hypervisor"); + logger.debug(this.getClass() + " can handle the request because the hosts have VMware hypervisor"); return StrategyPriority.HYPERVISOR; } return StrategyPriority.CANT_HANDLE; @@ -230,7 +231,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As , srcData.toString() , destData.toString() ); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } // OfflineVmwareMigration: extract the destination pool from destData and construct a migrateVolume command @@ -300,9 +301,9 @@ private void updateVolumeAfterMigration(Answer answer, DataObject srcData, DataO throw new CloudRuntimeException("unexpected answer from hypervisor agent: " + answer.getDetails()); } MigrateVolumeAnswer ans = (MigrateVolumeAnswer) answer; - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { String format = "retrieved '%s' as new path for volume(%d)"; - s_logger.debug(String.format(format, ans.getVolumePath(), destData.getId())); + logger.debug(String.format(format, ans.getVolumePath(), destData.getId())); } // OfflineVmwareMigration: update the volume with new pool/volume path destinationVO.setPoolId(destData.getDataStore().getId()); @@ -326,7 +327,7 @@ public void copyAsync(Map volumeMap, VirtualMachineTO vmT throw new CloudRuntimeException("Unsupported operation requested for moving data."); } } catch (Exception e) { - s_logger.error("copy failed", e); + logger.error("copy 
failed", e); errMsg = e.toString(); } @@ -355,20 +356,20 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid()); MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), migrateWithStorageCmd); if (migrateWithStorageAnswer == null) { - s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); + logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!migrateWithStorageAnswer.getResult()) { - s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + migrateWithStorageAnswer.getDetails()); + logger.error("Migration with storage of vm " + vm + " failed. Details: " + migrateWithStorageAnswer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + migrateWithStorageAnswer.getDetails()); } else { // Update the volume details after migration. updateVolumesAfterMigration(volumeToPool, migrateWithStorageAnswer.getVolumeTos()); } - s_logger.debug("Storage migration of VM " + vm.getInstanceName() + " completed successfully. Migrated to host " + destHost.getName()); + logger.debug("Storage migration of VM " + vm.getInstanceName() + " completed successfully. 
Migrated to host " + destHost.getName()); return migrateWithStorageAnswer; } catch (OperationTimedoutException e) { - s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + logger.error("Error while migrating vm " + vm + " to host " + destHost, e); throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); } } @@ -389,10 +390,10 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid()); MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command); if (answer == null) { - s_logger.error("Migration with storage of vm " + vm + " failed."); + logger.error("Migration with storage of vm " + vm + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails()); } else { // Update the volume details after migration. 
@@ -401,7 +402,7 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine return answer; } catch (OperationTimedoutException e) { - s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + logger.error("Error while migrating vm " + vm + " to host " + destHost, e); throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); } } @@ -429,7 +430,7 @@ private void updateVolumesAfterMigration(Map volumeToPool } } if (!updated) { - s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated."); + logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated."); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/ha/XenServerFencer.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/ha/XenServerFencer.java index 72ec375fa271..a29ac2aee76b 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/ha/XenServerFencer.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/ha/XenServerFencer.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -40,7 +39,6 @@ import com.cloud.vm.VirtualMachine; public class XenServerFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(XenServerFencer.class); @Inject HostDao _hostDao; @@ -52,7 +50,7 @@ public class XenServerFencer extends AdapterBase implements FenceBuilder { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.XenServer) { - s_logger.debug("Don't know how to fence non XenServer hosts " + host.getHypervisorType()); + logger.debug("Don't know how to fence non XenServer hosts " + host.getHypervisorType()); return null; } @@ -71,18 +69,18 @@ public Boolean 
fenceOff(VirtualMachine vm, Host host) { try { Answer ans = _agentMgr.send(h.getId(), fence); if (!(ans instanceof FenceAnswer)) { - s_logger.debug("Answer is not fenceanswer. Result = " + ans.getResult() + "; Details = " + ans.getDetails()); + logger.debug("Answer is not fenceanswer. Result = " + ans.getResult() + "; Details = " + ans.getDetails()); continue; } answer = (FenceAnswer)ans; } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); + if (logger.isDebugEnabled()) { + logger.debug("Moving on to the next host because " + h.toString() + " is unavailable", e); } continue; } @@ -92,8 +90,8 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off " + vm.toString() + " on " + host.toString()); } return false; diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/XenServerGuru.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/XenServerGuru.java index 9de6ba8ab4f7..af10ded2e22f 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/XenServerGuru.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/XenServerGuru.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import 
com.cloud.agent.api.Command; import com.cloud.agent.api.to.DataObjectType; @@ -59,7 +58,6 @@ public class XenServerGuru extends HypervisorGuruBase implements HypervisorGuru, Configurable { - private Logger logger = Logger.getLogger(getClass()); @Inject private GuestOSDao guestOsDao; @@ -184,8 +182,8 @@ public Pair getCommandHostDelegation(long hostId, Command cmd) { } // only now can we decide, now we now we're only deciding for ourselves if (cmd instanceof StorageSubSystemCommand) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("XenServer StrorageSubSystemCommand re always executed in sequence (command of type %s to host %l).", cmd.getClass(), hostId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("XenServer StorageSubSystemCommand are always executed in sequence (command of type %s to host %d).", cmd.getClass(), hostId)); } StorageSubSystemCommand c = (StorageSubSystemCommand)cmd; c.setExecuteInSequence(true); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java index 095ba8108417..2e98b68bc443 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.maven.artifact.versioning.ComparableVersion; import org.apache.xmlrpc.XmlRpcException; @@ -105,7 +104,6 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(XcpServerDiscoverer.class); private int _wait; private 
XenServerConnectionPool _connPool; private boolean _checkHvm; @@ -171,16 +169,16 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui for(HostPatch patch : patches) { PoolPatch pp = patch.getPoolPatch(conn); if (pp != null && pp.equals(poolPatch) && patch.getApplied(conn)) { - s_logger.debug("host " + hostIp + " does have " + hotFixUuid +" Hotfix."); + logger.debug("host " + hostIp + " does have " + hotFixUuid +" Hotfix."); return true; } } } return false; } catch (UuidInvalid e) { - s_logger.debug("host " + hostIp + " doesn't have " + hotFixUuid + " Hotfix"); + logger.debug("host " + hostIp + " doesn't have " + hotFixUuid + " Hotfix"); } catch (Exception e) { - s_logger.debug("can't get patches information, consider it doesn't have " + hotFixUuid + " Hotfix"); + logger.debug("can't get patches information, consider it doesn't have " + hotFixUuid + " Hotfix"); } return false; } @@ -194,25 +192,25 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui Connection conn = null; if (!url.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; - s_logger.debug(msg); + logger.debug(msg); return null; } if (clusterId == null) { String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } if (podId == null) { String msg = "must specify pod Id when add host"; - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || cluster.getHypervisorType() != HypervisorType.XenServer) { - if (s_logger.isInfoEnabled()) { - s_logger.info("invalid cluster id or cluster is not for XenServer hypervisors"); + if (logger.isInfoEnabled()) { + logger.info("invalid cluster id or cluster is not for XenServer hypervisors"); } return null; } @@ -226,7 +224,7 @@ protected boolean poolHasHotFix(Connection 
conn, String hostIp, String hotFixUui conn = _connPool.getConnect(hostIp, username, pass); if (conn == null) { String msg = "Unable to get a connection to " + url; - s_logger.debug(msg); + logger.debug(msg); throw new DiscoveryException(msg); } @@ -252,7 +250,7 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui if (!clu.getGuid().equals(poolUuid)) { String msg = "Please join the host " + hostIp + " to XS pool " + clu.getGuid() + " through XC/XS before adding it through CS UI"; - s_logger.warn(msg); + logger.warn(msg); throw new DiscoveryException(msg); } } else { @@ -264,7 +262,7 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui try { Session.logout(conn); } catch (Exception e) { - s_logger.debug("Caught exception during logout", e); + logger.debug("Caught exception during logout", e); } conn.dispose(); conn = null; @@ -287,7 +285,7 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui if (!support_hvm) { String msg = "Unable to add host " + record.address + " because it doesn't support hvm"; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, msg, msg); - s_logger.debug(msg); + logger.debug(msg); throw new RuntimeException(msg); } } @@ -308,12 +306,12 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui String hostKernelVer = record.softwareVersion.get("linux"); if (_resourceMgr.findHostByGuid(record.uuid) != null) { - s_logger.debug("Skipping " + record.address + " because " + record.uuid + " is already in the database."); + logger.debug("Skipping " + record.address + " because " + record.uuid + " is already in the database."); continue; } CitrixResourceBase resource = createServerResource(dcId, podId, record, latestHotFix); - s_logger.info("Found host " + record.hostname + " ip=" + record.address + " product version=" + prodVersion); + logger.info("Found host " + record.hostname + " ip=" + record.address + " product 
version=" + prodVersion); Map details = new HashMap(); Map params = new HashMap(); @@ -364,7 +362,7 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui resource.configure("XenServer", params); } catch (ConfigurationException e) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + record.address, "Error is " + e.getMessage()); - s_logger.warn("Unable to instantiate " + record.address, e); + logger.warn("Unable to instantiate " + record.address, e); continue; } resource.start(); @@ -373,16 +371,16 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui } catch (SessionAuthenticationFailed e) { throw new DiscoveredWithErrorException("Authentication error"); } catch (XenAPIException e) { - s_logger.warn("XenAPI exception", e); + logger.warn("XenAPI exception", e); return null; } catch (XmlRpcException e) { - s_logger.warn("Xml Rpc Exception", e); + logger.warn("Xml Rpc Exception", e); return null; } catch (UnknownHostException e) { - s_logger.warn("Unable to resolve the host name", e); + logger.warn("Unable to resolve the host name", e); return null; } catch (Exception e) { - s_logger.warn("other exceptions: " + e.toString(), e); + logger.warn("other exceptions: " + e.toString(), e); return null; } return resources; @@ -440,7 +438,7 @@ else if (prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) { final String[] items = prodVersion.split("\\."); if ((Integer.parseInt(items[0]) > 6) || (Integer.parseInt(items[0]) == 6 && Integer.parseInt(items[1]) >= 4)) { - s_logger.warn("defaulting to xenserver650 resource for product brand: " + prodBrand + " with product " + + logger.warn("defaulting to xenserver650 resource for product brand: " + prodBrand + " with product " + "version: " + prodVersion); //default to xenserver650 resource. 
return new XenServer650Resource(); @@ -449,7 +447,7 @@ else if (prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) { String msg = "Only support XCP 1.0.0, 1.1.0, 1.4.x, 1.5 beta, 1.6.x; XenServer 5.6, XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2, 6.1.0, 6.2.0, >6.4.0, Citrix Hypervisor > 8.0.0 but this one is " + prodBrand + " " + prodVersion; - s_logger.warn(msg); + logger.warn(msg); throw new RuntimeException(msg); } @@ -566,7 +564,7 @@ public void processConnect(com.cloud.host.Host agent, StartupCommand cmd, boolea StartupRoutingCommand startup = (StartupRoutingCommand)cmd; if (startup.getHypervisorType() != HypervisorType.XenServer) { - s_logger.debug("Not XenServer so moving on."); + logger.debug("Not XenServer so moving on."); return; } @@ -578,7 +576,7 @@ public void processConnect(com.cloud.host.Host agent, StartupCommand cmd, boolea _clusterDao.update(cluster.getId(), cluster); } else if (!cluster.getGuid().equals(startup.getPool())) { String msg = "pool uuid for cluster " + cluster.getId() + " changed from " + cluster.getGuid() + " to " + startup.getPool(); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -592,15 +590,15 @@ public void processConnect(com.cloud.host.Host agent, StartupCommand cmd, boolea if (!resource.equals(host.getResource())) { String msg = "host " + host.getPrivateIpAddress() + " changed from " + host.getResource() + " to " + resource; - s_logger.debug(msg); + logger.debug(msg); host.setResource(resource); host.setSetup(false); _hostDao.update(agentId, host); throw new HypervisorVersionChangedException(msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Setting up host " + agentId); + if (logger.isDebugEnabled()) { + logger.debug("Setting up host " + agentId); } HostEnvironment env = new HostEnvironment(); @@ -624,12 +622,12 @@ public void processConnect(com.cloud.host.Host agent, StartupCommand cmd, boolea } return; } else { - s_logger.warn("Unable to setup agent " 
+ agentId + " due to " + ((answer != null) ? answer.getDetails() : "return null")); + logger.warn("Unable to setup agent " + agentId + " due to " + ((answer != null) ? answer.getDetails() : "return null")); } } catch (AgentUnavailableException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); + logger.warn("Unable to setup agent " + agentId + " because it became unavailable.", e); } catch (OperationTimedoutException e) { - s_logger.warn("Unable to setup agent " + agentId + " because it timed out", e); + logger.warn("Unable to setup agent " + agentId + " because it timed out", e); } throw new ConnectionException(true, "Reinitialize agent after setup."); } @@ -677,7 +675,7 @@ public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] st HostPodVO pod = _podDao.findById(host.getPodId()); DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); - s_logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.XenServer + ". Checking CIDR..."); + logger.info("Host: " + host.getName() + " connected with hypervisor type: " + HypervisorType.XenServer + ". 
Checking CIDR..."); _resourceMgr.checkCIDR(pod, dc, ssCmd.getPrivateIpAddress(), ssCmd.getPrivateNetmask()); return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.XenServer, details, hostTags); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 90473705a53e..295c96025ab7 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -70,7 +70,6 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import org.joda.time.Duration; import org.w3c.dom.Document; @@ -230,7 +229,6 @@ public String toString() { private static final long mem_128m = 134217728L; static final Random Rand = new Random(System.currentTimeMillis()); - private static final Logger s_logger = Logger.getLogger(CitrixResourceBase.class); protected static final HashMap s_powerStatesTable; public static final String XS_TOOLS_ISO_AFTER_70 = "guest-tools.iso"; @@ -363,21 +361,21 @@ public String callHostPlugin(final Connection conn, final String plugin, final S args.put(params[i], params[i + 1]); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final Host host = Host.getByUuid(conn, _host.getUuid()); final String result = host.callPlugin(conn, plugin, cmd, args); - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin Result: " + result); + if 
(logger.isTraceEnabled()) { + logger.trace("callHostPlugin Result: " + result); } return result.replace("\n", ""); } catch (final XenAPIException e) { msg = "callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(); - s_logger.warn(msg); + logger.warn(msg); } catch (final XmlRpcException e) { msg = "callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(); - s_logger.debug(msg); + logger.debug(msg); } throw new CloudRuntimeException(msg); } @@ -390,8 +388,8 @@ protected String callHostPluginAsync(final Connection conn, final String plugin, for (final Map.Entry entry : params.entrySet()) { args.put(entry.getKey(), entry.getValue()); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final Host host = Host.getByUuid(conn, _host.getUuid()); task = host.callPluginAsync(conn, plugin, cmd, args); @@ -399,20 +397,20 @@ protected String callHostPluginAsync(final Connection conn, final String plugin, waitForTask(conn, task, 1000, timeout); checkForSuccess(conn, task); final String result = task.getResult(conn); - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin Result: " + result); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin Result: " + result); } return result.replace("", "").replace("", "").replace("\n", ""); } catch (final Types.HandleInvalid e) { - s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); } catch (final Exception e) { - s_logger.warn("callHostPlugin 
failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { - s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); + logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } @@ -427,8 +425,8 @@ protected String callHostPluginAsync(final Connection conn, final String plugin, for (int i = 0; i < params.length; i += 2) { args.put(params[i], params[i + 1]); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final Host host = Host.getByUuid(conn, _host.getUuid()); task = host.callPluginAsync(conn, plugin, cmd, args); @@ -436,22 +434,22 @@ protected String callHostPluginAsync(final Connection conn, final String plugin, waitForTask(conn, task, 1000, timeout); checkForSuccess(conn, task); final String result = task.getResult(conn); - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin Result: " + result); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin Result: " + result); } return result.replace("", "").replace("", "").replace("\n", ""); } catch (final Types.HandleInvalid e) { - s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); } catch (final XenAPIException e) { - 
s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); } catch (final Exception e) { - s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { - s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); + logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } @@ -475,20 +473,20 @@ protected String callHostPluginThroughMaster(final Connection conn, final String args.put(params[i], params[i + 1]); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin executing for command " + cmd + " with " + getArgsString(args)); } final String result = master.callPlugin(conn, plugin, cmd, args); - if (s_logger.isTraceEnabled()) { - s_logger.trace("callHostPlugin Result: " + result); + if (logger.isTraceEnabled()) { + logger.trace("callHostPlugin Result: " + result); } return result.replace("\n", ""); } catch (final Types.HandleInvalid e) { - s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to HandleInvalid clazz:" + e.clazz + ", handle:" + e.handle); } catch (final XenAPIException e) { - s_logger.warn("callHostPlugin 
failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.toString(), e); } catch (final XmlRpcException e) { - s_logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e); + logger.warn("callHostPlugin failed for cmd: " + cmd + " with args " + getArgsString(args) + " due to " + e.getMessage(), e); } return null; } @@ -503,13 +501,13 @@ public boolean canBridgeFirewall(final Connection conn) { public void checkForSuccess(final Connection c, final Task task) throws XenAPIException, XmlRpcException { if (task.getStatus(c) == Types.TaskStatusType.SUCCESS) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") completed"); + if (logger.isTraceEnabled()) { + logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") completed"); } return; } else { final String msg = "Task failed! 
Task record: " + task.getRecord(c); - s_logger.warn(msg); + logger.warn(msg); task.cancel(c); task.destroy(c); throw new Types.BadAsyncResult(msg); @@ -522,11 +520,11 @@ protected boolean checkSR(final Connection conn, final SR sr) { final Set pbds = sr.getPBDs(conn); if (pbds.size() == 0) { final String msg = "There is no PBDs for this SR: " + srr.nameLabel + " on host:" + _host.getUuid(); - s_logger.warn(msg); + logger.warn(msg); return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking " + srr.nameLabel + " or SR " + srr.uuid + " on " + _host); + if (logger.isDebugEnabled()) { + logger.debug("Checking " + srr.nameLabel + " or SR " + srr.uuid + " on " + _host); } if (srr.shared) { if (SRType.NFS.equals(srr.type)) { @@ -567,7 +565,7 @@ protected boolean checkSR(final Connection conn, final SR sr) { } catch (final Exception e) { final String msg = "checkSR failed host:" + _host + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return false; } return true; @@ -591,7 +589,7 @@ private void CheckXenHostInfo() throws ConfigurationException { } if (!hostRec.address.equals(_host.getIp())) { final String msg = "Host " + _host.getIp() + " seems be reinstalled, please remove this host and readd"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } finally { @@ -626,7 +624,7 @@ public boolean cleanupHaltedVms(final Connection conn) throws XenAPIException, X try { vm.destroy(conn); } catch (final Exception e) { - s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to ", e); + logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to ", e); success = false; } } @@ -701,7 +699,7 @@ protected ExecutionResult cleanupNetworkElementCommand(final IpAssocCommand cmd) } } } catch (final Exception e) { - s_logger.debug("Ip Assoc failure on applying one ip due to exception: ", e); + 
logger.debug("Ip Assoc failure on applying one ip due to exception: ", e); return new ExecutionResult(false, e.getMessage()); } return new ExecutionResult(true, null); @@ -713,7 +711,7 @@ public void cleanupTemplateSR(final Connection conn) { final Host host = Host.getByUuid(conn, _host.getUuid()); pbds = host.getPBDs(conn); } catch (final XenAPIException e) { - s_logger.warn("Unable to get the SRs " + e.toString(), e); + logger.warn("Unable to get the SRs " + e.toString(), e); throw new CloudRuntimeException("Unable to get SRs " + e.toString(), e); } catch (final Exception e) { throw new CloudRuntimeException("Unable to get SRs " + e.getMessage(), e); @@ -725,7 +723,7 @@ public void cleanupTemplateSR(final Connection conn) { sr = pbd.getSR(conn); srRec = sr.getRecord(conn); } catch (final Exception e) { - s_logger.warn("pbd.getSR get Exception due to ", e); + logger.warn("pbd.getSR get Exception due to ", e); continue; } final String type = srRec.type; @@ -738,7 +736,7 @@ public void cleanupTemplateSR(final Connection conn) { pbd.destroy(conn); sr.forget(conn); } catch (final Exception e) { - s_logger.warn("forget SR catch Exception due to ", e); + logger.warn("forget SR catch Exception due to ", e); } } } @@ -758,12 +756,12 @@ public void cleanUpTmpDomVif(final Connection conn, final Network nw) throws Xen final Map config = vifr.otherConfig; vifName = config.get("nameLabel"); } - s_logger.debug("A VIF in dom0 for the network is found - so destroy the vif"); + logger.debug("A VIF in dom0 for the network is found - so destroy the vif"); v.destroy(conn); - s_logger.debug("Destroy temp dom0 vif" + vifName + " success"); + logger.debug("Destroy temp dom0 vif " + vifName + " success"); } } catch (final Exception e) { - s_logger.warn("Destroy temp dom0 vif " + vifName + "failed", e); + logger.warn("Destroy temp dom0 vif " + vifName + " failed", e); } } } @@ -785,7 +783,7 @@ protected VDI cloudVDIcopy(final Connection conn, final VDI vdi, final SR sr, in try { 
task.destroy(conn); } catch (final Exception e) { - s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e.toString()); + logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e.toString()); } } } @@ -809,7 +807,7 @@ public HashMap clusterVMMetaDataSync(final Connection conn) { } } catch (final Throwable e) { final String msg = "Unable to get vms through host " + _host.getUuid() + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg); } return vmMetaDatum; @@ -915,7 +913,7 @@ public synchronized Network configureTunnelNetwork(final Connection conn, final } return nw; } catch (final Exception e) { - s_logger.warn("createandConfigureTunnelNetwork failed", e); + logger.warn("createandConfigureTunnelNetwork failed", e); return null; } } @@ -934,16 +932,16 @@ public String connect(final Connection conn, final String vmName, final String i final Set vms = VM.getByNameLabel(conn, vmName); if (vms.size() < 1) { final String msg = "VM " + vmName + " is not running"; - s_logger.warn(msg); + logger.warn(msg); return msg; } } catch (final Exception e) { final String msg = "VM.getByNameLabel " + vmName + " failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return msg; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to connect to " + ipAddress + " attempt " + i + " of " + _retry); + if (logger.isDebugEnabled()) { + logger.debug("Trying to connect to " + ipAddress + " attempt " + i + " of " + _retry); } if (pingdomr(conn, ipAddress, Integer.toString(port))) { return null; @@ -954,7 +952,7 @@ public String connect(final Connection conn, final String vmName, final String i } } final String msg = "Timeout, Unable to logon to " + ipAddress; - s_logger.debug(msg); + logger.debug(msg); return msg; } @@ -978,7 +976,7 @@ public String copyVhdFromSecondaryStorage(final Connection 
conn, final String mo if (killCopyProcess(conn, source)) { destroyVDIbyNameLabel(conn, nameLabel); } - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -987,15 +985,15 @@ public ExecutionResult createFileInVR(final String routerIp, final String path, final Connection conn = getConnection(); final String hostPath = "/tmp/"; - s_logger.debug("Copying VR with ip " + routerIp + " config file into host " + _host.getIp()); + logger.debug("Copying VR with ip " + routerIp + " config file into host " + _host.getIp()); try { SshHelper.scpTo(_host.getIp(), 22, _username, null, _password.peek(), hostPath, content.getBytes(Charset.defaultCharset()), filename, null); } catch (final Exception e) { - s_logger.warn("scp VR config file into host " + _host.getIp() + " failed with exception " + e.getMessage().toString()); + logger.warn("scp VR config file into host " + _host.getIp() + " failed with exception " + e.getMessage().toString()); } final String rc = callHostPlugin(conn, "vmops", "createFileInDomr", "domrip", routerIp, "srcfilepath", hostPath + filename, "dstfilepath", path, "cleanup", "true"); - s_logger.debug("VR Config file " + filename + " got created in VR, IP: " + routerIp + " with content \n" + content); + logger.debug("VR Config file " + filename + " got created in VR, IP: " + routerIp + " with content \n" + content); return new ExecutionResult(rc.startsWith("succ#"), rc.substring(5)); } @@ -1007,12 +1005,12 @@ public ExecutionResult copyPatchFilesToVR(final String routerIp, final String pa for (String file: systemVmPatchFiles) { rc = callHostPlugin(conn, "vmops", "createFileInDomr", "domrip", routerIp, "srcfilepath", hostPath.concat(file), "dstfilepath", path, "cleanup", "false"); if (rc.startsWith("fail#")) { - s_logger.error(String.format("Failed to scp file %s required for patching the systemVM", file)); + logger.error(String.format("Failed to scp file %s required for patching the systemVM", file)); break; } } - 
s_logger.debug("VR Config files at " + hostPath + " got created in VR, IP: " + routerIp); + logger.debug("VR Config files at " + hostPath + " got created in VR, IP: " + routerIp); return new ExecutionResult(rc.startsWith("succ#"), rc.substring(5)); } @@ -1032,19 +1030,19 @@ protected SR createIsoSRbyURI(final Connection conn, final URI uri, final String return sr; } catch (final XenAPIException e) { final String msg = "createIsoSRbyURI failed! mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "createIsoSRbyURI failed! mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } protected SR createNfsSRbyURI(final Connection conn, final URI uri, final boolean shared) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating a " + (shared ? "shared SR for " : "not shared SR for ") + uri); + if (logger.isDebugEnabled()) { + logger.debug("Creating a " + (shared ? 
"shared SR for " : "not shared SR for ") + uri); } final Map deviceConfig = new HashMap(); @@ -1071,18 +1069,18 @@ protected SR createNfsSRbyURI(final Connection conn, final URI uri, final boolea if (!checkSR(conn, sr)) { throw new Exception("no attached PBD"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(logX(sr, "Created a SR; UUID is " + sr.getUuid(conn) + " device config is " + deviceConfig)); + if (logger.isDebugEnabled()) { + logger.debug(logX(sr, "Created a SR; UUID is " + sr.getUuid(conn) + " device config is " + deviceConfig)); } sr.scan(conn); return sr; } catch (final XenAPIException e) { final String msg = "Can not create second storage SR mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "Can not create second storage SR mountpoint: " + uri.getHost() + uri.getPath() + " due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } @@ -1090,11 +1088,11 @@ protected SR createNfsSRbyURI(final Connection conn, final URI uri, final boolea public SR findPatchIsoSR(final Connection conn) throws XmlRpcException, XenAPIException { Set srs = SR.getByNameLabel(conn, "XenServer Tools"); if (srs.size() != 1) { - s_logger.debug("Failed to find SR by name 'XenServer Tools', will try to find 'XCP-ng Tools' SR"); + logger.debug("Failed to find SR by name 'XenServer Tools', will try to find 'XCP-ng Tools' SR"); srs = SR.getByNameLabel(conn, "XCP-ng Tools"); } if (srs.size() != 1) { - s_logger.debug("Failed to find SR by name 'XenServer Tools' or 'XCP-ng Tools', will try to find 'Citrix Hypervisor' SR"); + logger.debug("Failed to find SR by name 'XenServer Tools' or 'XCP-ng Tools', will try to find 'Citrix Hypervisor' SR"); srs = SR.getByNameLabel(conn, "Citrix Hypervisor Tools"); } if (srs.size() != 1) { @@ -1165,7 +1163,7 @@ String 
createTemplateFromSnapshot(final Connection conn, final String templatePa } final String source = "cloud_mount/" + tmpltLocalDir; killCopyProcess(conn, source); - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -1218,8 +1216,8 @@ public VBD createVbd(final Connection conn, final DiskTO volume, final String vm } final VBD vbd = VBD.create(conn, vbdr); - if (s_logger.isDebugEnabled()) { - s_logger.debug("VBD " + vbd.getUuid(conn) + " created for " + volume); + if (logger.isDebugEnabled()) { + logger.debug("VBD " + vbd.getUuid(conn) + " created for " + volume); } return vbd; @@ -1253,8 +1251,8 @@ public void createVGPU(final Connection conn, final StartCommand cmd, final VM v public VIF createVif(final Connection conn, final String vmName, final VM vm, final VirtualMachineTO vmSpec, final NicTO nic) throws XmlRpcException, XenAPIException { assert nic.getUuid() != null : "Nic should have a uuid value"; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating VIF for " + vmName + " on nic " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Creating VIF for " + vmName + " on nic " + nic); } VIF.Record vifr = new VIF.Record(); vifr.VM = vm; @@ -1286,10 +1284,10 @@ public VIF createVif(final Connection conn, final String vmName, final VM vm, fi vifr.lockingMode = Types.VifLockingMode.NETWORK_DEFAULT; final VIF vif = VIF.create(conn, vifr); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { vifr = vif.getRecord(conn); if (vifr != null) { - s_logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId()); + logger.debug("Created a vif " + vifr.uuid + " on " + nic.getDeviceId()); } } @@ -1351,7 +1349,7 @@ public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmS } else { // scaling disallowed, set static memory target if (vmSpec.isEnableDynamicallyScaleVm() && !isDmcEnabled(conn, host)) { - s_logger.warn("Host " + host.getHostname(conn) + " does not support dynamic 
scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable"); + logger.warn("Host " + host.getHostname(conn) + " does not support dynamic scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable"); } vmr.memoryStaticMin = vmSpec.getMinRam(); vmr.memoryStaticMax = vmSpec.getMaxRam(); @@ -1377,7 +1375,7 @@ public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmS } final VM vm = VM.create(conn, vmr); - s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName()); + logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName()); final Map vcpuParams = new HashMap(); @@ -1413,14 +1411,14 @@ public VM createVmFromTemplate(final Connection conn, final VirtualMachineTO vmS String pvargs = vm.getPVArgs(conn); pvargs = pvargs + vmSpec.getBootArgs().replaceAll(" ", "%"); vm.setPVArgs(conn, pvargs); - s_logger.debug("PV args are " + pvargs); + logger.debug("PV args are " + pvargs); // send boot args into xenstore-data for HVM instances Map xenstoreData = new HashMap<>(); xenstoreData.put(XENSTORE_DATA_CS_INIT, bootArgs); vm.setXenstoreData(conn, xenstoreData); - s_logger.debug("HVM args are " + bootArgs); + logger.debug("HVM args are " + bootArgs); } if (!(guestOsTypeName.startsWith("Windows") || guestOsTypeName.startsWith("Citrix") || guestOsTypeName.startsWith("Other"))) { @@ -1473,7 +1471,7 @@ public VM createWorkingVM(final Connection conn, final String vmName, final Stri final String guestOsTypeName = platformEmulator; if (guestOsTypeName == null) { final String msg = " Hypervisor " + this.getClass().getName() + " doesn't support guest OS type " + guestOSType + ". 
you can choose 'Other install media' to run it as HVM"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } final VM template = getVM(conn, guestOsTypeName); @@ -1486,7 +1484,7 @@ public VM createWorkingVM(final Connection conn, final String vmName, final Stri final VDI vdi = VDI.getByUuid(conn, vdiUuid); vdiMap.put(vdi, volume); } catch (final Types.UuidInvalid e) { - s_logger.warn("Unable to find vdi by uuid: " + vdiUuid + ", skip it"); + logger.warn("Unable to find vdi by uuid: " + vdiUuid + ", skip it"); } } for (final Map.Entry entry : vdiMap.entrySet()) { @@ -1549,12 +1547,12 @@ public void destroyPatchVbd(final Connection conn, final Set vms) throws Xml vbd.eject(conn); } } catch (Exception e) { - s_logger.debug("Cannot eject CD-ROM device for VM " + vmName + " due to " + e.toString(), e); + logger.debug("Cannot eject CD-ROM device for VM " + vmName + " due to " + e.toString(), e); } try { vbd.destroy(conn); } catch (Exception e) { - s_logger.debug("Cannot destroy CD-ROM device for VM " + vmName + " due to " + e.toString(), e); + logger.debug("Cannot destroy CD-ROM device for VM " + vmName + " due to " + e.toString(), e); } break; } @@ -1572,7 +1570,7 @@ public synchronized void destroyTunnelNetwork(final Connection conn, final Netwo } return; } catch (final Exception e) { - s_logger.warn("destroyTunnelNetwork failed:", e); + logger.warn("destroyTunnelNetwork failed:", e); return; } } @@ -1581,7 +1579,7 @@ void destroyVDIbyNameLabel(final Connection conn, final String nameLabel) { try { final Set vdis = VDI.getByNameLabel(conn, nameLabel); if (vdis.size() != 1) { - s_logger.warn("destroyVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel); + logger.warn("destroyVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel); return; } for (final VDI vdi : vdis) { @@ -1589,14 +1587,14 @@ void destroyVDIbyNameLabel(final Connection conn, final String nameLabel) { 
vdi.destroy(conn); } catch (final Exception e) { final String msg = "Failed to destroy VDI : " + nameLabel + "due to " + e.toString() + "\n Force deleting VDI using system 'rm' command"; - s_logger.warn(msg); + logger.warn(msg); try { final String srUUID = vdi.getSR(conn).getUuid(conn); final String vdiUUID = vdi.getUuid(conn); final String vdifile = "/var/run/sr-mount/" + srUUID + "/" + vdiUUID + ".vhd"; callHostPluginAsync(conn, "vmopspremium", "remove_corrupt_vdi", 10, "vdifile", vdifile); } catch (final Exception e2) { - s_logger.warn(e2); + logger.warn(e2); } } } @@ -1625,7 +1623,7 @@ public boolean doPingTest(final Connection conn, final String computingHostIp) { } return true; } catch (final Exception e) { - s_logger.warn("Catch exception " + e.toString(), e); + logger.warn("Catch exception " + e.toString(), e); return false; } finally { sshConnection.close(); @@ -1694,18 +1692,18 @@ protected Network enableVlanNetwork(final Connection conn, final long tag, final final String newName = "VLAN-" + network.getNetworkRecord(conn).uuid + "-" + tag; XsLocalNetwork vlanNic = getNetworkByName(conn, newName); if (vlanNic == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't find vlan network with the new name so trying old name: " + oldName); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find vlan network with the new name so trying old name: " + oldName); } vlanNic = getNetworkByName(conn, oldName); if (vlanNic != null) { - s_logger.info("Renaming VLAN with old name " + oldName + " to " + newName); + logger.info("Renaming VLAN with old name " + oldName + " to " + newName); vlanNic.getNetwork().setNameLabel(conn, newName); } } if (vlanNic == null) { // Can't find it, then create it. 
- if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating VLAN network for " + tag + " on host " + _host.getIp()); + if (logger.isDebugEnabled()) { + logger.debug("Creating VLAN network for " + tag + " on host " + _host.getIp()); } final Network.Record nwr = new Network.Record(); nwr.nameLabel = newName; @@ -1728,15 +1726,15 @@ protected Network enableVlanNetwork(final Connection conn, final long tag, final return vlanNetwork; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating VLAN " + tag + " on host " + _host.getIp() + " on device " + nPifr.device); + if (logger.isDebugEnabled()) { + logger.debug("Creating VLAN " + tag + " on host " + _host.getIp() + " on device " + nPifr.device); } final VLAN vlan = VLAN.create(conn, nPif, tag, vlanNetwork); if (vlan != null) { final VLAN.Record vlanr = vlan.getRecord(conn); if (vlanr != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VLAN is created for " + tag + ". The uuid is " + vlanr.uuid); + if (logger.isDebugEnabled()) { + logger.debug("VLAN is created for " + tag + ". 
The uuid is " + vlanr.uuid); } } } @@ -1774,7 +1772,7 @@ public ExecutionResult executeInVR(final String routerIP, final String script, f // semicolon need to be escape for bash cmdline = cmdline.replaceAll(";", "\\\\;"); try { - s_logger.debug("Executing command in VR: " + cmdline); + logger.debug("Executing command in VR: " + cmdline); result = SshHelper.sshExecute(_host.getIp(), 22, _username, null, _password.peek(), cmdline, VRScripts.CONNECTION_TIMEOUT, VRScripts.CONNECTION_TIMEOUT, timeout); } catch (final Exception e) { return new ExecutionResult(false, e.getMessage()); @@ -1856,8 +1854,8 @@ protected void fillHostInfo(final Connection conn, final StartupRoutingCommand c cmd.setMemory(ram); cmd.setDom0MinMemory(dom0Ram); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Total Ram: " + toHumanReadableSize(ram) + " dom0 Ram: " + toHumanReadableSize(dom0Ram)); + if (logger.isDebugEnabled()) { + logger.debug("Total Ram: " + toHumanReadableSize(ram) + " dom0 Ram: " + toHumanReadableSize(dom0Ram)); } PIF pif = PIF.getByUuid(conn, _host.getPrivatePif()); @@ -1917,7 +1915,7 @@ protected void fillHostInfo(final Connection conn, final StartupRoutingCommand c cmd.setSupportsClonedVolumes(supportsClonedVolumes); } catch (NumberFormatException ex) { - s_logger.warn("Issue sending 'xe sm-list' via SSH to XenServer host: " + ex.getMessage()); + logger.warn("Issue sending 'xe sm-list' via SSH to XenServer host: " + ex.getMessage()); } } catch (final XmlRpcException e) { throw new CloudRuntimeException("XML RPC Exception: " + e.getMessage(), e); @@ -1933,7 +1931,7 @@ protected void syncPlatformAndCoresPerSocketSettings(String coresPerSocket, Map< return; } if (platform.containsKey(PLATFORM_CORES_PER_SOCKET_KEY)) { - s_logger.debug("Updating the cores per socket value from: " + platform.get(PLATFORM_CORES_PER_SOCKET_KEY) + " to " + coresPerSocket); + logger.debug("Updating the cores per socket value from: " + platform.get(PLATFORM_CORES_PER_SOCKET_KEY) + " to " + 
coresPerSocket); } platform.put(PLATFORM_CORES_PER_SOCKET_KEY, coresPerSocket); } @@ -1974,14 +1972,14 @@ protected void finalizeVmMetaData(final VM vm, final VM.Record vmr, final Connec // Add configuration settings VM record for User VM instances before creating VM Map extraConfig = vmSpec.getExtraConfig(); if (vmSpec.getType().equals(VirtualMachine.Type.User) && MapUtils.isNotEmpty(extraConfig)) { - s_logger.info("Appending user extra configuration settings to VM"); + logger.info("Appending user extra configuration settings to VM"); ExtraConfigurationUtility.setExtraConfigurationToVm(conn,vmr, vm, extraConfig); } } protected void setVmBootDetails(final VM vm, final Connection conn, String bootType, String bootMode) throws XenAPIException, XmlRpcException { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Setting boottype=%s and bootmode=%s for VM: %s", bootType, bootMode, vm.getUuid(conn))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Setting boottype=%s and bootmode=%s for VM: %s", bootType, bootMode, vm.getUuid(conn))); } Boolean isSecure = bootType.equals(ApiConstants.BootType.UEFI.toString()) && ApiConstants.BootMode.SECURE.toString().equals(bootMode); @@ -2017,14 +2015,14 @@ public synchronized Network findOrCreateTunnelNetwork(final Connection conn, fin otherConfig.put("assume_network_is_shared", "true"); rec.otherConfig = otherConfig; nw = Network.create(conn, rec); - s_logger.debug("### XenServer network for tunnels created:" + nwName); + logger.debug("### XenServer network for tunnels created:" + nwName); } else { nw = networks.iterator().next(); - s_logger.debug("XenServer network for tunnels found:" + nwName); + logger.debug("XenServer network for tunnels found:" + nwName); } return nw; } catch (final Exception e) { - s_logger.warn("createTunnelNetwork failed", e); + logger.warn("createTunnelNetwork failed", e); return null; } } @@ -2037,7 +2035,7 @@ void forceShutdownVM(final Connection conn, final VM vm) { 
vm.destroy(conn); } catch (final Exception e) { final String msg = "forceShutdown failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg); } } @@ -2123,7 +2121,7 @@ public PingCommand getCurrentStatus(final long id) { if (!pingXAPI()) { Thread.sleep(1000); if (!pingXAPI()) { - s_logger.warn("can not ping xenserver " + _host.getUuid()); + logger.warn("can not ping xenserver " + _host.getUuid()); return null; } } @@ -2138,7 +2136,7 @@ public PingCommand getCurrentStatus(final long id) { return new PingRoutingWithNwGroupsCommand(getType(), id, getHostVmStateReport(conn), nwGrpStates); } } catch (final Exception e) { - s_logger.warn("Unable to get current status", e); + logger.warn("Unable to get current status", e); return null; } } @@ -2160,14 +2158,14 @@ protected double getDataAverage(final Node dataNode, final int col, final int nu if (!Double.isInfinite(value) && !Double.isNaN(value)) { return value; } else { - s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows=0"); + logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows=0"); return dummy; } } else { if (!Double.isInfinite(value / numRowsUsed) && !Double.isNaN(value / numRowsUsed)) { return value / numRowsUsed; } else { - s_logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows>0"); + logger.warn("Found an invalid value (infinity/NaN) in getDataAverage(), numRows>0"); return dummy; } } @@ -2180,7 +2178,7 @@ public HashMap> getGPUGroupDetails(final protected String getGuestOsType(String platformEmulator) { if (StringUtils.isBlank(platformEmulator)) { - s_logger.debug("no guest OS type, start it as HVM guest"); + logger.debug("no guest OS type, start it as HVM guest"); platformEmulator = "Other install media"; } return platformEmulator; @@ -2238,7 +2236,7 @@ protected boolean getHostInfo(final Connection conn) throws IllegalArgumentExcep if (_guestNetworkName != null && 
!_guestNetworkName.equals(_privateNetworkName)) { guestNic = getNetworkByName(conn, _guestNetworkName); if (guestNic == null) { - s_logger.warn("Unable to find guest network " + _guestNetworkName); + logger.warn("Unable to find guest network " + _guestNetworkName); throw new IllegalArgumentException("Unable to find guest network " + _guestNetworkName + " for host " + _host.getIp()); } } else { @@ -2252,7 +2250,7 @@ protected boolean getHostInfo(final Connection conn) throws IllegalArgumentExcep if (_publicNetworkName != null && !_publicNetworkName.equals(_guestNetworkName)) { publicNic = getNetworkByName(conn, _publicNetworkName); if (publicNic == null) { - s_logger.warn("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp()); + logger.warn("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp()); throw new IllegalArgumentException("Unable to find public network " + _publicNetworkName + " for host " + _host.getIp()); } } else { @@ -2267,7 +2265,7 @@ protected boolean getHostInfo(final Connection conn) throws IllegalArgumentExcep XsLocalNetwork storageNic1 = null; storageNic1 = getNetworkByName(conn, _storageNetworkName1); if (storageNic1 == null) { - s_logger.warn("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp()); + logger.warn("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp()); throw new IllegalArgumentException("Unable to find storage network " + _storageNetworkName1 + " for host " + _host.getIp()); } else { _host.setStorageNetwork1(storageNic1.getNetworkRecord(conn).uuid); @@ -2282,17 +2280,17 @@ protected boolean getHostInfo(final Connection conn) throws IllegalArgumentExcep } } - s_logger.info("XenServer Version is " + _host.getProductVersion() + " for host " + _host.getIp()); - s_logger.info("Private Network is " + _privateNetworkName + " for host " + _host.getIp()); - s_logger.info("Guest Network is " + _guestNetworkName + " 
for host " + _host.getIp()); - s_logger.info("Public Network is " + _publicNetworkName + " for host " + _host.getIp()); + logger.info("XenServer Version is " + _host.getProductVersion() + " for host " + _host.getIp()); + logger.info("Private Network is " + _privateNetworkName + " for host " + _host.getIp()); + logger.info("Guest Network is " + _guestNetworkName + " for host " + _host.getIp()); + logger.info("Public Network is " + _publicNetworkName + " for host " + _host.getIp()); return true; } catch (final XenAPIException e) { - s_logger.warn("Unable to get host information for " + _host.getIp(), e); + logger.warn("Unable to get host information for " + _host.getIp(), e); return false; } catch (final Exception e) { - s_logger.warn("Unable to get host information for " + _host.getIp(), e); + logger.warn("Unable to get host information for " + _host.getIp(), e); return false; } } @@ -2361,7 +2359,7 @@ public HostStatsEntry getHostStats(final Connection conn, final GetHostStatsComm /* * if (hostStats.getNumCpus() != 0) { * hostStats.setCpuUtilization(hostStats.getCpuUtilization() / - * hostStats.getNumCpus()); s_logger.debug("Host cpu utilization " + + * hostStats.getNumCpus()); logger.debug("Host cpu utilization " + * hostStats.getCpuUtilization()); } */ @@ -2376,7 +2374,7 @@ protected HashMap getHostVmStateReport(final Con vm_map = VM.getAllRecords(conn); break; } catch (final Throwable e) { - s_logger.warn("Unable to get vms", e); + logger.warn("Unable to get vms", e); } try { Thread.sleep(1000); @@ -2400,11 +2398,11 @@ protected HashMap getHostVmStateReport(final Con try { host_uuid = host.getUuid(conn); } catch (final BadServerResponse e) { - s_logger.error("Failed to get host uuid for host " + host.toWireString(), e); + logger.error("Failed to get host uuid for host " + host.toWireString(), e); } catch (final XenAPIException e) { - s_logger.error("Failed to get host uuid for host " + host.toWireString(), e); + logger.error("Failed to get host uuid for host " + 
host.toWireString(), e); } catch (final XmlRpcException e) { - s_logger.error("Failed to get host uuid for host " + host.toWireString(), e); + logger.error("Failed to get host uuid for host " + host.toWireString(), e); } if (host_uuid.equalsIgnoreCase(_host.getUuid())) { @@ -2440,7 +2438,7 @@ public SR getIscsiSR(final Connection conn, final String srNameLabel, final Stri final String tmp[] = path.split("/"); if (tmp.length != 3) { final String msg = "Wrong iscsi path " + path + " it should be /targetIQN/LUN"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } final String targetiqn = tmp[1].trim(); @@ -2494,11 +2492,11 @@ public SR getIscsiSR(final Connection conn, final String srNameLabel, final Stri } catch (final XenAPIException e) { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } @@ -2522,7 +2520,7 @@ private SR introduceAndPlugIscsiSr(Connection conn, String pooluuid, String srNa if (setHosts == null) { final String msg = "Unable to create iSCSI SR " + deviceConfig + " due to hosts not available."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -2636,12 +2634,12 @@ private String probeScisiId(Connection conn, Host host, Map devi } if (!found) { final String msg = "can not find LUN " + lunid + " in " + errmsg; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } } else { final String msg = "Unable to create Iscsi SR " + deviceConfig + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } } @@ -2657,14 +2655,14 @@ public SR getISOSRbyVmName(final Connection 
conn, final String vmName) { return srs.iterator().next(); } else { final String msg = "getIsoSRbyVmName failed due to there are more than 1 SR having same Label"; - s_logger.warn(msg); + logger.warn(msg); } } catch (final XenAPIException e) { final String msg = "getIsoSRbyVmName failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } catch (final Exception e) { final String msg = "getIsoSRbyVmName failed due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); } return null; } @@ -2755,28 +2753,28 @@ public String getLowestAvailableVIFDeviceNum(final Connection conn, final VM vm) if (vm.getIsControlDomain(conn) || vif.getCurrentlyAttached(conn)) { usedDeviceNums.add(Integer.valueOf(deviceId)); } else { - s_logger.debug("Found unplugged VIF " + deviceId + " in VM " + vmName + " destroy it"); + logger.debug("Found unplugged VIF " + deviceId + " in VM " + vmName + " destroy it"); vif.destroy(conn); } } catch (final NumberFormatException e) { final String msg = "Obtained an invalid value for an allocated VIF device number for VM: " + vmName; - s_logger.debug(msg, e); + logger.debug(msg, e); throw new CloudRuntimeException(msg); } } for (Integer i = 0; i < _maxNics; i++) { if (!usedDeviceNums.contains(i)) { - s_logger.debug("Lowest available Vif device number: " + i + " for VM: " + vmName); + logger.debug("Lowest available Vif device number: " + i + " for VM: " + vmName); return i.toString(); } } } catch (final XmlRpcException e) { final String msg = "Caught XmlRpcException: " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); } catch (final XenAPIException e) { final String msg = "Caught XenAPIException: " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName); @@ -2793,11 +2791,11 @@ protected XsLocalNetwork getManagementNetwork(final Connection conn) throws XmlR if (rec.VLAN != null && rec.VLAN != -1) { 
final String msg = new StringBuilder("Unsupported configuration. Management network is on a VLAN. host=").append(_host.getUuid()).append("; pif=").append(rec.uuid) .append("; vlan=").append(rec.VLAN).toString(); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Management network is on pif=" + rec.uuid); + if (logger.isDebugEnabled()) { + logger.debug("Management network is on pif=" + rec.uuid); } mgmtPif = pif; mgmtPifRec = rec; @@ -2806,14 +2804,14 @@ protected XsLocalNetwork getManagementNetwork(final Connection conn) throws XmlR } if (mgmtPif == null) { final String msg = "Unable to find management network for " + _host.getUuid(); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } final Bond bond = mgmtPifRec.bondSlaveOf; if (!isRefNull(bond)) { final String msg = "Management interface is on slave(" + mgmtPifRec.uuid + ") of bond(" + bond.getUuid(conn) + ") on host(" + _host.getUuid() + "), please move management interface to bond!"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } final Network nk = mgmtPifRec.network; @@ -2828,8 +2826,8 @@ public String getName() { public XsLocalNetwork getNativeNetworkForTraffic(final Connection conn, final TrafficType type, final String name) throws XenAPIException, XmlRpcException { if (name != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for network named " + name); + if (logger.isDebugEnabled()) { + logger.debug("Looking for network named " + name); } return getNetworkByName(conn, name); } @@ -2858,7 +2856,7 @@ public Network getNetwork(final Connection conn, final NicTO nic) throws XenAPIE final String name = nic.getName(); final XsLocalNetwork network = getNativeNetworkForTraffic(conn, nic.getType(), name); if (network == null) { - s_logger.error("Network is not configured on the backend for nic " + nic.toString()); + logger.error("Network is not 
configured on the backend for nic " + nic.toString()); throw new CloudRuntimeException("Network for the backend is not configured correctly for network broadcast domain: " + nic.getBroadcastUri()); } final URI uri = nic.getBroadcastUri(); @@ -2949,8 +2947,8 @@ public XsLocalNetwork getNetworkByName(final Connection conn, final String name) return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found more than one network with the name " + name); + if (logger.isDebugEnabled()) { + logger.debug("Found more than one network with the name " + name); } Network earliestNetwork = null; Network.Record earliestNetworkRecord = null; @@ -3019,7 +3017,7 @@ public long[] getNetworkLbStats(final String privateIp, final String publicIp, f ExecutionResult callResult = executeInVR(privateIp, "get_haproxy_stats.sh", args); String detail = callResult.getDetails(); if (detail == null || detail.isEmpty()) { - s_logger.error("Get network loadbalancer stats returns empty result"); + logger.error("Get network loadbalancer stats returns empty result"); } final long[] stats = new long[1]; if (detail != null) { @@ -3117,7 +3115,7 @@ public String getPerfMon(final Connection conn, final Map params return result; } } catch (final Exception e) { - s_logger.error("Can not get performance monitor for AS due to ", e); + logger.error("Can not get performance monitor for AS due to ", e); } return null; } @@ -3133,7 +3131,7 @@ protected Object[] getRRDData(final Connection conn, final int flag) { try { doc = getStatsRawXML(conn, flag == 1 ? 
true : false); } catch (final Exception e1) { - s_logger.warn("Error whilst collecting raw stats from plugin: ", e1); + logger.warn("Error whilst collecting raw stats from plugin: ", e1); return null; } @@ -3192,7 +3190,7 @@ protected SR getSRByNameLabelandHost(final Connection conn, final String name) t private long getStaticMax(final String os, final boolean b, final long dynamicMinRam, final long dynamicMaxRam, final long recommendedValue) { if (recommendedValue == 0) { - s_logger.warn("No recommended value found for dynamic max, setting static max and dynamic max equal"); + logger.warn("No recommended value found for dynamic max, setting static max and dynamic max equal"); return dynamicMaxRam; } final long staticMax = Math.min(recommendedValue, 4L * dynamicMinRam); // XS @@ -3201,7 +3199,7 @@ private long getStaticMax(final String os, final boolean b, final long dynamicMi // stability if (dynamicMaxRam > staticMax) { // XS constraint that dynamic max <= // static max - s_logger.warn("dynamic max " + toHumanReadableSize(dynamicMaxRam) + " can't be greater than static max " + toHumanReadableSize(staticMax) + ", this can lead to stability issues. Setting static max as much as dynamic max "); + logger.warn("dynamic max " + toHumanReadableSize(dynamicMaxRam) + " can't be greater than static max " + toHumanReadableSize(staticMax) + ", this can lead to stability issues. 
Setting static max as much as dynamic max "); return dynamicMaxRam; } return staticMax; @@ -3209,13 +3207,13 @@ private long getStaticMax(final String os, final boolean b, final long dynamicMi private long getStaticMin(final String os, final boolean b, final long dynamicMinRam, final long dynamicMaxRam, final long recommendedValue) { if (recommendedValue == 0) { - s_logger.warn("No recommended value found for dynamic min"); + logger.warn("No recommended value found for dynamic min"); return dynamicMinRam; } if (dynamicMinRam < recommendedValue) { // XS constraint that dynamic min // > static min - s_logger.warn("Vm ram is set to dynamic min " + toHumanReadableSize(dynamicMinRam) + " and is less than the recommended static min " + toHumanReadableSize(recommendedValue) + ", this could lead to stability issues"); + logger.warn("Vm ram is set to dynamic min " + toHumanReadableSize(dynamicMinRam) + " and is less than the recommended static min " + toHumanReadableSize(recommendedValue) + ", this could lead to stability issues"); } return dynamicMinRam; } @@ -3239,23 +3237,23 @@ protected Document getStatsRawXML(final Connection conn, final boolean host) { final InputSource statsSource = new InputSource(in); return ParserUtils.getSaferDocumentBuilderFactory().newDocumentBuilder().parse(statsSource); } catch (final MalformedURLException e) { - s_logger.warn("Malformed URL? come on...." + urlStr); + logger.warn("Malformed URL? come on...." 
+ urlStr); return null; } catch (final IOException e) { - s_logger.warn("Problems getting stats using " + urlStr, e); + logger.warn("Problems getting stats using " + urlStr, e); return null; } catch (final SAXException e) { - s_logger.warn("Problems getting stats using " + urlStr, e); + logger.warn("Problems getting stats using " + urlStr, e); return null; } catch (final ParserConfigurationException e) { - s_logger.warn("Problems getting stats using " + urlStr, e); + logger.warn("Problems getting stats using " + urlStr, e); return null; } finally { if (in != null) { try { in.close(); } catch (final IOException e) { - s_logger.warn("Unable to close the buffer ", e); + logger.warn("Unable to close the buffer ", e); } } } @@ -3275,8 +3273,8 @@ public SR getStorageRepository(final Connection conn, final String srNameLabel) throw new CloudRuntimeException("More than one storage repository was found for pool with uuid: " + srNameLabel); } else if (srs.size() == 1) { final SR sr = srs.iterator().next(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("SR retrieved for " + srNameLabel); + if (logger.isDebugEnabled()) { + logger.debug("SR retrieved for " + srNameLabel); } if (checkSR(conn, sr)) { @@ -3307,15 +3305,15 @@ protected VDI getVDIbyLocationandSR(final Connection conn, final String loc, fin } final String msg = "can not getVDIbyLocationandSR " + loc; - s_logger.warn(msg); + logger.warn(msg); return null; } catch (final XenAPIException e) { final String msg = "getVDIbyLocationandSR exception " + loc + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "getVDIbyLocationandSR exception " + loc + " due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } @@ -3332,7 +3330,7 @@ public VDI getVDIbyUuid(final Connection conn, final String uuid, final boolean if (throwExceptionIfNotFound) { final 
String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg, e); } @@ -3345,7 +3343,7 @@ public String getVhdParent(final Connection conn, final String primaryStorageSRU final String parentUuid = callHostPlugin(conn, "vmopsSnapshot", "getVhdParent", "primaryStorageSRUuid", primaryStorageSRUuid, "snapshotUuid", snapshotUuid, "isISCSI", isISCSI.toString()); if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) { - s_logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); + logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); // errString is already logged. return null; } @@ -3386,7 +3384,7 @@ public VM getVM(final Connection conn, final String vmName) { // If there is more than one VM, print a warning if (vms.size() > 1) { - s_logger.warn("Found " + vms.size() + " VMs with name: " + vmName); + logger.warn("Found " + vms.size() + " VMs with name: " + vmName); } // Return the first VM in the set @@ -3424,7 +3422,7 @@ public long getVMSnapshotChainSize(final Connection conn, final VolumeObjectTO v } } } catch (final Exception e) { - s_logger.debug("Exception occurs when calculate snapshot capacity for volumes: due to " + e.toString()); + logger.debug("Exception occurs when calculate snapshot capacity for volumes: due to " + e.toString()); continue; } } @@ -3436,23 +3434,23 @@ public long getVMSnapshotChainSize(final Connection conn, final VolumeObjectTO v for (VM vmsnap : vmSnapshots) { try { final String vmSnapName = vmsnap.getNameLabel(conn); - s_logger.debug("snapname " + vmSnapName); + logger.debug("snapname " + vmSnapName); if (vmSnapName != null && vmSnapName.contains(vmSnapshotName) && vmsnap.getIsASnapshot(conn)) { - s_logger.debug("snapname " + vmSnapName + "isASnapshot"); + logger.debug("snapname " 
+ vmSnapName + "isASnapshot"); VDI memoryVDI = vmsnap.getSuspendVDI(conn); if (!isRefNull(memoryVDI)) { size = size + memoryVDI.getPhysicalUtilisation(conn); - s_logger.debug("memoryVDI size :" + toHumanReadableSize(size)); + logger.debug("memoryVDI size :" + toHumanReadableSize(size)); String parentUuid = memoryVDI.getSmConfig(conn).get("vhd-parent"); VDI pMemoryVDI = VDI.getByUuid(conn, parentUuid); if (!isRefNull(pMemoryVDI)) { size = size + pMemoryVDI.getPhysicalUtilisation(conn); } - s_logger.debug("memoryVDI size+parent :" + toHumanReadableSize(size)); + logger.debug("memoryVDI size+parent :" + toHumanReadableSize(size)); } } } catch (Exception e) { - s_logger.debug("Exception occurs when calculate snapshot capacity for memory: due to " + e.toString()); + logger.debug("Exception occurs when calculate snapshot capacity for memory: due to " + e.toString()); continue; } @@ -3485,7 +3483,7 @@ public PowerState getVmState(final Connection conn, final String vmName) { // com.xensource.xenapi.Types$BadServerResponse // [HANDLE_INVALID, VM, // 3dde93f9-c1df-55a7-2cde-55e1dce431ab] - s_logger.info("Unable to get a vm PowerState due to " + e.toString() + ". We are retrying. Count: " + retry); + logger.info("Unable to get a vm PowerState due to " + e.toString() + ". We are retrying. 
Count: " + retry); try { Thread.sleep(3000); } catch (final InterruptedException ex) { @@ -3493,11 +3491,11 @@ public PowerState getVmState(final Connection conn, final String vmName) { } } catch (final XenAPIException e) { final String msg = "Unable to get a vm PowerState due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); break; } catch (final XmlRpcException e) { final String msg = "Unable to get a vm PowerState due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); break; } } @@ -3582,8 +3580,8 @@ public HashMap getVmStats(final Connection conn, final Get } vmStatsAnswer.setCPUUtilization(vmStatsAnswer.getCPUUtilization() * 100); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Vm cpu utilization " + vmStatsAnswer.getCPUUtilization()); + if (logger.isDebugEnabled()) { + logger.debug("Vm cpu utilization " + vmStatsAnswer.getCPUUtilization()); } } @@ -3598,7 +3596,7 @@ record = vm.getRecord(conn); final Set consoles = record.consoles; if (consoles.isEmpty()) { - s_logger.warn("There are no Consoles available to the vm : " + record.nameDescription); + logger.warn("There are no Consoles available to the vm : " + record.nameDescription); return null; } final Iterator i = consoles.iterator(); @@ -3610,11 +3608,11 @@ record = vm.getRecord(conn); } } catch (final XenAPIException e) { final String msg = "Unable to get console url due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return null; } catch (final XmlRpcException e) { final String msg = "Unable to get console url due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); return null; } return null; @@ -3638,13 +3636,13 @@ protected void destroyUnattachedVBD(Connection conn, VM vm) { } } } catch (final Exception e) { - s_logger.debug("Failed to destroy unattached VBD due to ", e); + logger.debug("Failed to destroy unattached VBD due to ", e); } } public String handleVmStartFailure(final Connection conn, final String vmName, final 
VM vm, final String message, final Throwable th) { final String msg = "Unable to start " + vmName + " due to " + message; - s_logger.warn(msg, th); + logger.warn(msg, th); if (vm == null) { return msg; @@ -3659,24 +3657,24 @@ public String handleVmStartFailure(final Connection conn, final String vmName, f if (rec != null) { networks.add(rec.network); } else { - s_logger.warn("Unable to cleanup VIF: " + vif.toWireString() + " As vif record is null"); + logger.warn("Unable to cleanup VIF: " + vif.toWireString() + " As vif record is null"); } } catch (final Exception e) { - s_logger.warn("Unable to cleanup VIF", e); + logger.warn("Unable to cleanup VIF", e); } } if (vmr.powerState == VmPowerState.RUNNING) { try { vm.hardShutdown(conn); } catch (final Exception e) { - s_logger.warn("VM hardshutdown failed due to ", e); + logger.warn("VM hardshutdown failed due to ", e); } } if (vm.getPowerState(conn) == VmPowerState.HALTED) { try { vm.destroy(conn); } catch (final Exception e) { - s_logger.warn("VM destroy failed due to ", e); + logger.warn("VM destroy failed due to ", e); } } for (final VBD vbd : vmr.VBDs) { @@ -3684,7 +3682,7 @@ public String handleVmStartFailure(final Connection conn, final String vmName, f vbd.unplug(conn); vbd.destroy(conn); } catch (final Exception e) { - s_logger.warn("Unable to clean up VBD due to ", e); + logger.warn("Unable to clean up VBD due to ", e); } } for (final VIF vif : vmr.VIFs) { @@ -3692,7 +3690,7 @@ public String handleVmStartFailure(final Connection conn, final String vmName, f vif.unplug(conn); vif.destroy(conn); } catch (final Exception e) { - s_logger.warn("Unable to cleanup VIF", e); + logger.warn("Unable to cleanup VIF", e); } } for (final Network network : networks) { @@ -3701,7 +3699,7 @@ public String handleVmStartFailure(final Connection conn, final String vmName, f } } } catch (final Exception e) { - s_logger.warn("VM getRecord failed due to ", e); + logger.warn("VM getRecord failed due to ", e); } return msg; @@ 
-3711,7 +3709,7 @@ public String handleVmStartFailure(final Connection conn, final String vmName, f public StartupCommand[] initialize() throws IllegalArgumentException { final Connection conn = getConnection(); if (!getHostInfo(conn)) { - s_logger.warn("Unable to get host information for " + _host.getIp()); + logger.warn("Unable to get host information for " + _host.getIp()); return null; } final StartupRoutingCommand cmd = new StartupRoutingCommand(); @@ -3725,13 +3723,13 @@ public StartupCommand[] initialize() throws IllegalArgumentException { final Pool.Record poolr = pool.getRecord(conn); poolr.master.getRecord(conn); } catch (final Throwable e) { - s_logger.warn("Check for master failed, failing the FULL Cluster sync command"); + logger.warn("Check for master failed, failing the FULL Cluster sync command"); } List startUpLocalStorageCommands = null; try { startUpLocalStorageCommands = initializeLocalSrs(conn); } catch (XenAPIException | XmlRpcException e) { - s_logger.warn("Could not initialize local SRs on host: " + _host.getUuid(), e); + logger.warn("Could not initialize local SRs on host: " + _host.getUuid(), e); } if (CollectionUtils.isEmpty(startUpLocalStorageCommands)) { return new StartupCommand[] {cmd}; @@ -3782,17 +3780,17 @@ protected List getAllLocalSrForType(Connection conn, SRType srType) throws X Host host = pbd.getHost(conn); if (!isRefNull(host) && StringUtils.equals(host.getUuid(conn), _host.getUuid())) { if (!pbd.getCurrentlyAttached(conn)) { - s_logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid)); + logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid)); pbd.plug(conn); } - s_logger.debug("Scanning local SR: " + srRec.uuid); + logger.debug("Scanning local SR: " + srRec.uuid); SR sr = entry.getKey(); sr.scan(conn); localSrs.add(sr); } } } - s_logger.debug(String.format("Found %d local storage of type [%s] for host 
[%s]", localSrs.size(), srType.toString(), _host.getUuid())); + logger.debug(String.format("Found %d local storage of type [%s] for host [%s]", localSrs.size(), srType.toString(), _host.getUuid())); return localSrs; } @@ -3879,10 +3877,10 @@ public boolean isDeviceUsed(final Connection conn, final VM vm, final Long devic return true; } catch (final XmlRpcException e) { msg = "Catch XmlRpcException due to: " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); } catch (final XenAPIException e) { msg = "Catch XenAPIException due to: " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } throw new CloudRuntimeException("When check deviceId " + msg); } @@ -3904,8 +3902,8 @@ public boolean IsISCSI(final String type) { public boolean isNetworkSetupByName(final String nameTag) throws XenAPIException, XmlRpcException { if (nameTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for network setup by name " + nameTag); + if (logger.isDebugEnabled()) { + logger.debug("Looking for network setup by name " + nameTag); } final Connection conn = getConnection(); final XsLocalNetwork network = getNetworkByName(conn, nameTag); @@ -3942,7 +3940,7 @@ boolean killCopyProcess(final Connection conn, final String nameLabel) { String errMsg = null; if (results == null || results.equals("false")) { errMsg = "kill_copy_process failed"; - s_logger.warn(errMsg); + logger.warn(errMsg); return false; } else { return true; @@ -3952,7 +3950,7 @@ boolean killCopyProcess(final Connection conn, final String nameLabel) { public boolean launchHeartBeat(final Connection conn) { final String result = callHostPluginPremium(conn, "heartbeat", "host", _host.getUuid(), "timeout", Integer.toString(_heartbeatTimeout), "interval", Integer.toString(_heartbeatInterval)); if (result == null || !result.contains("> DONE <")) { - s_logger.warn("Unable to launch the heartbeat process on " + _host.getIp()); + logger.warn("Unable to launch the heartbeat process on " + 
_host.getIp()); return false; } return true; @@ -3982,14 +3980,14 @@ public void migrateVM(final Connection conn, final Host destHost, final VM vm, f } } catch (final XenAPIException e) { final String msg = "Unable to migrate VM(" + vmName + ") from host(" + _host.getUuid() + ")"; - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { - s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); + logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } @@ -4078,7 +4076,7 @@ private List> ovsFullSyncStates() { for (final String log : logs) { final String[] info = log.split(","); if (info.length != 5) { - s_logger.warn("Wrong element number in ovs log(" + log + ")"); + logger.warn("Wrong element number in ovs log(" + log + ")"); continue; } @@ -4115,11 +4113,11 @@ public HashMap parseDefaultOvsRuleCommand(final String str) { protected Pair parseTimestamp(final String timeStampStr) { final String[] tokens = timeStampStr.split("-"); if (tokens.length != 3) { - s_logger.debug("timeStamp in network has wrong pattern: " + timeStampStr); + logger.debug("timeStamp in network has wrong pattern: " + timeStampStr); return null; } if (!tokens[0].equals("CsCreateTime")) { - s_logger.debug("timeStamp in network doesn't start with CsCreateTime: " + timeStampStr); + logger.debug("timeStamp in network doesn't start with CsCreateTime: " + timeStampStr); return null; } return new Pair(Long.parseLong(tokens[1]), Integer.parseInt(tokens[2])); @@ -4127,13 +4125,13 @@ protected Pair parseTimestamp(final String timeStampStr) { private void pbdPlug(final Connection conn, final PBD pbd, final String uuid) { try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Plugging in PBD " + uuid + " for " + _host); + if (logger.isDebugEnabled()) 
{ + logger.debug("Plugging in PBD " + uuid + " for " + _host); } pbd.plug(conn); } catch (final Exception e) { final String msg = "PBD " + uuid + " is not attached! and PBD plug failed due to " + e.toString() + ". Please check this PBD in " + _host; - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg); } } @@ -4155,17 +4153,17 @@ public boolean pingXAPI() { try { final Host host = Host.getByUuid(conn, _host.getUuid()); if (!host.getEnabled(conn)) { - s_logger.debug("Host " + _host.getIp() + " is not enabled!"); + logger.debug("Host " + _host.getIp() + " is not enabled!"); return false; } } catch (final Exception e) { - s_logger.debug("cannot get host enabled status, host " + _host.getIp() + " due to " + e.toString(), e); + logger.debug("cannot get host enabled status, host " + _host.getIp() + " due to " + e.toString(), e); return false; } try { callHostPlugin(conn, "echo", "main"); } catch (final Exception e) { - s_logger.debug("cannot ping host " + _host.getIp() + " due to " + e.toString(), e); + logger.debug("cannot ping host " + _host.getIp() + " due to " + e.toString(), e); return false; } return true; @@ -4196,10 +4194,10 @@ protected boolean postCreatePrivateTemplate(final Connection conn, final String // Else, command threw an exception which has already been logged. 
if (result.equalsIgnoreCase("1")) { - s_logger.debug("Successfully created template.properties file on secondary storage for " + tmpltFilename); + logger.debug("Successfully created template.properties file on secondary storage for " + tmpltFilename); success = true; } else { - s_logger.warn("Could not create template.properties file on secondary storage for " + tmpltFilename + " for templateId: " + templateId); + logger.warn("Could not create template.properties file on secondary storage for " + tmpltFilename + " for templateId: " + templateId); } } @@ -4328,17 +4326,17 @@ protected VDI prepareManagedStorage(final Connection conn, final Map vdis = sr.getVDIs(conn); @@ -4589,10 +4587,10 @@ protected void skipOrRemoveSR(Connection conn, SR sr) { removeSR(conn, sr); return; } catch (XenAPIException | XmlRpcException e) { - s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e); + logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e); } String msg = "Remove SR failed"; - s_logger.warn(msg); + logger.warn(msg); } public void removeSR(final Connection conn, final SR sr) { @@ -4600,8 +4598,8 @@ public void removeSR(final Connection conn, final SR sr) { return; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(logX(sr, "Removing SR")); + if (logger.isDebugEnabled()) { + logger.debug(logX(sr, "Removing SR")); } for (int i = 0; i < 2; i++) { @@ -4613,8 +4611,8 @@ public void removeSR(final Connection conn, final SR sr) { Set pbds = sr.getPBDs(conn); for (final PBD pbd : pbds) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(logX(pbd, "Unplugging pbd")); + if (logger.isDebugEnabled()) { + logger.debug(logX(pbd, "Unplugging pbd")); } // if (pbd.getCurrentlyAttached(conn)) { @@ -4627,8 +4625,8 @@ public void removeSR(final Connection conn, final SR sr) { pbds = sr.getPBDs(conn); if (pbds.size() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(logX(sr, "Forgetting")); + if (logger.isDebugEnabled()) { + 
logger.debug(logX(sr, "Forgetting")); } sr.forget(conn); @@ -4636,31 +4634,31 @@ public void removeSR(final Connection conn, final SR sr) { return; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(logX(sr, "There is still one or more PBDs attached.")); + if (logger.isDebugEnabled()) { + logger.debug(logX(sr, "There is still one or more PBDs attached.")); - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { for (final PBD pbd : pbds) { - s_logger.trace(logX(pbd, " Still attached")); + logger.trace(logX(pbd, " Still attached")); } } } } catch (final XenAPIException e) { - s_logger.debug(logX(sr, "Catch XenAPIException: " + e.toString())); + logger.debug(logX(sr, "Catch XenAPIException: " + e.toString())); } catch (final XmlRpcException e) { - s_logger.debug(logX(sr, "Catch Exception: " + e.getMessage())); + logger.debug(logX(sr, "Catch Exception: " + e.getMessage())); } } - s_logger.warn(logX(sr, "Unable to remove SR")); + logger.warn(logX(sr, "Unable to remove SR")); } protected String removeSRSync(final Connection conn, final SR sr) { if (sr == null) { return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(logX(sr, "Removing SR")); + if (logger.isDebugEnabled()) { + logger.debug(logX(sr, "Removing SR")); } long waittime = 0; try { @@ -4672,7 +4670,7 @@ protected String removeSRSync(final Connection conn, final SR sr) { } if (waittime >= 1800000) { final String msg = "This template is being used, try late time"; - s_logger.warn(msg); + logger.warn(msg); return msg; } waittime += 30000; @@ -4684,12 +4682,12 @@ protected String removeSRSync(final Connection conn, final SR sr) { removeSR(conn, sr); return null; } catch (final XenAPIException e) { - s_logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e); + logger.warn(logX(sr, "Unable to get current operations " + e.toString()), e); } catch (final XmlRpcException e) { - s_logger.warn(logX(sr, "Unable to get current operations " + e.getMessage()), e); + 
logger.warn(logX(sr, "Unable to get current operations " + e.getMessage()), e); } final String msg = "Remove SR failed"; - s_logger.warn(msg); + logger.warn(msg); return msg; } @@ -4709,7 +4707,7 @@ public String revertToSnapshot(final Connection conn, final VM vmSnapshot, final errMsg = "revert_memory_snapshot exception"; } } - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -4804,7 +4802,7 @@ protected void setNicDevIdIfCorrectVifIsNotNull(final Connection conn, final IpA if (ip.isAdd()) { throw new InternalErrorException("Failed to find DomR VIF to associate IP with."); } else { - s_logger.debug("VIF to deassociate IP with does not exist, return success"); + logger.debug("VIF to deassociate IP with does not exist, return success"); } } else { ip.setNicDevId(Integer.valueOf(correctVif.getDevice(conn))); @@ -4825,8 +4823,8 @@ public String setupHeartbeatSr(final Connection conn, final SR sr, final boolean final Host host = Host.getByUuid(conn, _host.getUuid()); final Set tags = host.getTags(conn); if (force || !tags.contains("cloud-heartbeat-" + srUuid)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Setting up the heartbeat sr for host " + _host.getIp() + " and sr " + srUuid); + if (logger.isDebugEnabled()) { + logger.debug("Setting up the heartbeat sr for host " + _host.getIp() + " and sr " + srUuid); } final Set pbds = sr.getPBDs(conn); for (final PBD pbd : pbds) { @@ -4895,12 +4893,12 @@ public void setupLinkLocalNetwork(final Connection conn) { /* create temp VIF0 */ if (dom0vif == null) { - s_logger.debug("Can't find a vif on dom0 for link local, creating a new one"); + logger.debug("Can't find a vif on dom0 for link local, creating a new one"); final VIF.Record vifr = new VIF.Record(); vifr.VM = dom0; vifr.device = getLowestAvailableVIFDeviceNum(conn, dom0); if (vifr.device == null) { - s_logger.debug("Failed to create link local network, no vif available"); + logger.debug("Failed to create link local 
network, no vif available"); return; } final Map config = new HashMap(); @@ -4912,7 +4910,7 @@ public void setupLinkLocalNetwork(final Connection conn) { dom0vif = VIF.create(conn, vifr); plugDom0Vif(conn, dom0vif); } else { - s_logger.debug("already have a vif on dom0 for link local network"); + logger.debug("already have a vif on dom0 for link local network"); if (!dom0vif.getCurrentlyAttached(conn)) { plugDom0Vif(conn, dom0vif); } @@ -4923,10 +4921,10 @@ public void setupLinkLocalNetwork(final Connection conn) { _host.setLinkLocalNetwork(linkLocal.getUuid(conn)); } catch (final XenAPIException e) { - s_logger.warn("Unable to create local link network", e); + logger.warn("Unable to create local link network", e); throw new CloudRuntimeException("Unable to create local link network due to " + e.toString(), e); } catch (final XmlRpcException e) { - s_logger.warn("Unable to create local link network", e); + logger.warn("Unable to create local link network", e); throw new CloudRuntimeException("Unable to create local link network due to " + e.toString(), e); } } @@ -4946,7 +4944,7 @@ public boolean setupServer(final Connection conn, final Host host) { final String tag = it.next(); if (tag.startsWith("vmops-version-")) { if (tag.contains(version)) { - s_logger.info(logX(host, "Host " + hr.address + " is already setup.")); + logger.info(logX(host, "Host " + hr.address + " is already setup.")); return false; } else { it.remove(); @@ -5008,22 +5006,22 @@ public boolean setupServer(final Connection conn, final Host host) { } if (!new File(f).exists()) { - s_logger.warn("We cannot locate " + f); + logger.warn("We cannot locate " + f); continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Copying " + f + " to " + directoryPath + " on " + hr.address + " with permission " + permissions); + if (logger.isDebugEnabled()) { + logger.debug("Copying " + f + " to " + directoryPath + " on " + hr.address + " with permission " + permissions); } if 
(!SSHCmdHelper.sshExecuteCmd(sshConnection, "mkdir -m 700 -p " + directoryPath)) { - s_logger.debug("Unable to create destination path: " + directoryPath + " on " + hr.address + "."); + logger.debug("Unable to create destination path: " + directoryPath + " on " + hr.address + "."); } try { scp.put(f, directoryPath, permissions); } catch (final IOException e) { final String msg = "Unable to copy file " + f + " to path " + directoryPath + " with permissions " + permissions; - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException("Unable to setup the server: " + msg, e); } } @@ -5039,11 +5037,11 @@ public boolean setupServer(final Connection conn, final Host host) { return true; } catch (final XenAPIException e) { final String msg = "XenServer setup failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException("Unable to get host information " + e.toString(), e); } catch (final XmlRpcException e) { final String msg = "XenServer setup failed due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException("Unable to get host information ", e); } } @@ -5067,11 +5065,11 @@ public synchronized Network setupvSwitchNetwork(final Connection conn) { } return _host.getVswitchNetwork(); } catch (final BadServerResponse e) { - s_logger.error("Failed to setup vswitch network", e); + logger.error("Failed to setup vswitch network", e); } catch (final XenAPIException e) { - s_logger.error("Failed to setup vswitch network", e); + logger.error("Failed to setup vswitch network", e); } catch (final XmlRpcException e) { - s_logger.error("Failed to setup vswitch network", e); + logger.error("Failed to setup vswitch network", e); } return null; @@ -5098,14 +5096,14 @@ public void shutdownVM(final Connection conn, final VM vm, final String vmName, throw new CloudRuntimeException("Shutdown VM catch HandleInvalid and VM is not in HALTED state"); } } catch (final XenAPIException e) { 
- s_logger.debug("Unable to shutdown VM(" + vmName + ") with force=" + forcedStop + " on host(" + _host.getUuid() + ") due to " + e.toString()); + logger.debug("Unable to shutdown VM(" + vmName + ") with force=" + forcedStop + " on host(" + _host.getUuid() + ") due to " + e.toString()); try { VmPowerState state = vm.getPowerState(conn); if (state == VmPowerState.RUNNING) { try { vm.hardShutdown(conn); } catch (final Exception e1) { - s_logger.debug("Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString()); + logger.debug("Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString()); state = vm.getPowerState(conn); if (state == VmPowerState.RUNNING) { forceShutdownVM(conn, vm); @@ -5116,12 +5114,12 @@ public void shutdownVM(final Connection conn, final VM vm, final String vmName, return; } else { final String msg = "After cleanShutdown the VM status is " + state.toString() + ", that is not expected"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } } catch (final Exception e1) { final String msg = "Unable to hardShutdown VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString(); - s_logger.warn(msg, e1); + logger.warn(msg, e1); throw new CloudRuntimeException(msg); } } finally { @@ -5129,7 +5127,7 @@ public void shutdownVM(final Connection conn, final VM vm, final String vmName, try { task.destroy(conn); } catch (final Exception e1) { - s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); + logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } @@ -5150,14 +5148,14 @@ public void startVM(final Connection conn, final Host host, final VM vm, final S checkForSuccess(conn, task); } catch (final Types.HandleInvalid e) { if (vm.getPowerState(conn) == VmPowerState.RUNNING) { - s_logger.debug("VM " + 
vmName + " is in Running status", e); + logger.debug("VM " + vmName + " is in Running status", e); task = null; return; } throw new CloudRuntimeException("Start VM " + vmName + " catch HandleInvalid and VM is not in RUNNING state"); } catch (final TimeoutException e) { if (vm.getPowerState(conn) == VmPowerState.RUNNING) { - s_logger.debug("VM " + vmName + " is in Running status", e); + logger.debug("VM " + vmName + " is in Running status", e); task = null; return; } @@ -5165,14 +5163,14 @@ public void startVM(final Connection conn, final Host host, final VM vm, final S } } catch (final XenAPIException e) { final String msg = "Unable to start VM(" + vmName + ") on host(" + _host.getUuid() + ") due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e1) { - s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); + logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.getUuid() + ") due to " + e1.toString()); } } } @@ -5187,7 +5185,7 @@ protected void startvmfailhandle(final Connection conn, final VM vm, final List< vm.hardShutdown(conn); } catch (final Exception e) { final String msg = "VM hardshutdown failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } } if (vm.getPowerState(conn) == VmPowerState.HALTED) { @@ -5195,12 +5193,12 @@ protected void startvmfailhandle(final Connection conn, final VM vm, final List< vm.destroy(conn); } catch (final Exception e) { final String msg = "VM destroy failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } } } catch (final Exception e) { final String msg = "VM getPowerState failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } } if (mounts != null) { @@ -5211,7 +5209,7 @@ protected void startvmfailhandle(final Connection 
conn, final VM vm, final List< vbds = vdi.getVBDs(conn); } catch (final Exception e) { final String msg = "VDI getVBDS failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); continue; } for (final VBD vbd : vbds) { @@ -5220,7 +5218,7 @@ protected void startvmfailhandle(final Connection conn, final VM vm, final List< vbd.destroy(conn); } catch (final Exception e) { final String msg = "VBD destroy failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } } } @@ -5237,7 +5235,7 @@ private HashMap> syncNetworkGroups(final Connection con final HashMap> states = new HashMap>(); final String result = callHostPlugin(conn, "vmops", "get_rule_logs_for_vms", "host_uuid", _host.getUuid()); - s_logger.trace("syncNetworkGroups: id=" + id + " got: " + result); + logger.trace("syncNetworkGroups: id=" + id + " got: " + result); final String[] rulelogs = result != null ? result.split(";") : new String[0]; for (final String rulesforvm : rulelogs) { final String[] log = rulesforvm.split(","); @@ -5268,16 +5266,16 @@ public boolean transferManagementNetwork(final Connection conn, final Host host, } ++count; } catch (final XmlRpcException e) { - s_logger.debug("Waiting for host to come back: " + e.getMessage()); + logger.debug("Waiting for host to come back: " + e.getMessage()); } catch (final XenAPIException e) { - s_logger.debug("Waiting for host to come back: " + e.getMessage()); + logger.debug("Waiting for host to come back: " + e.getMessage()); } catch (final InterruptedException e) { - s_logger.debug("Gotta run"); + logger.debug("Gotta run"); return false; } } if (hostUuid == null) { - s_logger.warn("Unable to transfer the management network from " + spr.uuid); + logger.warn("Unable to transfer the management network from " + spr.uuid); return false; } @@ -5293,7 +5291,7 @@ public void umountSnapshotDir(final Connection conn, final Long dcId) { try { callHostPlugin(conn, "vmopsSnapshot", "unmountSnapshotsDir", "dcId", 
dcId.toString()); } catch (final Exception e) { - s_logger.debug("Failed to umount snapshot dir", e); + logger.debug("Failed to umount snapshot dir", e); } } @@ -5302,7 +5300,7 @@ public String upgradeSnapshot(final Connection conn, final String templatePath, if (results == null || results.isEmpty()) { final String msg = "upgrade_snapshot return null"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } final String[] tmp = results.split("#"); @@ -5310,27 +5308,27 @@ public String upgradeSnapshot(final Connection conn, final String templatePath, if (status.equals("0")) { return results; } else { - s_logger.warn(results); + logger.warn(results); throw new CloudRuntimeException(results); } } public void waitForTask(final Connection c, final Task task, final long pollInterval, final long timeout) throws XenAPIException, XmlRpcException, TimeoutException { final long beginTime = System.currentTimeMillis(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") sent to " + c.getSessionReference() + " is pending completion with a " + timeout + "ms timeout"); + if (logger.isTraceEnabled()) { + logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") sent to " + c.getSessionReference() + " is pending completion with a " + timeout + "ms timeout"); } while (task.getStatus(c) == Types.TaskStatusType.PENDING) { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") is pending, sleeping for " + pollInterval + "ms"); + if (logger.isTraceEnabled()) { + logger.trace("Task " + task.getNameLabel(c) + " (" + task.getUuid(c) + ") is pending, sleeping for " + pollInterval + "ms"); } Thread.sleep(pollInterval); } catch (final InterruptedException e) { } if (System.currentTimeMillis() - beginTime > timeout) { final String msg = "Async " + timeout / 1000 + " seconds timeout for task " + task.toString(); - 
s_logger.warn(msg); + logger.warn(msg); task.cancel(c); task.destroy(c); throw new TimeoutException(msg); @@ -5345,14 +5343,14 @@ public boolean createAndAttachConfigDriveIsoForVM(final Connection conn, final V // create SR final SR sr = createLocalIsoSR(conn, _configDriveSRName + _host.getIp()); if (sr == null) { - s_logger.debug("Failed to create local SR for the config drive"); + logger.debug("Failed to create local SR for the config drive"); return false; } - s_logger.debug("Creating vm data files in config drive for vm " + vmName); + logger.debug("Creating vm data files in config drive for vm " + vmName); // 1. create vm data files if (!createVmdataFiles(vmName, vmDataList, configDriveLabel)) { - s_logger.debug("Failed to create vm data files in config drive for vm " + vmName); + logger.debug("Failed to create vm data files in config drive for vm " + vmName); return false; } @@ -5381,9 +5379,9 @@ public boolean createVmdataFiles(final String vmName, final List vmDat try { deleteLocalFolder("/tmp/" + isoPath); } catch (final IOException e) { - s_logger.debug("Failed to delete the exiting config drive for vm " + vmName + " " + e.getMessage()); + logger.debug("Failed to delete the exiting config drive for vm " + vmName + " " + e.getMessage()); } catch (final Exception e) { - s_logger.debug("Failed to delete the exiting config drive for vm " + vmName + " " + e.getMessage()); + logger.debug("Failed to delete the exiting config drive for vm " + vmName + " " + e.getMessage()); } if (vmDataList != null) { @@ -5406,7 +5404,7 @@ public boolean createVmdataFiles(final String vmName, final List vmDat dir.mkdirs(); } } catch (final SecurityException ex) { - s_logger.debug("Failed to create dir " + ex.getMessage()); + logger.debug("Failed to create dir " + ex.getMessage()); return false; } @@ -5415,16 +5413,16 @@ public boolean createVmdataFiles(final String vmName, final List vmDat try (OutputStreamWriter fw = new OutputStreamWriter(new 
FileOutputStream(file.getAbsoluteFile()), "UTF-8"); BufferedWriter bw = new BufferedWriter(fw);) { bw.write(content); - s_logger.debug("created file: " + file + " in folder:" + folder); + logger.debug("created file: " + file + " in folder:" + folder); } catch (final IOException ex) { - s_logger.debug("Failed to create file " + ex.getMessage()); + logger.debug("Failed to create file " + ex.getMessage()); return false; } } } } } - s_logger.debug("Created the vm data in " + isoPath); + logger.debug("Created the vm data in " + isoPath); } String s = null; @@ -5439,16 +5437,16 @@ public boolean createVmdataFiles(final String vmName, final List vmDat // read the output from the command while ((s = stdInput.readLine()) != null) { - s_logger.debug(s); + logger.debug(s); } // read any errors from the attempted command while ((s = stdError.readLine()) != null) { - s_logger.debug(s); + logger.debug(s); } - s_logger.debug(" Created config drive ISO using the command " + cmd + " in the host " + _host.getIp()); + logger.debug(" Created config drive ISO using the command " + cmd + " in the host " + _host.getIp()); } catch (final IOException e) { - s_logger.debug(e.getMessage()); + logger.debug(e.getMessage()); return false; } @@ -5467,18 +5465,18 @@ public boolean copyConfigDriveIsoToHost(final Connection conn, final SR sr, fina throw new CloudRuntimeException("Unable to authenticate"); } - s_logger.debug("scp config drive iso file " + vmIso + " to host " + _host.getIp() + " path " + _configDriveIsopath); + logger.debug("scp config drive iso file " + vmIso + " to host " + _host.getIp() + " path " + _configDriveIsopath); final SCPClient scp = new SCPClient(sshConnection); final String p = "0755"; scp.put(vmIso, _configDriveIsopath, p); sr.scan(conn); - s_logger.debug("copied config drive iso to host " + _host); + logger.debug("copied config drive iso to host " + _host); } catch (final IOException e) { - s_logger.debug("failed to copy configdrive iso " + vmIso + " to host " + 
_host, e); + logger.debug("failed to copy configdrive iso " + vmIso + " to host " + _host, e); return false; } catch (final XmlRpcException e) { - s_logger.debug("Failed to scan config drive iso SR " + _configDriveSRName + _host.getIp() + " in host " + _host, e); + logger.debug("Failed to scan config drive iso SR " + _configDriveSRName + _host.getIp() + " in host " + _host, e); return false; } finally { sshConnection.close(); @@ -5487,9 +5485,9 @@ public boolean copyConfigDriveIsoToHost(final Connection conn, final SR sr, fina final String configDir = "/tmp/" + vmName; try { deleteLocalFolder(configDir); - s_logger.debug("Successfully cleaned up config drive directory " + configDir + " after copying it to host "); + logger.debug("Successfully cleaned up config drive directory " + configDir + " after copying it to host "); } catch (final Exception e) { - s_logger.debug("Failed to delete config drive folder :" + configDir + " for VM " + vmName + " " + e.getMessage()); + logger.debug("Failed to delete config drive folder :" + configDir + " for VM " + vmName + " " + e.getMessage()); } } @@ -5514,10 +5512,10 @@ public boolean attachConfigDriveIsoToVm(final Connection conn, final VM vm) thro srVdi = vdis.iterator().next(); } catch (final XenAPIException e) { - s_logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString()); + logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString()); return false; } catch (final Exception e) { - s_logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString()); + logger.debug("Unable to get config drive iso: " + isoURL + " due to " + e.toString()); return false; } @@ -5549,7 +5547,7 @@ public boolean attachConfigDriveIsoToVm(final Connection conn, final VM vm) thro final VBD cfgDriveVBD = VBD.create(conn, cfgDriveVbdr); isoVBD = cfgDriveVBD; - s_logger.debug("Created CD-ROM VBD for VM: " + vm); + logger.debug("Created CD-ROM VBD for VM: " + vm); } if (isoVBD != 
null) { @@ -5561,9 +5559,9 @@ public boolean attachConfigDriveIsoToVm(final Connection conn, final VM vm) thro try { // Insert the new ISO isoVBD.insert(conn, srVdi); - s_logger.debug("Attached config drive iso to vm " + vmName); + logger.debug("Attached config drive iso to vm " + vmName); } catch (final XmlRpcException ex) { - s_logger.debug("Failed to attach config drive iso to vm " + vmName); + logger.debug("Failed to attach config drive iso to vm " + vmName); return false; } } @@ -5577,7 +5575,7 @@ public SR createLocalIsoSR(final Connection conn, final String srName) throws Xe SR sr = getSRByNameLabelandHost(conn, srName); if (sr != null) { - s_logger.debug("Config drive SR already exist, returing it"); + logger.debug("Config drive SR already exist, returing it"); return sr; } @@ -5600,7 +5598,7 @@ public SR createLocalIsoSR(final Connection conn, final String srName) throws Xe } finally { sshConnection.close(); } - s_logger.debug("Created the config drive SR " + srName + " folder path " + _configDriveIsopath); + logger.debug("Created the config drive SR " + srName + " folder path " + _configDriveIsopath); deviceConfig.put("location", _configDriveIsopath); deviceConfig.put("legacy_mode", "true"); @@ -5612,15 +5610,15 @@ public SR createLocalIsoSR(final Connection conn, final String srName) throws Xe sr.setNameDescription(conn, deviceConfig.get("location")); sr.scan(conn); - s_logger.debug("Config drive ISO SR at the path " + _configDriveIsopath + " got created in host " + _host); + logger.debug("Config drive ISO SR at the path " + _configDriveIsopath + " got created in host " + _host); return sr; } catch (final XenAPIException e) { final String msg = "createLocalIsoSR failed! mountpoint " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } catch (final Exception e) { final String msg = "createLocalIsoSR failed! 
mountpoint: due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); throw new CloudRuntimeException(msg, e); } @@ -5629,7 +5627,7 @@ public SR createLocalIsoSR(final Connection conn, final String srName) throws Xe public void deleteLocalFolder(final String directory) throws Exception { if (directory == null || directory.isEmpty()) { final String msg = "Invalid directory path (null/empty) detected. Cannot delete specified directory."; - s_logger.debug(msg); + logger.debug(msg); throw new Exception(msg); } @@ -5664,7 +5662,7 @@ public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, Str // attach the config drive in destination host try { - s_logger.debug("Attaching config drive iso device for the VM " + vmName + " In host " + ipAddr); + logger.debug("Attaching config drive iso device for the VM " + vmName + " In host " + ipAddr); Set vms = VM.getByNameLabel(conn, vmName); SR sr = getSRByNameLabel(conn, vmName + VM_NAME_ISO_SUFFIX); @@ -5672,7 +5670,7 @@ public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, Str //one is from source host and second from dest host Set vdis = VDI.getByNameLabel(conn, vmName + VM_FILE_ISO_SUFFIX); if (vdis.isEmpty()) { - s_logger.debug("Could not find config drive ISO: " + vmName); + logger.debug("Could not find config drive ISO: " + vmName); return false; } @@ -5682,16 +5680,16 @@ public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, Str if (vdiSr.getUuid(conn).equals(sr.getUuid(conn))) { //get this vdi to attach to vbd configdriveVdi = vdi; - s_logger.debug("VDI for the config drive ISO " + vdi); + logger.debug("VDI for the config drive ISO " + vdi); } else { // delete the vdi in source host so that the .iso file is get removed - s_logger.debug("Removing the source host VDI for the config drive ISO " + vdi); + logger.debug("Removing the source host VDI for the config drive ISO " + vdi); vdi.destroy(conn); } } if (configdriveVdi == null) { - 
s_logger.debug("Config drive ISO VDI is not found "); + logger.debug("Config drive ISO VDI is not found "); return false; } @@ -5708,7 +5706,7 @@ public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, Str VBD cfgDriveVBD = VBD.create(conn, cfgDriveVbdr); - s_logger.debug("Inserting vbd " + configdriveVdi); + logger.debug("Inserting vbd " + configdriveVdi); cfgDriveVBD.insert(conn, configdriveVdi); break; @@ -5717,13 +5715,13 @@ public boolean attachConfigDriveToMigratedVm(Connection conn, String vmName, Str return true; } catch (BadServerResponse e) { - s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a bad server response.", e); + logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a bad server response.", e); return false; } catch (XenAPIException e) { - s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a xapi problem.", e); + logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a xapi problem.", e); return false; } catch (XmlRpcException e) { - s_logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a problem in a remote call.", e); + logger.warn("Failed to attach config drive ISO to the VM " + vmName + " In host " + ipAddr + " due to a problem in a remote call.", e); return false; } @@ -5834,7 +5832,7 @@ public Answer copyDiagnosticsFileToSecondaryStorage(Connection conn, CopyToSecon return answer; } catch (Exception e) { String msg = "Exception caught zip file copy to secondary storage URI: " + secondaryStorageUrl + "Exception : " + e; - s_logger.error(msg, e); + logger.error(msg, e); return new CopyToSecondaryStorageAnswer(cmd, false, msg); } finally { if (localDir != null) umountNfs(conn, secondaryStorageMountPath, localDir); @@ -5856,7 +5854,7 @@ private void umountNfs(Connection 
conn, String remoteDir, String localDir) { String result = callHostPlugin(conn, "cloud-plugin-storage", "umountNfsSecondaryStorage", "localDir", localDir, "remoteDir", remoteDir); if (StringUtils.isBlank(result)) { String errMsg = "Could not umount secondary storage " + remoteDir + " on host " + localDir; - s_logger.warn(errMsg); + logger.warn(errMsg); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServerResource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServerResource.java index 9de2b2996bc8..29312a3d764d 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServerResource.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XcpServerResource.java @@ -17,7 +17,6 @@ package com.cloud.hypervisor.xenserver.resource; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.xensource.xenapi.Connection; @@ -29,7 +28,6 @@ public class XcpServerResource extends CitrixResourceBase { - private final static Logger s_logger = Logger.getLogger(XcpServerResource.class); private final static long mem_32m = 33554432L; @Override @@ -89,8 +87,8 @@ host memory (taking into account various memory overheads). 
@Override protected void setMemory(final Connection conn, final VM vm, final long minMemsize, final long maxMemsize) throws XmlRpcException, XenAPIException { //setMemoryLimits(staticMin, staticMax, dynamicMin, dynamicMax) - if (s_logger.isDebugEnabled()) { - s_logger.debug("Memory Limits for VM [" + vm.getNameLabel(conn) + "[staticMin:" + toHumanReadableSize(mem_32m) + ", staticMax:" + toHumanReadableSize(maxMemsize) + ", dynamicMin: " + toHumanReadableSize(minMemsize) + + if (logger.isDebugEnabled()) { + logger.debug("Memory Limits for VM [" + vm.getNameLabel(conn) + "[staticMin:" + toHumanReadableSize(mem_32m) + ", staticMax:" + toHumanReadableSize(maxMemsize) + ", dynamicMin: " + toHumanReadableSize(minMemsize) + ", dynamicMax:" + toHumanReadableSize(maxMemsize) + "]]"); } vm.setMemoryLimits(conn, mem_32m, maxMemsize, minMemsize, maxMemsize); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java index 9ae8bcf49c6d..92e812d8d780 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java @@ -17,7 +17,6 @@ package com.cloud.hypervisor.xenserver.resource; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.utils.exception.CloudRuntimeException; @@ -30,7 +29,6 @@ import com.xensource.xenapi.VLAN; public class XenServer56Resource extends CitrixResourceBase { - private final static Logger s_logger = Logger.getLogger(XenServer56Resource.class); @Override protected String getPatchFilePath() { @@ -67,7 +65,7 @@ public void disableVlanNetwork(final Connection conn, final Network network, boo host.forgetDataSourceArchives(conn, "pif_" + 
device + "." + vlannum + "_tx"); host.forgetDataSourceArchives(conn, "pif_" + device + "." + vlannum + "_rx"); } catch (final XenAPIException e) { - s_logger.trace("Catch " + e.getClass().getName() + ": failed to destroy VLAN " + device + " on host " + _host.getUuid() + " due to " + e.toString()); + logger.trace("Catch " + e.getClass().getName() + ": failed to destroy VLAN " + device + " on host " + _host.getUuid() + " due to " + e.toString()); } } return; @@ -75,10 +73,10 @@ public void disableVlanNetwork(final Connection conn, final Network network, boo } } catch (final XenAPIException e) { final String msg = "Unable to disable VLAN network due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } catch (final Exception e) { final String msg = "Unable to disable VLAN network due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); } } @@ -115,13 +113,13 @@ public Boolean checkHeartbeat(final String hostuuid) { final String shcmd = "/opt/cloud/bin/check_heartbeat.sh " + hostuuid + " " + Integer.toString(_heartbeatInterval * 2); if (!SSHCmdHelper.sshExecuteCmd(sshConnection, shcmd)) { - s_logger.debug("Heart beat is gone so dead."); + logger.debug("Heart beat is gone so dead."); return false; } - s_logger.debug("Heart beat is still going"); + logger.debug("Heart beat is still going"); return true; } catch (final Exception e) { - s_logger.debug("health check failed due to catch exception " + e.toString()); + logger.debug("health check failed due to catch exception " + e.toString()); return null; } finally { sshConnection.close(); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer610Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer610Resource.java index 7066d6205df2..77f1e7936f2e 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer610Resource.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer610Resource.java @@ -23,7 +23,6 @@ import java.util.Set; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.to.DiskTO; @@ -39,7 +38,6 @@ public class XenServer610Resource extends XenServer600Resource { - private static final Logger s_logger = Logger.getLogger(XenServer610Resource.class); public List getUpdatedVolumePathsOfMigratedVm(final Connection connection, final VM migratedVm, final DiskTO[] volumes) throws CloudRuntimeException { final List volumeToList = new ArrayList(); @@ -69,7 +67,7 @@ public List getUpdatedVolumePathsOfMigratedVm(final Connection c } } } catch (final Exception e) { - s_logger.error("Unable to get the updated VDI paths of the migrated vm " + e.toString(), e); + logger.error("Unable to get the updated VDI paths of the migrated vm " + e.toString(), e); throw new CloudRuntimeException("Unable to get the updated VDI paths of the migrated vm " + e.toString(), e); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620Resource.java index affccc695a5a..dd0767aa595c 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620Resource.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620Resource.java @@ -19,7 +19,6 @@ import java.util.Set; import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupRoutingCommand; import com.xensource.xenapi.Connection; @@ -29,7 +28,6 @@ public class XenServer620Resource extends XenServer610Resource { - private static final Logger s_logger = 
Logger.getLogger(XenServer620Resource.class); protected boolean hostHasHotFix(final Connection conn, final String hotFixUuid) { try { @@ -44,7 +42,7 @@ protected boolean hostHasHotFix(final Connection conn, final String hotFixUuid) } } } catch (final Exception e) { - s_logger.debug("can't get patches information for hotFix: " + hotFixUuid); + logger.debug("can't get patches information for hotFix: " + hotFixUuid); } return false; } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620SP1Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620SP1Resource.java index 5997b490e588..e9c19b8b9954 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620SP1Resource.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer620SP1Resource.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.Set; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.StartCommand; @@ -42,7 +41,6 @@ public class XenServer620SP1Resource extends XenServer620Resource { - private static final Logger s_logger = Logger.getLogger(XenServer620SP1Resource.class); @Override protected void fillHostInfo(final Connection conn, final StartupRoutingCommand cmd) { @@ -54,8 +52,8 @@ protected void fillHostInfo(final Connection conn, final StartupRoutingCommand c cmd.setHostTags("GPU"); } } catch (final Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Error while getting GPU device info from host " + cmd.getName(), e); + if (logger.isDebugEnabled()) { + logger.debug("Error while getting GPU device info from host " + cmd.getName(), e); } } } @@ -104,8 +102,8 @@ public HashMap> getGPUGroupDetails(final @Override public void createVGPU(final Connection conn, final StartCommand cmd, final VM vm, final GPUDeviceTO gpuDevice) throws 
XenAPIException, XmlRpcException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating VGPU of VGPU type [ " + gpuDevice.getVgpuType() + " ] in gpu group" + gpuDevice.getGpuGroup() + if (logger.isDebugEnabled()) { + logger.debug("Creating VGPU of VGPU type [ " + gpuDevice.getVgpuType() + " ] in gpu group" + gpuDevice.getGpuGroup() + " for VM " + cmd.getVirtualMachine().getName()); } @@ -126,8 +124,8 @@ public void createVGPU(final Connection conn, final StartCommand cmd, final VM v final Map other_config = new HashMap(); VGPU.create(conn, vm, gpuGroup, device, other_config, vgpuType); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created VGPU of VGPU type [ " + gpuDevice.getVgpuType() + " ] for VM " + cmd.getVirtualMachine().getName()); + if (logger.isDebugEnabled()) { + logger.debug("Created VGPU of VGPU type [ " + gpuDevice.getVgpuType() + " ] for VM " + cmd.getVirtualMachine().getName()); } // Calculate and set remaining GPU capacity in the host. cmd.getVirtualMachine().getGpuDevice().setGroupDetails(getGPUGroupDetails(conn)); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java index 2f27b1376fdb..87b869ba3c6a 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerConnectionPool.java @@ -29,7 +29,8 @@ import com.xensource.xenapi.Types.XenAPIException; import org.apache.cloudstack.utils.security.SSLUtils; import org.apache.cloudstack.utils.security.SecureSSLSocketFactory; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import org.apache.xmlrpc.client.XmlRpcClientException; @@ -48,7 
+49,7 @@ import java.util.Queue; public class XenServerConnectionPool { - private static final Logger s_logger = Logger.getLogger(XenServerConnectionPool.class); + protected static Logger LOGGER = LogManager.getLogger(XenServerConnectionPool.class); protected HashMap _conns = new HashMap(); protected int _retries; protected int _interval; @@ -57,7 +58,7 @@ public class XenServerConnectionPool { static { File file = PropertiesUtil.findConfigFile("environment.properties"); if (file == null) { - s_logger.debug("Unable to find environment.properties"); + LOGGER.debug("Unable to find environment.properties"); } else { try { final Properties props = PropertiesUtil.loadFromFile(file); @@ -65,11 +66,11 @@ public class XenServerConnectionPool { if (search != null) { s_sleepOnError = NumbersUtil.parseInterval(search, 10) * 1000; } - s_logger.info("XenServer Connection Pool Configs: sleep.interval.on.error=" + s_sleepOnError); + LOGGER.info("XenServer Connection Pool Configs: sleep.interval.on.error=" + s_sleepOnError); } catch (FileNotFoundException e) { - s_logger.debug("File is not found", e); + LOGGER.debug("File is not found", e); } catch (IOException e) { - s_logger.debug("IO Exception while reading file", e); + LOGGER.debug("IO Exception while reading file", e); } } try { @@ -89,7 +90,7 @@ public boolean verify(String hostName, SSLSession session) { } catch (NoSuchAlgorithmException e) { //ignore this } catch (KeyManagementException e) { - s_logger.debug("Init SSLContext failed ", e); + LOGGER.debug("Init SSLContext failed ", e); } } @@ -101,8 +102,8 @@ protected XenServerConnectionPool() { private void addConnect(String poolUuid, XenServerConnection conn) { if (poolUuid == null) return; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Add master connection through " + conn.getIp() + " for pool(" + conn.getPoolUuid() + ")"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Add master connection through " + conn.getIp() + " for pool(" + conn.getPoolUuid() + ")"); } 
synchronized (_conns) { _conns.put(poolUuid, conn); @@ -126,8 +127,8 @@ private void removeConnect(String poolUuid) { conn = _conns.remove(poolUuid); } if (conn != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Remove master connection through " + conn.getIp() + " for pool(" + conn.getPoolUuid() + ")"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Remove master connection through " + conn.getIp() + " for pool(" + conn.getPoolUuid() + ")"); } } @@ -159,12 +160,12 @@ public Connection getConnect(String ip, String username, Queue password) loginWithPassword(conn, username, password, APIVersion.latest().toString()); } catch (Exception e1) { String msg = "Unable to create master connection to host(" + maddress +") , due to " + e1.toString(); - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg, e1); } } catch (Exception e) { String msg = "Unable to create master connection to host(" + ip +") , due to " + e.toString(); - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg, e); } return conn; @@ -175,8 +176,8 @@ public URL getURL(String ip) { return new URL("https://" + ip); } catch (Exception e) { String msg = "Unable to convert IP " + ip + " to URL due to " + e.toString(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(msg); } throw new CloudRuntimeException(msg, e); } @@ -188,7 +189,7 @@ public Connection connect(String hostUuid, String poolUuid, String ipAddress, if (hostUuid == null || poolUuid == null || ipAddress == null || username == null || password == null) { String msg = "Connect some parameter are null hostUuid:" + hostUuid + " ,poolUuid:" + poolUuid + " ,ipAddress:" + ipAddress; - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg); } synchronized (poolUuid.intern()) { @@ -198,7 +199,7 @@ public Connection connect(String hostUuid, String poolUuid, String ipAddress, Host host = Host.getByUuid(mConn, 
hostUuid); if (!host.getEnabled(mConn)) { String msg = "Cannot connect this host " + ipAddress + " due to the host is not enabled"; - s_logger.debug(msg); + LOGGER.debug(msg); if (mConn.getIp().equalsIgnoreCase(ipAddress)) { removeConnect(poolUuid); mConn = null; @@ -209,9 +210,9 @@ public Connection connect(String hostUuid, String poolUuid, String ipAddress, } catch (CloudRuntimeException e) { throw e; } catch (Exception e) { - if (s_logger.isDebugEnabled()) { + if (LOGGER.isDebugEnabled()) { String ip = mConn != null ? mConn.getIp() : null; - s_logger.debug("connect through IP(" + ip + ") for pool(" + poolUuid + ") is broken due to " + e.toString()); + LOGGER.debug("connect through IP(" + ip + ") for pool(" + poolUuid + ") is broken due to " + e.toString()); } removeConnect(poolUuid); mConn = null; @@ -228,13 +229,13 @@ public Connection connect(String hostUuid, String poolUuid, String ipAddress, try{ Session.logout(conn); } catch (Exception e) { - s_logger.debug("Caught exception during logout", e); + LOGGER.debug("Caught exception during logout", e); } conn.dispose(); } if (!hostenabled) { String msg = "Unable to create master connection, due to master Host " + ipAddress + " is not enabled"; - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg); } mConn = new XenServerConnection(getURL(ipAddress), ipAddress, username, password, _retries, _interval, wait, _connWait); @@ -247,12 +248,12 @@ public Connection connect(String hostUuid, String poolUuid, String ipAddress, Host host = session.getThisHost(mConn); if (!host.getEnabled(mConn)) { String msg = "Unable to create master connection, due to master Host " + maddress + " is not enabled"; - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg); } } catch (Exception e1) { String msg = "Unable to create master connection to host(" + maddress +") , due to " + e1.toString(); - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg, e1); } @@ 
-260,7 +261,7 @@ public Connection connect(String hostUuid, String poolUuid, String ipAddress, throw e; } catch (Exception e) { String msg = "Unable to create master connection to host(" + ipAddress +") , due to " + e.toString(); - s_logger.debug(msg); + LOGGER.debug(msg); throw new CloudRuntimeException(msg, e); } addConnect(poolUuid, mConn); @@ -457,19 +458,19 @@ protected Map dispatch(String methodcall, Object[] methodparams) throws XmlRpcE try { return super.dispatch(methodcall, methodparams); } catch (Types.SessionInvalid e) { - s_logger.debug("Session is invalid for method: " + methodcall + " due to " + e.toString()); + LOGGER.debug("Session is invalid for method: " + methodcall + " due to " + e.toString()); removeConnect(_poolUuid); throw e; } catch (XmlRpcClientException e) { - s_logger.debug("XmlRpcClientException for method: " + methodcall + " due to " + e.toString()); + LOGGER.debug("XmlRpcClientException for method: " + methodcall + " due to " + e.toString()); removeConnect(_poolUuid); throw e; } catch (XmlRpcException e) { - s_logger.debug("XmlRpcException for method: " + methodcall + " due to " + e.toString()); + LOGGER.debug("XmlRpcException for method: " + methodcall + " due to " + e.toString()); removeConnect(_poolUuid); throw e; } catch (Types.HostIsSlave e) { - s_logger.debug("HostIsSlave Exception for method: " + methodcall + " due to " + e.toString()); + LOGGER.debug("HostIsSlave Exception for method: " + methodcall + " due to " + e.toString()); removeConnect(_poolUuid); throw e; } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java index cb226ed7d9bf..4298c9a72189 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java @@ -56,7 +56,8 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -89,7 +90,7 @@ import com.xensource.xenapi.VM; public class XenServerStorageProcessor implements StorageProcessor { - private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class); + protected Logger logger = LogManager.getLogger(getClass()); protected CitrixResourceBase hypervisorResource; protected String BaseMountPointOnHost = "/var/run/cloud_mount"; @@ -156,7 +157,7 @@ public SnapshotAndCopyAnswer snapshotAndCopy(final SnapshotAndCopyCommand cmd) { return snapshotAndCopyAnswer; } catch (final Exception ex) { - s_logger.warn("Failed to take and copy snapshot: " + ex.toString(), ex); + logger.warn("Failed to take and copy snapshot: " + ex.toString(), ex); return new SnapshotAndCopyAnswer(ex.getMessage()); } @@ -195,7 +196,7 @@ public ResignatureAnswer resignature(final ResignatureCommand cmd) { return resignatureAnswer; } catch (final Exception ex) { - s_logger.warn("Failed to resignature: " + ex.toString(), ex); + logger.warn("Failed to resignature: " + ex.toString(), ex); return new ResignatureAnswer(ex.getMessage()); } @@ -219,13 +220,13 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { @Override public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) { - s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for XenServerStorageProcessor"); + logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for XenServerStorageProcessor"); return 
new Answer(cmd,false,"Not applicable used for XenServerStorageProcessor"); } @Override public Answer syncVolumePath(SyncVolumePathCommand cmd) { - s_logger.info("SyncVolumePathCommand not currently applicable for XenServerStorageProcessor"); + logger.info("SyncVolumePathCommand not currently applicable for XenServerStorageProcessor"); return new Answer(cmd, false, "Not currently applicable for XenServerStorageProcessor"); } @@ -241,7 +242,7 @@ public AttachAnswer attachIso(final AttachCommand cmd) { isoURL = iso.getName(); } else { if (!(store instanceof NfsTO)) { - s_logger.debug("Can't attach a iso which is not created on nfs: "); + logger.debug("Can't attach a iso which is not created on nfs: "); return new AttachAnswer("Can't attach a iso which is not created on nfs: "); } final NfsTO nfsStore = (NfsTO) store; @@ -286,10 +287,10 @@ public AttachAnswer attachIso(final AttachCommand cmd) { return new AttachAnswer(disk); } catch (final XenAPIException e) { - s_logger.warn("Failed to attach iso" + ": " + e.toString(), e); + logger.warn("Failed to attach iso" + ": " + e.toString(), e); return new AttachAnswer(e.toString()); } catch (final Exception e) { - s_logger.warn("Failed to attach iso" + ": " + e.toString(), e); + logger.warn("Failed to attach iso" + ": " + e.toString(), e); return new AttachAnswer(e.toString()); } } @@ -377,7 +378,7 @@ public AttachAnswer attachVolume(final AttachCommand cmd) { } catch (final Exception e) { final String msg = "Failed to attach volume for uuid: " + data.getPath() + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new AttachAnswer(msg); } @@ -395,7 +396,7 @@ public Answer dettachIso(final DettachCommand cmd) { isoURL = iso.getName(); } else { if (!(store instanceof NfsTO)) { - s_logger.debug("Can't detach a iso which is not created on nfs: "); + logger.debug("Can't detach a iso which is not created on nfs: "); return new AttachAnswer("Can't detach a iso which is not created on nfs: "); } final 
NfsTO nfsStore = (NfsTO) store; @@ -438,11 +439,11 @@ public Answer dettachIso(final DettachCommand cmd) { return new DettachAnswer(disk); } catch (final XenAPIException e) { final String msg = "Failed to detach volume" + " for uuid: " + data.getPath() + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new DettachAnswer(msg); } catch (final Exception e) { final String msg = "Failed to detach volume" + " for uuid: " + data.getPath() + " due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new DettachAnswer(msg); } } @@ -501,7 +502,7 @@ public Answer dettachVolume(final DettachCommand cmd) { return new DettachAnswer(disk); } catch (final Exception e) { - s_logger.warn("Failed dettach volume: " + data.getPath()); + logger.warn("Failed dettach volume: " + data.getPath()); return new DettachAnswer("Failed dettach volume: " + data.getPath() + ", due to " + e.toString()); } } @@ -558,7 +559,7 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { snapshotUUID = preSnapshotUUID; } } catch (final Exception e) { - s_logger.debug("Failed to get parent snapshot", e); + logger.debug("Failed to get parent snapshot", e); } } final SnapshotObjectTO newSnapshot = new SnapshotObjectTO(); @@ -566,10 +567,10 @@ public Answer createSnapshot(final CreateObjectCommand cmd) { return new CreateObjectAnswer(newSnapshot); } catch (final XenAPIException e) { details += ", reason: " + e.toString(); - s_logger.warn(details, e); + logger.warn(details, e); } catch (final Exception e) { details += ", reason: " + e.toString(); - s_logger.warn(details, e); + logger.warn(details, e); } return new CreateObjectAnswer(details); @@ -588,13 +589,13 @@ public Answer deleteVolume(final DeleteCommand cmd) { deleteVDI(conn, vdi); return new Answer(null); } catch (final BadServerResponse e) { - s_logger.debug("Failed to delete volume", e); + logger.debug("Failed to delete volume", e); errorMsg = e.toString(); } catch (final XenAPIException 
e) { - s_logger.debug("Failed to delete volume", e); + logger.debug("Failed to delete volume", e); errorMsg = e.toString(); } catch (final XmlRpcException e) { - s_logger.debug("Failed to delete volume", e); + logger.debug("Failed to delete volume", e); errorMsg = e.toString(); } return new Answer(null, false, errorMsg); @@ -625,7 +626,7 @@ private String copy_vhd_from_secondarystorage(final Connection conn, final Strin if (hypervisorResource.killCopyProcess(conn, source)) { destroyVDIbyNameLabel(conn, nameLabel); } - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -633,7 +634,7 @@ private void destroyVDIbyNameLabel(final Connection conn, final String nameLabel try { final Set vdis = VDI.getByNameLabel(conn, nameLabel); if (vdis.size() != 1) { - s_logger.warn("destroyVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel); + logger.warn("destroyVDIbyNameLabel failed due to there are " + vdis.size() + " VDIs with name " + nameLabel); return; } for (final VDI vdi : vdis) { @@ -651,7 +652,7 @@ protected VDI getVDIbyUuid(final Connection conn, final String uuid) { return VDI.getByUuid(conn, uuid); } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg, e); } } @@ -662,7 +663,7 @@ protected String getVhdParent(final Connection conn, final String primaryStorage "isISCSI", isISCSI.toString()); if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) { - s_logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); + logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); // errString is already logged. 
return null; } @@ -733,7 +734,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { if (srs.size() != 1) { final String msg = "There are " + srs.size() + " SRs with same name: " + srName; - s_logger.warn(msg); + logger.warn(msg); return new CopyCmdAnswer(msg); } else { @@ -785,7 +786,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + " for template + " + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new CopyCmdAnswer(msg); } @@ -825,7 +826,7 @@ public Answer createVolume(final CreateObjectCommand cmd) { return new CreateObjectAnswer(newVol); } catch (final Exception e) { - s_logger.debug("create volume failed: " + e.toString()); + logger.debug("create volume failed: " + e.toString()); return new CreateObjectAnswer(e.toString()); } } @@ -844,16 +845,16 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { vdi = tmpltvdi.createClone(conn, new HashMap()); Long virtualSize = vdi.getVirtualSize(conn); if (volume.getSize() > virtualSize) { - s_logger.debug("Overriding provided template's size with new size " + toHumanReadableSize(volume.getSize()) + " for volume: " + volume.getName()); + logger.debug("Overriding provided template's size with new size " + toHumanReadableSize(volume.getSize()) + " for volume: " + volume.getName()); vdi.resize(conn, volume.getSize()); } else { - s_logger.debug("Using templates disk size of " + toHumanReadableSize(virtualSize) + " for volume: " + volume.getName() + " since size passed was " + toHumanReadableSize(volume.getSize())); + logger.debug("Using templates disk size of " + toHumanReadableSize(virtualSize) + " for volume: " + volume.getName() + " since size passed was " + toHumanReadableSize(volume.getSize())); } vdi.setNameLabel(conn, volume.getName()); VDI.Record vdir; vdir = vdi.getRecord(conn); - s_logger.debug("Successfully created VDI: Uuid = " 
+ vdir.uuid); + logger.debug("Successfully created VDI: Uuid = " + vdir.uuid); final VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setName(vdir.nameLabel); @@ -862,7 +863,7 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final Exception e) { - s_logger.warn("Unable to create volume; Pool=" + destData + "; Disk: ", e); + logger.warn("Unable to create volume; Pool=" + destData + "; Disk: ", e); return new CopyCmdAnswer(e.toString()); } } @@ -894,12 +895,12 @@ public Answer copyVolumeFromImageCacheToPrimary(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new CopyCmdAnswer(e.toString()); } } - s_logger.debug("unsupported protocol"); + logger.debug("unsupported protocol"); return new CopyCmdAnswer("unsupported protocol"); } @@ -935,7 +936,7 @@ public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) { newVol.setSize(srcVolume.getSize()); return new CopyCmdAnswer(newVol); } catch (final Exception e) { - s_logger.debug("Failed to copy volume to secondary: " + e.toString()); + logger.debug("Failed to copy volume to secondary: " + e.toString()); return new CopyCmdAnswer("Failed to copy volume to secondary: " + e.toString()); } finally { hypervisorResource.removeSR(conn, secondaryStorage); @@ -953,7 +954,7 @@ private boolean swiftUpload(final Connection conn, final SwiftTO swift, final St String result = hypervisorResource.callHostPluginAsync(conn, "swiftxenserver", "swift", wait, params.toArray(new String[params.size()])); return BooleanUtils.toBoolean(result); } catch (final Exception e) { - s_logger.warn("swift upload failed due to " + e.toString(), e); + logger.warn("swift upload failed due to " + e.toString(), e); } return false; } @@ -1043,7 +1044,7 @@ protected String backupSnapshotToS3(final 
Connection connection, final S3TO s3, return null; } catch (final Exception e) { - s_logger.error(String.format("S3 upload failed of snapshot %1$s due to %2$s.", snapshotUuid, e.toString()), e); + logger.error(String.format("S3 upload failed of snapshot %1$s due to %2$s.", snapshotUuid, e.toString()), e); } return null; @@ -1089,7 +1090,7 @@ private String backupSnapshot(final Connection conn, final String primaryStorage // So we don't rely on status value but return backupSnapshotUuid as an // indicator of success. if (status != null && status.equalsIgnoreCase("1") && backupSnapshotUuid != null) { - s_logger.debug("Successfully copied backupUuid: " + backupSnapshotUuid + " to secondary storage"); + logger.debug("Successfully copied backupUuid: " + backupSnapshotUuid + " to secondary storage"); return results; } else { errMsg = @@ -1099,7 +1100,7 @@ private String backupSnapshot(final Connection conn, final String primaryStorage } final String source = backupUuid + ".vhd"; hypervisorResource.killCopyProcess(conn, source); - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -1122,17 +1123,17 @@ protected boolean destroySnapshotOnPrimaryStorageExceptThis(final Connection con } } catch (final Exception e) { final String msg = "Destroying snapshot: " + snapshot + " on primary storage failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } } - s_logger.debug("Successfully destroyed snapshot on volume: " + volumeUuid + " execept this current snapshot " + avoidSnapshotUuid); + logger.debug("Successfully destroyed snapshot on volume: " + volumeUuid + " execept this current snapshot " + avoidSnapshotUuid); return true; } catch (final XenAPIException e) { final String msg = "Destroying snapshot on volume: " + volumeUuid + " execept this current snapshot " + avoidSnapshotUuid + " failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); } catch (final Exception e) { final String msg = 
"Destroying snapshot on volume: " + volumeUuid + " execept this current snapshot " + avoidSnapshotUuid + " failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } return false; @@ -1143,17 +1144,17 @@ protected boolean destroySnapshotOnPrimaryStorage(final Connection conn, final S final VDI snapshot = getVDIbyUuid(conn, lastSnapshotUuid); if (snapshot == null) { // since this is just used to cleanup leftover bad snapshots, no need to throw exception - s_logger.warn("Could not destroy snapshot " + lastSnapshotUuid + " due to can not find it"); + logger.warn("Could not destroy snapshot " + lastSnapshotUuid + " due to can not find it"); return false; } snapshot.destroy(conn); return true; } catch (final XenAPIException e) { final String msg = "Destroying snapshot: " + lastSnapshotUuid + " failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); } catch (final Exception e) { final String msg = "Destroying snapshot: " + lastSnapshotUuid + " failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); } return false; } @@ -1222,7 +1223,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { } } } catch (final Exception e) { - s_logger.debug("Failed to get parent snapshots, take full snapshot", e); + logger.debug("Failed to get parent snapshots, take full snapshot", e); fullbackup = true; } } @@ -1239,7 +1240,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { if (!hypervisorResource.createSecondaryStorageFolder(conn, secondaryStorageMountPath, folder, nfsVersion)) { details = " Filed to create folder " + folder + " in secondary storage"; - s_logger.warn(details); + logger.warn(details); return new CopyCmdAnswer(details); } final String snapshotMountpoint = secondaryStorageUrl + "/" + folder; @@ -1261,7 +1262,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { try { deleteSnapshotBackup(conn, localMountPoint, folder, secondaryStorageMountPath, snapshotBackupUuid); } catch (final 
Exception e) { - s_logger.debug("Failed to delete snapshot on cache storages", e); + logger.debug("Failed to delete snapshot on cache storages", e); } } @@ -1275,7 +1276,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { try { deleteSnapshotBackup(conn, localMountPoint, folder, secondaryStorageMountPath, snapshotBackupUuid); } catch (final Exception e) { - s_logger.debug("Failed to delete snapshot on cache storages", e); + logger.debug("Failed to delete snapshot on cache storages", e); } } // finalPath = folder + File.separator + snapshotBackupUuid; @@ -1326,17 +1327,17 @@ public Answer backupSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newSnapshot); } catch (final XenAPIException e) { details = "BackupSnapshot Failed due to " + e.toString(); - s_logger.warn(details, e); + logger.warn(details, e); } catch (final Exception e) { details = "BackupSnapshot Failed due to " + e.getMessage(); - s_logger.warn(details, e); + logger.warn(details, e); } finally { if (!result) { // remove last bad primary snapshot when exception happens try { destroySnapshotOnPrimaryStorage(conn, snapshotUuid); } catch (final Exception e) { - s_logger.debug("clean up snapshot failed", e); + logger.debug("clean up snapshot failed", e); } } } @@ -1369,7 +1370,7 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { installPath = template.getPath(); if (!hypervisorResource.createSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath, nfsVersion)) { details = " Filed to create folder " + installPath + " in secondary storage"; - s_logger.warn(details); + logger.warn(details); return new CopyCmdAnswer(details); } @@ -1417,7 +1418,7 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { hypervisorResource.deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath, nfsVersion); } details = "Creating template from volume " + volumeUUID + " failed due to " + e.toString(); - s_logger.error(details, e); + logger.error(details, e); } 
return new CopyCmdAnswer(details); } @@ -1443,7 +1444,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { destUri = new URI(destStore.getUrl()); } catch (final Exception ex) { - s_logger.debug("Invalid URI", ex); + logger.debug("Invalid URI", ex); return new CopyCmdAnswer("Invalid URI: " + ex.toString()); } @@ -1472,7 +1473,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { if (!hypervisorResource.createSecondaryStorageFolder(conn, destNfsPath, destDir, destNfsVersion)) { final String details = " Failed to create folder " + destDir + " in secondary storage"; - s_logger.warn(details); + logger.warn(details); return new CopyCmdAnswer(details); } @@ -1523,7 +1524,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newTemplate); } catch (final Exception ex) { - s_logger.error("Failed to create a template from a snapshot", ex); + logger.error("Failed to create a template from a snapshot", ex); return new CopyCmdAnswer("Failed to create a template from a snapshot: " + ex.toString()); } finally { @@ -1532,7 +1533,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { try { destVdi.destroy(conn); } catch (final Exception e) { - s_logger.debug("Cleaned up leftover VDI on destination storage due to failure: ", e); + logger.debug("Cleaned up leftover VDI on destination storage due to failure: ", e); } } } @@ -1648,16 +1649,16 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { } catch (final XenAPIException e) { details = "Exception due to " + e.toString(); - s_logger.warn(details, e); + logger.warn(details, e); } catch (final Exception e) { details = "Exception due to " + e.getMessage(); - s_logger.warn(details, e); + logger.warn(details, e); } if (!result) { // Is this logged at a higher level? - s_logger.error(details); + logger.error(details); } // In all cases return something. 
@@ -1703,7 +1704,7 @@ Answer createManagedVolumeFromManagedSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final Exception ex) { - s_logger.warn("Failed to copy snapshot to volume: " + ex.toString(), ex); + logger.warn("Failed to copy snapshot to volume: " + ex.toString(), ex); return new CopyCmdAnswer(ex.getMessage()); } @@ -1743,7 +1744,7 @@ Answer createNonManagedVolumeFromManagedSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (Exception ex) { - s_logger.warn("Failed to copy snapshot to volume: " + ex.toString(), ex); + logger.warn("Failed to copy snapshot to volume: " + ex.toString(), ex); return new CopyCmdAnswer(ex.getMessage()); } @@ -1768,13 +1769,13 @@ public Answer deleteSnapshot(final DeleteCommand cmd) { try { deleteVDI(conn, snapshotVdi); } catch (final BadServerResponse e) { - s_logger.debug("delete snapshot failed:" + e.toString()); + logger.debug("delete snapshot failed:" + e.toString()); errMsg = e.toString(); } catch (final XenAPIException e) { - s_logger.debug("delete snapshot failed:" + e.toString()); + logger.debug("delete snapshot failed:" + e.toString()); errMsg = e.toString(); } catch (final XmlRpcException e) { - s_logger.debug("delete snapshot failed:" + e.toString()); + logger.debug("delete snapshot failed:" + e.toString()); errMsg = e.toString(); } return new Answer(cmd, false, errMsg); @@ -1791,7 +1792,7 @@ public Answer introduceObject(final IntroduceObjectCmd cmd) { poolSr.scan(conn); return new IntroduceObjectAnswer(cmd.getDataTO()); } catch (final Exception e) { - s_logger.debug("Failed to introduce object", e); + logger.debug("Failed to introduce object", e); return new Answer(cmd, false, e.toString()); } } @@ -1805,7 +1806,7 @@ public Answer forgetObject(final ForgetObjectCmd cmd) { vdi.forget(conn); return new IntroduceObjectAnswer(cmd.getDataTO()); } catch (final Exception e) { - s_logger.debug("Failed to forget object", e); + logger.debug("Failed to forget object", e); 
return new Answer(cmd, false, e.toString()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625Resource.java index 407beb774ee8..65c9e604100d 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625Resource.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625Resource.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource; import org.apache.cloudstack.hypervisor.xenserver.XenServerResourceNewBase; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.storage.resource.StorageSubsystemCommandHandler; @@ -33,7 +32,6 @@ public class Xenserver625Resource extends XenServerResourceNewBase { - private static final Logger s_logger = Logger.getLogger(Xenserver625Resource.class); @Override protected String getPatchFilePath() { @@ -70,7 +68,7 @@ public boolean setupServer(final Connection conn,final Host host) { SSHCmdHelper.sshExecuteCmd(sshConnection, cmd); } catch (final Exception e) { - s_logger.debug("Catch exception " + e.toString(), e); + logger.debug("Catch exception " + e.toString(), e); } finally { sshConnection.close(); } @@ -96,7 +94,7 @@ public String revertToSnapshot(final Connection conn, final VM vmSnapshot, errMsg = "revert_memory_snapshot exception"; } } - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index 68236f92ac44..773b443a57e8 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -38,7 +38,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -67,7 +66,6 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class Xenserver625StorageProcessor extends XenServerStorageProcessor { - private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class); public Xenserver625StorageProcessor(final CitrixResourceBase resource) { super(resource); @@ -80,7 +78,7 @@ private void mountNfs(Connection conn, String remoteDir, String localDir, String String result = hypervisorResource.callHostPluginAsync(conn, "cloud-plugin-storage", "mountNfsSecondaryStorage", 100 * 1000, "localDir", localDir, "remoteDir", remoteDir, "nfsVersion", nfsVersion); if (StringUtils.isBlank(result)) { String errMsg = "Could not mount secondary storage " + remoteDir + " on host " + localDir; - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } } @@ -118,7 +116,7 @@ protected SR createFileSR(Connection conn, String path) { */ protected SR createNewFileSr(Connection conn, String srPath) { String hostUuid = hypervisorResource.getHost().getUuid(); - s_logger.debug(String.format("Creating file SR for path [%s] on host [%s]", srPath, this.hypervisorResource._host.getUuid())); + logger.debug(String.format("Creating file SR for path [%s] on host [%s]", srPath, this.hypervisorResource._host.getUuid())); SR sr = null; PBD pbd = null; try { @@ -143,14 +141,14 @@ protected SR createNewFileSr(Connection conn, String srPath) { Types.InternalError internalErrorException 
= (Types.InternalError)e; if (StringUtils.contains(internalErrorException.message, expectedDuplicatedFileSrErrorMessage)) { - s_logger.debug(String.format( + logger.debug(String.format( "It seems that we have hit a race condition case here while creating file SR for [%s]. Instead of creating one, we will reuse the one that already exist in the XenServer pool.", srPath)); return retrieveAlreadyConfiguredSrWithoutException(conn, srPath); } } removeSrAndPbdIfPossible(conn, sr, pbd); - s_logger.debug(String.format("Could not create file SR [%s] on host [%s].", srPath, hostUuid), e); + logger.debug(String.format("Could not create file SR [%s] on host [%s].", srPath, hostUuid), e); return null; } } @@ -187,7 +185,7 @@ protected SR retrieveAlreadyConfiguredSrWithoutException(Connection conn, String protected SR retrieveAlreadyConfiguredSr(Connection conn, String path) throws XenAPIException, XmlRpcException { Set srs = SR.getByNameLabel(conn, path); if (CollectionUtils.isEmpty(srs)) { - s_logger.debug("No file SR found for path: " + path); + logger.debug("No file SR found for path: " + path); return null; } if (srs.size() > 1) { @@ -195,19 +193,19 @@ protected SR retrieveAlreadyConfiguredSr(Connection conn, String path) throws Xe } SR sr = srs.iterator().next(); String srUuid = sr.getUuid(conn); - s_logger.debug(String.format("SR [%s] was already introduced in XenServer. Checking if we can reuse it.", srUuid)); + logger.debug(String.format("SR [%s] was already introduced in XenServer. Checking if we can reuse it.", srUuid)); Map currentOperations = sr.getCurrentOperations(conn); if (MapUtils.isEmpty(currentOperations)) { - s_logger.debug(String.format("There are no current operation in SR [%s]. It looks like an unusual condition. We will check if it is usable before returning it.", srUuid)); + logger.debug(String.format("There are no current operation in SR [%s]. It looks like an unusual condition. 
We will check if it is usable before returning it.", srUuid)); } try { sr.scan(conn); } catch (XenAPIException | XmlRpcException e) { - s_logger.debug(String.format("Problems while checking if cached temporary SR [%s] is working properly (we executed sr-scan). We will not reuse it.", srUuid)); + logger.debug(String.format("Problems while checking if cached temporary SR [%s] is working properly (we executed sr-scan). We will not reuse it.", srUuid)); forgetSr(conn, sr); return null; } - s_logger.debug(String.format("Cached temporary SR [%s] is working properly. We will reuse it.", srUuid)); + logger.debug(String.format("Cached temporary SR [%s] is working properly. We will reuse it.", srUuid)); return sr; } @@ -221,10 +219,10 @@ protected void forgetSr(Connection conn, SR sr) { srUuid = sr.getUuid(conn); Set pbDs = sr.getPBDs(conn); for (PBD pbd : pbDs) { - s_logger.debug(String.format("Unpluging PBD [%s] of SR [%s] as it is not working properly.", pbd.getUuid(conn), srUuid)); + logger.debug(String.format("Unpluging PBD [%s] of SR [%s] as it is not working properly.", pbd.getUuid(conn), srUuid)); unplugPbd(conn, pbd); } - s_logger.debug(String.format("Forgetting SR [%s] as it is not working properly.", srUuid)); + logger.debug(String.format("Forgetting SR [%s] as it is not working properly.", srUuid)); sr.forget(conn); } catch (XenAPIException | XmlRpcException e) { throw new CloudRuntimeException("Exception while forgeting SR: " + srUuid, e); @@ -336,7 +334,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { if (srs.size() != 1) { final String msg = "There are " + srs.size() + " SRs with same name: " + srName; - s_logger.warn(msg); + logger.warn(msg); return new CopyCmdAnswer(msg); } else { @@ -392,7 +390,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + " for template due to " + e.toString(); - s_logger.warn(msg, e); + 
logger.warn(msg, e); return new CopyCmdAnswer(msg); } finally { @@ -400,7 +398,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { try { task.destroy(conn); } catch (final Exception e) { - s_logger.debug("unable to destroy task (" + task.toWireString() + ") due to " + e.toString()); + logger.debug("unable to destroy task (" + task.toWireString() + ") due to " + e.toString()); } } @@ -454,7 +452,7 @@ protected String backupSnapshot(final Connection conn, final String primaryStora try { task.destroy(conn); } catch (final Exception e) { - s_logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); + logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); } } } @@ -462,7 +460,7 @@ protected String backupSnapshot(final Connection conn, final String primaryStora return result; } catch (final Exception e) { final String msg = "Exception in backupsnapshot stage due to " + e.toString(); - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg, e); } finally { try { @@ -470,7 +468,7 @@ protected String backupSnapshot(final Connection conn, final String primaryStora hypervisorResource.removeSR(conn, ssSR); } } catch (final Exception e) { - s_logger.debug("Exception in backupsnapshot cleanup stage due to " + e.toString()); + logger.debug("Exception in backupsnapshot cleanup stage due to " + e.toString()); } } } @@ -481,7 +479,7 @@ protected String getVhdParent(final Connection conn, final String primaryStorage isISCSI.toString()); if (parentUuid == null || parentUuid.isEmpty() || parentUuid.equalsIgnoreCase("None")) { - s_logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); + logger.debug("Unable to get parent of VHD " + snapshotUuid + " in SR " + primaryStorageSRUuid); // errString is already logged. 
return null; } @@ -575,7 +573,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { final boolean result = makeDirectory(conn, localDir + "/" + folder); if (!result) { details = " Failed to create folder " + folder + " in secondary storage"; - s_logger.warn(details); + logger.warn(details); return new CopyCmdAnswer(details); } @@ -600,7 +598,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { try { deleteSnapshotBackup(conn, localMountPoint, folder, secondaryStorageMountPath, snapshotBackupUuid); } catch (final Exception e) { - s_logger.debug("Failed to delete snapshot on cache storages", e); + logger.debug("Failed to delete snapshot on cache storages", e); } } @@ -614,7 +612,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { try { deleteSnapshotBackup(conn, localMountPoint, folder, secondaryStorageMountPath, snapshotBackupUuid); } catch (final Exception e) { - s_logger.debug("Failed to delete snapshot on cache storages", e); + logger.debug("Failed to delete snapshot on cache storages", e); } } // finalPath = folder + File.separator + @@ -628,7 +626,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { try { task.destroy(conn); } catch (final Exception e) { - s_logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); + logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); } } if (snapshotSr != null) { @@ -671,14 +669,14 @@ public Answer backupSnapshot(final CopyCommand cmd) { } else { newSnapshot.setParentSnapshotPath(prevBackupUuid); } - s_logger.info("New snapshot details: " + newSnapshot.toString()); - s_logger.info("New snapshot physical utilization: " + toHumanReadableSize(physicalSize)); + logger.info("New snapshot details: " + newSnapshot.toString()); + logger.info("New snapshot physical utilization: " + toHumanReadableSize(physicalSize)); return new CopyCmdAnswer(newSnapshot); } catch (final Exception e) { final String reason = e instanceof Types.XenAPIException 
? e.toString() : e.getMessage(); details = "BackupSnapshot Failed due to " + reason; - s_logger.warn(details, e); + logger.warn(details, e); // remove last bad primary snapshot when exception happens destroySnapshotOnPrimaryStorage(conn, snapshotUuid); @@ -713,7 +711,7 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { installPath = template.getPath(); if (!hypervisorResource.createSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath, nfsVersion)) { details = " Filed to create folder " + installPath + " in secondary storage"; - s_logger.warn(details); + logger.warn(details); return new CopyCmdAnswer(details); } @@ -762,13 +760,13 @@ public Answer createTemplateFromVolume(final CopyCommand cmd) { hypervisorResource.deleteSecondaryStorageFolder(conn, secondaryStorageMountPath, installPath, nfsVersion); } details = "Creating template from volume " + volumeUUID + " failed due to " + e.toString(); - s_logger.error(details, e); + logger.error(details, e); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e) { - s_logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); + logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); } } } @@ -889,10 +887,10 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final Types.XenAPIException e) { details += " due to " + e.toString(); - s_logger.warn(details, e); + logger.warn(details, e); } catch (final Exception e) { details += " due to " + e.getMessage(); - s_logger.warn(details, e); + logger.warn(details, e); } finally { if (srcSr != null) { hypervisorResource.skipOrRemoveSR(conn, srcSr); @@ -906,13 +904,13 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { try { destVdi.destroy(conn); } catch (final Exception e) { - s_logger.debug("destroy dest vdi failed", e); + logger.debug("destroy dest vdi failed", e); } } } if (!result) { 
// Is this logged at a higher level? - s_logger.error(details); + logger.error(details); } // In all cases return something. @@ -921,13 +919,13 @@ public Answer createVolumeFromSnapshot(final CopyCommand cmd) { @Override public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplainceCommand cmd) { - s_logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for XenServerStorageProcessor"); + logger.info("'CheckDataStoreStoragePolicyComplainceCommand' not applicable used for XenServerStorageProcessor"); return new Answer(cmd,false,"Not applicable used for XenServerStorageProcessor"); } @Override public Answer syncVolumePath(SyncVolumePathCommand cmd) { - s_logger.info("SyncVolumePathCommand not currently applicable for XenServerStorageProcessor"); + logger.info("SyncVolumePathCommand not currently applicable for XenServerStorageProcessor"); return new Answer(cmd, false, "Not currently applicable for XenServerStorageProcessor"); } @@ -968,14 +966,14 @@ public Answer copyVolumeFromPrimaryToSecondary(final CopyCommand cmd) { newVol.setSize(srcVolume.getSize()); return new CopyCmdAnswer(newVol); } catch (final Exception e) { - s_logger.debug("Failed to copy volume to secondary: " + e.toString()); + logger.debug("Failed to copy volume to secondary: " + e.toString()); return new CopyCmdAnswer("Failed to copy volume to secondary: " + e.toString()); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e) { - s_logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); + logger.warn("unable to destroy task(" + task.toWireString() + ") due to " + e.toString()); } } hypervisorResource.removeSR(conn, secondaryStorage); @@ -1031,14 +1029,14 @@ public Answer copyVolumeFromImageCacheToPrimary(final CopyCommand cmd) { return new CopyCmdAnswer(newVol); } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString(); - 
s_logger.warn(msg, e); + logger.warn(msg, e); return new CopyCmdAnswer(e.toString()); } finally { if (task != null) { try { task.destroy(conn); } catch (final Exception e) { - s_logger.warn("unable to destroy task(" + task.toString() + ") due to " + e.toString()); + logger.warn("unable to destroy task(" + task.toString() + ") due to " + e.toString()); } } if (srcSr != null) { @@ -1047,7 +1045,7 @@ public Answer copyVolumeFromImageCacheToPrimary(final CopyCommand cmd) { } } - s_logger.debug("unsupported protocol"); + logger.debug("unsupported protocol"); return new CopyCmdAnswer("unsupported protocol"); } @@ -1077,7 +1075,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { srcUri = new URI(srcStore.getUrl()); destUri = new URI(destStore.getUrl()); } catch (final Exception e) { - s_logger.debug("incorrect url", e); + logger.debug("incorrect url", e); return new CopyCmdAnswer("incorrect url" + e.toString()); } @@ -1174,7 +1172,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { return new CopyCmdAnswer(newTemplate); } catch (final Exception e) { - s_logger.error("Failed create template from snapshot", e); + logger.error("Failed create template from snapshot", e); return new CopyCmdAnswer("Failed create template from snapshot " + e.toString()); } finally { @@ -1183,7 +1181,7 @@ public Answer createTemplateFromSnapshot(final CopyCommand cmd) { try { destVdi.destroy(conn); } catch (final Exception e) { - s_logger.debug("Clean up left over on dest storage failed: ", e); + logger.debug("Clean up left over on dest storage failed: ", e); } } } @@ -1215,7 +1213,7 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { destStore = (NfsTO)templateObjTO.getDataStore(); destUri = new URI(destStore.getUrl()); } catch (final Exception ex) { - s_logger.debug("Invalid URI", ex); + logger.debug("Invalid URI", ex); return new CopyCmdAnswer("Invalid URI: " + ex.toString()); } @@ -1291,15 +1289,15 @@ private Answer 
createTemplateFromSnapshot2(final CopyCommand cmd) { return new CopyCmdAnswer(newTemplate); } catch (final BadServerResponse e) { - s_logger.error("Failed to create a template from a snapshot due to incomprehensible server response", e); + logger.error("Failed to create a template from a snapshot due to incomprehensible server response", e); return new CopyCmdAnswer("Failed to create a template from a snapshot: " + e.toString()); } catch (final XenAPIException e) { - s_logger.error("Failed to create a template from a snapshot due to xenapi error", e); + logger.error("Failed to create a template from a snapshot due to xenapi error", e); return new CopyCmdAnswer("Failed to create a template from a snapshot: " + e.toString()); } catch (final XmlRpcException e) { - s_logger.error("Failed to create a template from a snapshot due to rpc error", e); + logger.error("Failed to create a template from a snapshot due to rpc error", e); return new CopyCmdAnswer("Failed to create a template from a snapshot: " + e.toString()); } finally { @@ -1308,7 +1306,7 @@ private Answer createTemplateFromSnapshot2(final CopyCommand cmd) { try { destVdi.destroy(conn); } catch (final Exception e) { - s_logger.debug("Cleaned up leftover VDI on destination storage due to failure: ", e); + logger.debug("Cleaned up leftover VDI on destination storage due to failure: ", e); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsLocalNetwork.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsLocalNetwork.java index e03a5895a5b4..c8ec2b106ab8 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsLocalNetwork.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsLocalNetwork.java @@ -16,7 +16,8 @@ // under the License. 
package com.cloud.hypervisor.xenserver.resource; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import com.xensource.xenapi.Connection; @@ -29,7 +30,7 @@ */ public class XsLocalNetwork { - private static final Logger s_logger = Logger.getLogger(XsLocalNetwork.class); + protected Logger logger = LogManager.getLogger(getClass()); private final CitrixResourceBase _citrixResourceBase; private final Network _n; @@ -67,8 +68,8 @@ public PIF getPif(final Connection conn) throws XenAPIException, XmlRpcException for (final PIF pif : nr.PIFs) { final PIF.Record pr = pif.getRecord(conn); if (_citrixResourceBase.getHost().getUuid().equals(pr.host.getUuid(conn))) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a network called " + nr.nameLabel + " on host=" + _citrixResourceBase.getHost().getIp() + "; Network=" + nr.uuid + "; pif=" + pr.uuid); + if (logger.isDebugEnabled()) { + logger.debug("Found a network called " + nr.nameLabel + " on host=" + _citrixResourceBase.getHost().getIp() + "; Network=" + nr.uuid + "; pif=" + pr.uuid); } _p = pif; _pr = pr; diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerGetAutoScaleMetricsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerGetAutoScaleMetricsCommandWrapper.java index 74d5a8ed0a45..1bf1c5070696 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerGetAutoScaleMetricsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerGetAutoScaleMetricsCommandWrapper.java @@ -20,7 +20,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xcp; import com.cloud.hypervisor.xenserver.resource.XcpServerResource; -import 
org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.routing.GetAutoScaleMetricsAnswer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = GetAutoScaleMetricsCommand.class) public final class XcpServerGetAutoScaleMetricsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XcpServerGetAutoScaleMetricsCommandWrapper.class); @Override public Answer execute(final GetAutoScaleMetricsCommand command, final XcpServerResource xcpServer) { @@ -77,7 +75,7 @@ public Answer execute(final GetAutoScaleMetricsCommand command, final XcpServerR return new GetAutoScaleMetricsAnswer(command, true, values); } catch (final Exception ex) { - s_logger.warn("Failed to get autoscale metrics due to ", ex); + logger.warn("Failed to get autoscale metrics due to ", ex); return new GetAutoScaleMetricsAnswer(command, false); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerNetworkUsageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerNetworkUsageCommandWrapper.java index 0f5aaa1fb134..cc37c8373fb1 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerNetworkUsageCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xcp/XcpServerNetworkUsageCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xcp; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.NetworkUsageAnswer; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = NetworkUsageCommand.class) public final class XcpServerNetworkUsageCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XcpServerNetworkUsageCommandWrapper.class); @Override public Answer execute(final 
NetworkUsageCommand command, final XcpServerResource xcpServerResource) { @@ -47,7 +45,7 @@ public Answer execute(final NetworkUsageCommand command, final XcpServerResource final NetworkUsageAnswer answer = new NetworkUsageAnswer(command, "", stats[0], stats[1]); return answer; } catch (final Exception ex) { - s_logger.warn("Failed to get network usage stats due to ", ex); + logger.warn("Failed to get network usage stats due to ", ex); return new NetworkUsageAnswer(command, ex); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56CheckOnHostCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56CheckOnHostCommandWrapper.java index 49445163f379..d59ef1f5a0da 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56CheckOnHostCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56CheckOnHostCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xen56; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckOnHostAnswer; @@ -31,7 +30,6 @@ @ResourceWrapper(handles = CheckOnHostCommand.class) public final class XenServer56CheckOnHostCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer56CheckOnHostCommandWrapper.class); @Override public Answer execute(final CheckOnHostCommand command, final XenServer56Resource xenServer56) { @@ -44,7 +42,7 @@ public Answer execute(final CheckOnHostCommand command, final XenServer56Resourc } else { msg = "Heart beat is gone so dead."; } - s_logger.debug(msg); + logger.debug(msg); return new CheckOnHostAnswer(command, alive, msg); } } diff --git 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56FenceCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56FenceCommandWrapper.java index 3cebbd9a342e..c76059640b32 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56FenceCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56FenceCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = FenceCommand.class) public final class XenServer56FenceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer56FenceCommandWrapper.class); @Override public Answer execute(final FenceCommand command, final XenServer56Resource xenServer56) { @@ -45,28 +43,28 @@ public Answer execute(final FenceCommand command, final XenServer56Resource xenS try { final Boolean alive = xenServer56.checkHeartbeat(command.getHostGuid()); if (alive == null) { - s_logger.debug("Failed to check heartbeat, so unable to fence"); + logger.debug("Failed to check heartbeat, so unable to fence"); return new FenceAnswer(command, false, "Failed to check heartbeat, so unable to fence"); } if (alive) { - s_logger.debug("Heart beat is still going so unable to fence"); + logger.debug("Heart beat is still going so unable to fence"); return new FenceAnswer(command, false, "Heartbeat is still going on unable to fence"); } final Set vms = VM.getByNameLabel(conn, command.getVmName()); for (final VM vm : vms) { - s_logger.info("Fence command for VM " + command.getVmName()); + logger.info("Fence command for VM " + command.getVmName()); vm.powerStateReset(conn); vm.destroy(conn); } 
return new FenceAnswer(command); } catch (final XmlRpcException e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } catch (final XenAPIException e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } catch (final Exception e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56GetAutoScaleMetricsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56GetAutoScaleMetricsCommandWrapper.java index 8cd11345fca1..3da752dd4e8e 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56GetAutoScaleMetricsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56GetAutoScaleMetricsCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xen56; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.routing.GetAutoScaleMetricsAnswer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = GetAutoScaleMetricsCommand.class) public final class XenServer56GetAutoScaleMetricsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer56GetAutoScaleMetricsCommandWrapper.class); @Override public Answer execute(final GetAutoScaleMetricsCommand command, final XenServer56Resource xenServer56) { @@ -77,7 +75,7 @@ public Answer execute(final GetAutoScaleMetricsCommand command, final XenServer5 return new GetAutoScaleMetricsAnswer(command, true, values); } catch (final Exception ex) { - 
s_logger.warn("Failed to get autoscale metrics due to ", ex); + logger.warn("Failed to get autoscale metrics due to ", ex); return new GetAutoScaleMetricsAnswer(command, false); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java index 4f3209e6d6f9..43233cc657cf 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56/XenServer56NetworkUsageCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xen56; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.NetworkUsageAnswer; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = NetworkUsageCommand.class) public final class XenServer56NetworkUsageCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer56NetworkUsageCommandWrapper.class); @Override public Answer execute(final NetworkUsageCommand command, final XenServer56Resource xenServer56) { @@ -51,7 +49,7 @@ public Answer execute(final NetworkUsageCommand command, final XenServer56Resour final NetworkUsageAnswer answer = new NetworkUsageAnswer(command, "", stats[0], stats[1]); return answer; } catch (final Exception ex) { - s_logger.warn("Failed to get network usage stats due to ", ex); + logger.warn("Failed to get network usage stats due to ", ex); return new NetworkUsageAnswer(command, ex); } } @@ -97,7 +95,7 @@ protected NetworkUsageAnswer executeNetworkUsage(final NetworkUsageCommand comma } return new NetworkUsageAnswer(command, "success", 0L, 0L); } catch (final Exception ex) { - 
s_logger.warn("Failed to get network usage stats due to ", ex); + logger.warn("Failed to get network usage stats due to ", ex); return new NetworkUsageAnswer(command, ex); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56p1/XenServer56FP1FenceCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56p1/XenServer56FP1FenceCommandWrapper.java index bc7a4434bd06..84fe14ecee86 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56p1/XenServer56FP1FenceCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen56p1/XenServer56FP1FenceCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.Map; import java.util.Set; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -41,7 +40,6 @@ @ResourceWrapper(handles = FenceCommand.class) public final class XenServer56FP1FenceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer56FP1FenceCommandWrapper.class); @Override public Answer execute(final FenceCommand command, final XenServer56Resource xenServer56) { @@ -49,11 +47,11 @@ public Answer execute(final FenceCommand command, final XenServer56Resource xenS try { final Boolean alive = xenServer56.checkHeartbeat(command.getHostGuid()); if ( alive == null ) { - s_logger.debug("Failed to check heartbeat, so unable to fence"); + logger.debug("Failed to check heartbeat, so unable to fence"); return new FenceAnswer(command, false, "Failed to check heartbeat, so unable to fence"); } if ( alive ) { - s_logger.debug("Heart beat is still going so unable to fence"); + logger.debug("Heart beat is still going so unable to fence"); return new FenceAnswer(command, false, "Heartbeat is still going on unable to fence"); } final Set vms = 
VM.getByNameLabel(conn, command.getVmName()); @@ -66,7 +64,7 @@ public Answer execute(final FenceCommand command, final XenServer56Resource xenS vdis.add(vdi); } } - s_logger.info("Fence command for VM " + command.getVmName()); + logger.info("Fence command for VM " + command.getVmName()); vm.powerStateReset(conn); vm.destroy(conn); for (final VDI vdi : vdis) { @@ -81,13 +79,13 @@ public Answer execute(final FenceCommand command, final XenServer56Resource xenS } return new FenceAnswer(command); } catch (final XmlRpcException e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } catch (final XenAPIException e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } catch (final Exception e) { - s_logger.warn("Unable to fence", e); + logger.warn("Unable to fence", e); return new FenceAnswer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateVolumeCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateVolumeCommandWrapper.java index e35bfb045727..aac0af723baf 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateVolumeCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateVolumeCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.MigrateVolumeAnswer; @@ -41,7 +40,6 @@ @ResourceWrapper(handles = MigrateVolumeCommand.class) public final class XenServer610MigrateVolumeCommandWrapper extends CommandWrapper { - 
private static final Logger LOGGER = Logger.getLogger(XenServer610MigrateVolumeCommandWrapper.class); @Override public Answer execute(final MigrateVolumeCommand command, final XenServer610Resource xenServer610Resource) { @@ -89,7 +87,7 @@ public Answer execute(final MigrateVolumeCommand command, final XenServer610Reso String msg = "Caught exception " + ex.getClass().getName() + " due to the following: " + ex.toString(); - LOGGER.error(msg, ex); + logger.error(msg, ex); return new MigrateVolumeAnswer(command, false, msg, null); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java index f22b4f1bf78c..e46b930b07a7 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java @@ -25,7 +25,6 @@ import java.util.Set; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateWithStorageAnswer; @@ -56,7 +55,6 @@ @ResourceWrapper(handles = MigrateWithStorageCommand.class) public final class XenServer610MigrateWithStorageCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer610MigrateWithStorageCommandWrapper.class); @Override public Answer execute(final MigrateWithStorageCommand command, final XenServer610Resource xenServer610Resource) { @@ -108,7 +106,7 @@ public Answer execute(final MigrateWithStorageCommand command, final XenServer61 xenServer610Resource.waitForTask(connection, task, 1000, timeout); 
xenServer610Resource.checkForSuccess(connection, task); } catch (final Types.HandleInvalid e) { - s_logger.error("Error while checking if vm " + vmName + " can be migrated to the destination host " + host, e); + logger.error("Error while checking if vm " + vmName + " can be migrated to the destination host " + host, e); throw new CloudRuntimeException("Error while checking if vm " + vmName + " can be migrated to the " + "destination host " + host, e); } @@ -120,7 +118,7 @@ public Answer execute(final MigrateWithStorageCommand command, final XenServer61 xenServer610Resource.waitForTask(connection, task, 1000, timeout); xenServer610Resource.checkForSuccess(connection, task); } catch (final Types.HandleInvalid e) { - s_logger.error("Error while migrating vm " + vmName + " to the destination host " + host, e); + logger.error("Error while migrating vm " + vmName + " to the destination host " + host, e); throw new CloudRuntimeException("Error while migrating vm " + vmName + " to the destination host " + host, e); } @@ -129,14 +127,14 @@ public Answer execute(final MigrateWithStorageCommand command, final XenServer61 vmToMigrate.setAffinity(connection, host); return new MigrateWithStorageAnswer(command, volumeToList); } catch (final Exception e) { - s_logger.warn("Catch Exception " + e.getClass().getName() + ". Storage motion failed due to " + e.toString(), e); + logger.warn("Catch Exception " + e.getClass().getName() + ". 
Storage motion failed due to " + e.toString(), e); return new MigrateWithStorageAnswer(command, e); } finally { if (task != null) { try { task.destroy(connection); } catch (final Exception e) { - s_logger.debug("Unable to destroy task " + task.toString() + " on host " + uuid + " due to " + e.toString()); + logger.debug("Unable to destroy task " + task.toString() + " on host " + uuid + " due to " + e.toString()); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCompleteCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCompleteCommandWrapper.java index bf649aab5118..1aaa4011f093 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCompleteCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCompleteCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.Set; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateWithStorageCompleteAnswer; @@ -41,7 +40,6 @@ @ResourceWrapper(handles = MigrateWithStorageCompleteCommand.class) public final class XenServer610MigrateWithStorageCompleteCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer610MigrateWithStorageCompleteCommandWrapper.class); @Override public Answer execute(final MigrateWithStorageCompleteCommand command, final XenServer610Resource xenServer610Resource) { @@ -73,10 +71,10 @@ public Answer execute(final MigrateWithStorageCompleteCommand command, final Xen return new MigrateWithStorageCompleteAnswer(command, volumeToSet); } catch (final CloudRuntimeException e) { - 
s_logger.error("Migration of vm " + name + " with storage failed due to " + e.toString(), e); + logger.error("Migration of vm " + name + " with storage failed due to " + e.toString(), e); return new MigrateWithStorageCompleteAnswer(command, e); } catch (final Exception e) { - s_logger.error("Migration of vm " + name + " with storage failed due to " + e.toString(), e); + logger.error("Migration of vm " + name + " with storage failed due to " + e.toString(), e); return new MigrateWithStorageCompleteAnswer(command, e); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java index 6bb01965632b..422c0a9cfc77 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateWithStorageReceiveAnswer; @@ -48,7 +47,6 @@ @ResourceWrapper(handles = MigrateWithStorageReceiveCommand.class) public final class XenServer610MigrateWithStorageReceiveCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer610MigrateWithStorageReceiveCommandWrapper.class); @Override public Answer execute(final MigrateWithStorageReceiveCommand command, final XenServer610Resource xenServer610Resource) { @@ -94,10 +92,10 @@ public Answer execute(final MigrateWithStorageReceiveCommand command, final XenS return new MigrateWithStorageReceiveAnswer(command, 
volumeToSr, nicToNetwork, token); } catch (final CloudRuntimeException e) { - s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e); + logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e); return new MigrateWithStorageReceiveAnswer(command, e); } catch (final Exception e) { - s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e); + logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e); return new MigrateWithStorageReceiveAnswer(command, e); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageSendCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageSendCommandWrapper.java index 7b1e4c85397a..59abe5303cd6 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageSendCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageSendCommandWrapper.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateWithStorageSendAnswer; @@ -49,7 +48,6 @@ @ResourceWrapper(handles = MigrateWithStorageSendCommand.class) public final class XenServer610MigrateWithStorageSendCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer610MigrateWithStorageSendCommandWrapper.class); @Override public Answer execute(final MigrateWithStorageSendCommand command, final XenServer610Resource xenServer610Resource) { @@ -114,7 +112,7 @@ public Answer execute(final 
MigrateWithStorageSendCommand command, final XenServ xenServer610Resource.waitForTask(connection, task, 1000, timeout); xenServer610Resource.checkForSuccess(connection, task); } catch (final Types.HandleInvalid e) { - s_logger.error("Error while checking if vm " + vmName + " can be migrated.", e); + logger.error("Error while checking if vm " + vmName + " can be migrated.", e); throw new CloudRuntimeException("Error while checking if vm " + vmName + " can be migrated.", e); } @@ -126,24 +124,24 @@ public Answer execute(final MigrateWithStorageSendCommand command, final XenServ xenServer610Resource.waitForTask(connection, task, 1000, timeout); xenServer610Resource.checkForSuccess(connection, task); } catch (final Types.HandleInvalid e) { - s_logger.error("Error while migrating vm " + vmName, e); + logger.error("Error while migrating vm " + vmName, e); throw new CloudRuntimeException("Error while migrating vm " + vmName, e); } final Set volumeToSet = null; return new MigrateWithStorageSendAnswer(command, volumeToSet); } catch (final CloudRuntimeException e) { - s_logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e); + logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e); return new MigrateWithStorageSendAnswer(command, e); } catch (final Exception e) { - s_logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e); + logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e); return new MigrateWithStorageSendAnswer(command, e); } finally { if (task != null) { try { task.destroy(connection); } catch (final Exception e) { - s_logger.debug("Unable to destroy task " + task.toString() + " on host " + xenServer610Resource.getHost().getUuid() + " due to " + e.toString()); + logger.debug("Unable to destroy task " + task.toString() + " on host " + xenServer610Resource.getHost().getUuid() + " due to " + e.toString()); } } } diff 
--git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen620sp1/XenServer620SP1GetGPUStatsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen620sp1/XenServer620SP1GetGPUStatsCommandWrapper.java index 8fbe663ce180..1370effb8bf1 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen620sp1/XenServer620SP1GetGPUStatsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen620sp1/XenServer620SP1GetGPUStatsCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.HashMap; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetGPUStatsAnswer; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = GetGPUStatsCommand.class) public final class XenServer620SP1GetGPUStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(XenServer620SP1GetGPUStatsCommandWrapper.class); @Override public Answer execute(final GetGPUStatsCommand command, final XenServer620SP1Resource xenServer620SP1Resource) { @@ -45,7 +43,7 @@ public Answer execute(final GetGPUStatsCommand command, final XenServer620SP1Res groupDetails = xenServer620SP1Resource.getGPUGroupDetails(conn); } catch (final Exception e) { final String msg = "Unable to get GPU stats" + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new GetGPUStatsAnswer(command, false, msg); } return new GetGPUStatsAnswer(command, groupDetails); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachIsoCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachIsoCommandWrapper.java index 120c7f62c713..30fd0642174f 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachIsoCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachIsoCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.AttachIsoCommand; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = AttachIsoCommand.class) public final class CitrixAttachIsoCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixAttachIsoCommandWrapper.class); @Override public Answer execute(final AttachIsoCommand command, final CitrixResourceBase citrixResourceBase) { @@ -126,10 +124,10 @@ public Answer execute(final AttachIsoCommand command, final CitrixResourceBase c return new Answer(command); } } catch (final XenAPIException e) { - s_logger.warn(errorMsg + ": " + e.toString(), e); + logger.warn(errorMsg + ": " + e.toString(), e); return new Answer(command, false, e.toString()); } catch (final Exception e) { - s_logger.warn(errorMsg + ": " + e.toString(), e); + logger.warn(errorMsg + ": " + e.toString(), e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachOrDettachConfigDriveCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachOrDettachConfigDriveCommandWrapper.java index 08da7ae07cce..dcdc6013d2ec 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachOrDettachConfigDriveCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixAttachOrDettachConfigDriveCommandWrapper.java @@ -29,7 +29,6 @@ import 
com.xensource.xenapi.VDI; import com.xensource.xenapi.VM; import com.xensource.xenapi.Types; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase; @@ -39,7 +38,6 @@ @ResourceWrapper(handles = AttachOrDettachConfigDriveCommand.class) public final class CitrixAttachOrDettachConfigDriveCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixAttachOrDettachConfigDriveCommandWrapper.class); @Override public Answer execute(final AttachOrDettachConfigDriveCommand command, final CitrixResourceBase citrixResourceBase) { @@ -55,13 +53,13 @@ public Answer execute(final AttachOrDettachConfigDriveCommand command, final Cit for (VM vm : vms) { if (isAttach) { if (!citrixResourceBase.createAndAttachConfigDriveIsoForVM(conn, vm, vmData, label)) { - s_logger.debug("Failed to attach config drive iso to VM " + vmName); + logger.debug("Failed to attach config drive iso to VM " + vmName); } } else { // delete the config drive iso attached to VM Set vdis = VDI.getByNameLabel(conn, vmName+".iso"); if (vdis != null && !vdis.isEmpty()) { - s_logger.debug("Deleting config drive for the VM " + vmName); + logger.debug("Deleting config drive for the VM " + vmName); VDI vdi = vdis.iterator().next(); // Find the VM's CD-ROM VBD Set vbds = vdi.getVBDs(conn); @@ -79,13 +77,13 @@ public Answer execute(final AttachOrDettachConfigDriveCommand command, final Cit vdi.destroy(conn); } - s_logger.debug("Successfully dettached config drive iso from the VM " + vmName); + logger.debug("Successfully dettached config drive iso from the VM " + vmName); } } }catch (Types.XenAPIException ex) { - s_logger.debug("Failed to attach config drive iso to VM " + vmName + " " + ex.getMessage() ); + logger.debug("Failed to attach config drive iso to VM " + vmName + " " + ex.getMessage() ); }catch (XmlRpcException ex) { - s_logger.debug("Failed to attach config drive iso to VM " + vmName + " 
"+ex.getMessage()); + logger.debug("Failed to attach config drive iso to VM " + vmName + " "+ex.getMessage()); } return new Answer(command, true, "success"); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckGuestOsMappingCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckGuestOsMappingCommandWrapper.java index 68403d76ca4b..927e2b3fbe38 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckGuestOsMappingCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckGuestOsMappingCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.Set; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckGuestOsMappingAnswer; @@ -36,15 +35,13 @@ @ResourceWrapper(handles = CheckGuestOsMappingCommand.class) public final class CitrixCheckGuestOsMappingCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCheckGuestOsMappingCommandWrapper.class); - @Override public Answer execute(final CheckGuestOsMappingCommand command, final CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); String guestOsName = command.getGuestOsName(); String guestOsMappingName = command.getGuestOsHypervisorMappingName(); try { - s_logger.info("Checking guest os mapping name: " + guestOsMappingName + " for the guest os: " + guestOsName + " in the hypervisor"); + logger.info("Checking guest os mapping name: " + guestOsMappingName + " for the guest os: " + guestOsName + " in the hypervisor"); final Set vms = VM.getAll(conn); if (CollectionUtils.isEmpty(vms)) { return new CheckGuestOsMappingAnswer(command, "Unable to match 
guest os mapping name: " + guestOsMappingName + " in the hypervisor"); @@ -52,15 +49,15 @@ public Answer execute(final CheckGuestOsMappingCommand command, final CitrixReso for (VM vm : vms) { if (vm != null && vm.getIsATemplate(conn) && guestOsMappingName.equalsIgnoreCase(vm.getNameLabel(conn))) { if (guestOsName.equalsIgnoreCase(vm.getNameLabel(conn))) { - s_logger.debug("Hypervisor guest os name label matches with os name: " + guestOsName); + logger.debug("Hypervisor guest os name label matches with os name: " + guestOsName); } - s_logger.info("Hypervisor guest os name label matches with os mapping: " + guestOsMappingName + " from user"); + logger.info("Hypervisor guest os name label matches with os mapping: " + guestOsMappingName + " from user"); return new CheckGuestOsMappingAnswer(command); } } return new CheckGuestOsMappingAnswer(command, "Guest os mapping name: " + guestOsMappingName + " not found in the hypervisor"); } catch (final Exception e) { - s_logger.error("Failed to find the hypervisor guest os mapping name: " + guestOsMappingName, e); + logger.error("Failed to find the hypervisor guest os mapping name: " + guestOsMappingName, e); return new CheckGuestOsMappingAnswer(command, e.getLocalizedMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckNetworkCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckNetworkCommandWrapper.java index 2825d751bbba..600c8e2625d8 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckNetworkCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckNetworkCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import 
com.cloud.agent.api.CheckNetworkAnswer; @@ -35,12 +34,11 @@ @ResourceWrapper(handles = CheckNetworkCommand.class) public final class CitrixCheckNetworkCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCheckNetworkCommandWrapper.class); @Override public Answer execute(final CheckNetworkCommand command, final CitrixResourceBase citrixResourceBase) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if network name setup is done on the resource"); + if (logger.isDebugEnabled()) { + logger.debug("Checking if network name setup is done on the resource"); } final List infoList = command.getPhysicalNetworkInfoList(); @@ -77,7 +75,7 @@ public Answer execute(final CheckNetworkCommand command, final CitrixResourceBas }*/ } if (errorout) { - s_logger.error(msg); + logger.error(msg); return new CheckNetworkAnswer(command, false, msg); } else { return new CheckNetworkAnswer(command, true, "Network Setup check by names is done"); @@ -85,11 +83,11 @@ public Answer execute(final CheckNetworkCommand command, final CitrixResourceBas } catch (final XenAPIException e) { final String msg = "CheckNetworkCommand failed with XenAPIException:" + e.toString() + " host:" + citrixResourceBase.getHost().getUuid(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new CheckNetworkAnswer(command, false, msg); } catch (final Exception e) { final String msg = "CheckNetworkCommand failed with Exception:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new CheckNetworkAnswer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckSshCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckSshCommandWrapper.java index 2c318935c43d..2873040ae22b 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckSshCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckSshCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.check.CheckSshAnswer; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = CheckSshCommand.class) public final class CitrixCheckSshCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCheckSshCommandWrapper.class); @Override public Answer execute(final CheckSshCommand command, final CitrixResourceBase citrixResourceBase) { @@ -41,8 +39,8 @@ public Answer execute(final CheckSshCommand command, final CitrixResourceBase ci final String privateIp = command.getIp(); final int cmdPort = command.getPort(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port, " + privateIp + ":" + cmdPort); } try { @@ -56,8 +54,8 @@ public Answer execute(final CheckSshCommand command, final CitrixResourceBase ci return new CheckSshAnswer(command, e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping command port succeeded for vm " + vmName); + if (logger.isDebugEnabled()) { + logger.debug("Ping command port succeeded for vm " + vmName); } return new CheckSshAnswer(command); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckVirtualMachineCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckVirtualMachineCommandWrapper.java index c3e75d490155..87bb7fd4e489 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckVirtualMachineCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCheckVirtualMachineCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckVirtualMachineAnswer; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = CheckVirtualMachineCommand.class) public final class CitrixCheckVirtualMachineCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCheckVirtualMachineCommandWrapper.class); @Override public Answer execute(final CheckVirtualMachineCommand command, final CitrixResourceBase citrixResourceBase) { @@ -42,7 +40,7 @@ public Answer execute(final CheckVirtualMachineCommand command, final CitrixReso final PowerState powerState = citrixResourceBase.getVmState(conn, vmName); final Integer vncPort = null; if (powerState == PowerState.PowerOn) { - s_logger.debug("3. The VM " + vmName + " is in Running state"); + logger.debug("3. 
The VM " + vmName + " is in Running state"); } return new CheckVirtualMachineAnswer(command, powerState, vncPort); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupNetworkRulesCmdWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupNetworkRulesCmdWrapper.java index 74c23d830213..a367a67019bf 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupNetworkRulesCmdWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupNetworkRulesCmdWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CleanupNetworkRulesCmd; @@ -31,7 +30,6 @@ @ResourceWrapper(handles = CleanupNetworkRulesCmd.class) public final class CitrixCleanupNetworkRulesCmdWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCleanupNetworkRulesCmdWrapper.class); @Override public Answer execute(final CleanupNetworkRulesCmd command, final CitrixResourceBase citrixResourceBase) { @@ -44,12 +42,12 @@ public Answer execute(final CleanupNetworkRulesCmd command, final CitrixResource final int numCleaned = Integer.parseInt(result); if (result == null || result.isEmpty() || numCleaned < 0) { - s_logger.warn("Failed to cleanup rules for host " + citrixResourceBase.getHost().getIp()); + logger.warn("Failed to cleanup rules for host " + citrixResourceBase.getHost().getIp()); return new Answer(command, false, result); } if (numCleaned > 0) { - s_logger.info("Cleaned up rules for " + result + " vms on host " + citrixResourceBase.getHost().getIp()); + logger.info("Cleaned up rules for " + result + " vms on host " + citrixResourceBase.getHost().getIp()); } 
return new Answer(command, true, result); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupPersistentNetworkResourceCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupPersistentNetworkResourceCommandWrapper.java index 3be321cf6b8b..43329eb3df51 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupPersistentNetworkResourceCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCleanupPersistentNetworkResourceCommandWrapper.java @@ -17,7 +17,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CleanupPersistentNetworkResourceAnswer; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = CleanupPersistentNetworkResourceCommand.class) public class CitrixCleanupPersistentNetworkResourceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCleanupPersistentNetworkResourceCommandWrapper.class); @Override public Answer execute(CleanupPersistentNetworkResourceCommand command, CitrixResourceBase citrixResourceBase) { @@ -48,7 +46,7 @@ public Answer execute(CleanupPersistentNetworkResourceCommand command, CitrixRes return new CleanupPersistentNetworkResourceAnswer(command, true, "Successfully deleted network VLAN on host: "+ host.getIp()); } catch (final Exception e) { final String msg = " Failed to cleanup network VLAN on host: " + host.getIp() + " due to: " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new CleanupPersistentNetworkResourceAnswer(command, false, msg); } } diff --git 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixClusterVMMetaDataSyncCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixClusterVMMetaDataSyncCommandWrapper.java index a85fb4491538..e02fc7060ca9 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixClusterVMMetaDataSyncCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixClusterVMMetaDataSyncCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.HashMap; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ClusterVMMetaDataSyncAnswer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = ClusterVMMetaDataSyncCommand.class) public final class CitrixClusterVMMetaDataSyncCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixClusterVMMetaDataSyncCommandWrapper.class); @Override public Answer execute(final ClusterVMMetaDataSyncCommand command, final CitrixResourceBase citrixResourceBase) { @@ -50,7 +48,7 @@ public Answer execute(final ClusterVMMetaDataSyncCommand command, final CitrixRe return new ClusterVMMetaDataSyncAnswer(command.getClusterId(), null); } } catch (final Throwable e) { - s_logger.warn("Check for master failed, failing the Cluster sync VMMetaData command"); + logger.warn("Check for master failed, failing the Cluster sync VMMetaData command"); return new ClusterVMMetaDataSyncAnswer(command.getClusterId(), null); } final HashMap vmMetadatum = citrixResourceBase.clusterVMMetaDataSync(conn); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixConsoleProxyLoadCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixConsoleProxyLoadCommandWrapper.java index 70334580b7c1..740dedec6c82 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixConsoleProxyLoadCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixConsoleProxyLoadCommandWrapper.java @@ -26,7 +26,6 @@ import java.net.URL; import java.net.URLConnection; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -36,7 +35,6 @@ public abstract class CitrixConsoleProxyLoadCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixConsoleProxyLoadCommandWrapper.class); protected Answer executeProxyLoadScan(final Command cmd, final long proxyVmId, final String proxyVmName, final String proxyManagementIp, final int cmdPort) { String result = null; @@ -68,12 +66,12 @@ protected Answer executeProxyLoadScan(final Command cmd, final long proxyVmId, f try { is.close(); } catch (final IOException e) { - s_logger.warn("Exception when closing , console proxy address : " + proxyManagementIp); + logger.warn("Exception when closing , console proxy address : " + proxyManagementIp); success = false; } } } catch (final IOException e) { - s_logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp); + logger.warn("Unable to open console proxy command port url, console proxy address : " + proxyManagementIp); success = false; } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCoppyToSecondaryStorageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCoppyToSecondaryStorageCommandWrapper.java index 
45bbf4a631c2..2ca389478b00 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCoppyToSecondaryStorageCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCoppyToSecondaryStorageCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; import org.apache.cloudstack.diagnostics.CopyToSecondaryStorageCommand; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase; @@ -30,13 +29,12 @@ @ResourceWrapper(handles = CopyToSecondaryStorageCommand.class) public class CitrixCoppyToSecondaryStorageCommandWrapper extends CommandWrapper { - public static final Logger LOGGER = Logger.getLogger(CitrixCoppyToSecondaryStorageCommandWrapper.class); @Override public Answer execute(CopyToSecondaryStorageCommand cmd, CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); String msg = String.format("Copying diagnostics zip file %s from system vm %s to secondary storage %s", cmd.getFileName(), cmd.getSystemVmIp(), cmd.getSecondaryStorageUrl()); - LOGGER.debug(msg); + logger.debug(msg); // Allow the hypervisor host to copy file from system VM to mounted secondary storage return citrixResourceBase.copyDiagnosticsFileToSecondaryStorage(conn, cmd); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateCommandWrapper.java index 928c8f04a9dd..75bdd63bd88a 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateCommandWrapper.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.HashMap; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.CreateAnswer; @@ -41,7 +40,6 @@ @ResourceWrapper(handles = CreateCommand.class) public final class CitrixCreateCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCreateCommandWrapper.class); @Override public Answer execute(final CreateCommand command, final CitrixResourceBase citrixResourceBase) { @@ -72,14 +70,14 @@ public Answer execute(final CreateCommand command, final CitrixResourceBase citr VDI.Record vdir; vdir = vdi.getRecord(conn); - s_logger.debug("Successfully created VDI for " + command + ". Uuid = " + vdir.uuid); + logger.debug("Successfully created VDI for " + command + ". Uuid = " + vdir.uuid); final VolumeTO vol = new VolumeTO(command.getVolumeId(), dskch.getType(), pool.getType(), pool.getUuid(), vdir.nameLabel, pool.getPath(), vdir.uuid, vdir.virtualSize, null); return new CreateAnswer(command, vol); } catch (final Exception e) { - s_logger.warn("Unable to create volume; Pool=" + pool + "; Disk: " + dskch, e); + logger.warn("Unable to create volume; Pool=" + pool + "; Disk: " + dskch, e); return new CreateAnswer(command, e); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java index dd4290c89f72..7aef00601bef 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = CreateStoragePoolCommand.class) public final class CitrixCreateStoragePoolCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCreateStoragePoolCommandWrapper.class); @Override public Answer execute(final CreateStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) { @@ -68,7 +66,7 @@ public Answer execute(final CreateStoragePoolCommand command, final CitrixResour final String msg = "Catch Exception " + e.getClass().getName() + ", create StoragePool failed due to " + e.toString() + " on host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java index 68c295717c3e..85cfa5d8aa8f 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateVMSnapshotCommandWrapper.java @@ -25,7 +25,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateVMSnapshotAnswer; @@ -47,7 +46,6 @@ 
@ResourceWrapper(handles = CreateVMSnapshotCommand.class) public final class CitrixCreateVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixCreateVMSnapshotCommandWrapper.class); @Override public Answer execute(final CreateVMSnapshotCommand command, final CitrixResourceBase citrixResourceBase) { @@ -163,13 +161,13 @@ public Answer execute(final CreateVMSnapshotCommand command, final CitrixResourc } else { msg = e.toString(); } - s_logger.warn("Creating VM Snapshot " + command.getTarget().getSnapshotName() + " failed due to: " + msg, e); + logger.warn("Creating VM Snapshot " + command.getTarget().getSnapshotName() + " failed due to: " + msg, e); return new CreateVMSnapshotAnswer(command, false, msg); } finally { try { if (!success) { if (vmSnapshot != null) { - s_logger.debug("Delete existing VM Snapshot " + vmSnapshotName + " after making VolumeTO failed"); + logger.debug("Delete existing VM Snapshot " + vmSnapshotName + " after making VolumeTO failed"); final Set vbds = vmSnapshot.getVBDs(conn); for (final VBD vbd : vbds) { final VBD.Record vbdr = vbd.getRecord(conn); @@ -187,7 +185,7 @@ public Answer execute(final CreateVMSnapshotCommand command, final CitrixResourc } } } catch (final Exception e2) { - s_logger.error("delete snapshot error due to " + e2.getMessage()); + logger.error("delete snapshot error due to " + e2.getMessage()); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java index d3cfc2514746..bdf63414a596 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.DeleteStoragePoolCommand; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = DeleteStoragePoolCommand.class) public final class CitrixDeleteStoragePoolCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixDeleteStoragePoolCommandWrapper.class); @Override public Answer execute(final DeleteStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) { @@ -67,7 +65,7 @@ public Answer execute(final DeleteStoragePoolCommand command, final CitrixResour final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + poolTO.getHost() + poolTO.getPath(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(command, false, msg); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteVMSnapshotCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteVMSnapshotCommandWrapper.java index b74111e8441c..5e7ca01ea1dd 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteVMSnapshotCommandWrapper.java @@ -25,7 +25,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.DeleteVMSnapshotAnswer; @@ -43,7 +42,6 @@ @ResourceWrapper(handles = 
DeleteVMSnapshotCommand.class) public final class CitrixDeleteVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixDeleteVMSnapshotCommandWrapper.class); @Override public Answer execute(final DeleteVMSnapshotCommand command, final CitrixResourceBase citrixResourceBase) { @@ -54,7 +52,7 @@ public Answer execute(final DeleteVMSnapshotCommand command, final CitrixResourc final List vdiList = new ArrayList(); final Set snapshots = VM.getByNameLabel(conn, snapshotName); if (snapshots == null || snapshots.size() == 0) { - s_logger.warn("VM snapshot with name " + snapshotName + " does not exist, assume it is already deleted"); + logger.warn("VM snapshot with name " + snapshotName + " does not exist, assume it is already deleted"); return new DeleteVMSnapshotAnswer(command, command.getVolumeTOs()); } final VM snapshot = snapshots.iterator().next(); @@ -90,7 +88,7 @@ public Answer execute(final DeleteVMSnapshotCommand command, final CitrixResourc return new DeleteVMSnapshotAnswer(command, command.getVolumeTOs()); } catch (final Exception e) { - s_logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e); + logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e); return new DeleteVMSnapshotAnswer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDestroyCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDestroyCommandWrapper.java index d2cf3d0bdd62..0c5c32d7779a 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDestroyCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDestroyCommandWrapper.java @@ -21,7 +21,6 @@ import 
java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.DestroyCommand; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = DestroyCommand.class) public final class CitrixDestroyCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixDestroyCommandWrapper.class); @Override public Answer execute(final DestroyCommand command, final CitrixResourceBase citrixResourceBase) { @@ -55,7 +53,7 @@ public Answer execute(final DestroyCommand command, final CitrixResourceBase cit vbds = vdi.getVBDs(conn); } catch (final Exception e) { final String msg = "VDI getVBDS for " + volumeUUID + " failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } for (final VBD vbd : vbds) { @@ -64,7 +62,7 @@ public Answer execute(final DestroyCommand command, final CitrixResourceBase cit vbd.destroy(conn); } catch (final Exception e) { final String msg = "VM destroy for " + volumeUUID + " failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } } @@ -76,7 +74,7 @@ public Answer execute(final DestroyCommand command, final CitrixResourceBase cit vdi.destroy(conn); } catch (final Exception e) { final String msg = "VDI destroy for " + volumeUUID + " failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHostStatsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHostStatsCommandWrapper.java index 256d862ba25b..ff9d2afd6f33 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHostStatsCommandWrapper.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHostStatsCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetHostStatsAnswer; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = GetHostStatsCommand.class) public final class CitrixGetHostStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetHostStatsCommandWrapper.class); @Override public Answer execute(final GetHostStatsCommand command, final CitrixResourceBase citrixResourceBase) { @@ -43,7 +41,7 @@ public Answer execute(final GetHostStatsCommand command, final CitrixResourceBas return new GetHostStatsAnswer(command, hostStats); } catch (final Exception e) { final String msg = "Unable to get Host stats" + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new GetHostStatsAnswer(command, null); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHypervisorGuestOsNamesCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHypervisorGuestOsNamesCommandWrapper.java index 0be3e5a5746b..72ae6fb132d9 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHypervisorGuestOsNamesCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetHypervisorGuestOsNamesCommandWrapper.java @@ -25,7 +25,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetHypervisorGuestOsNamesAnswer; @@ -40,14 +39,12 @@ 
@ResourceWrapper(handles = GetHypervisorGuestOsNamesCommand.class) public final class CitrixGetHypervisorGuestOsNamesCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetHypervisorGuestOsNamesCommandWrapper.class); - @Override public Answer execute(final GetHypervisorGuestOsNamesCommand command, final CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); String keyword = command.getKeyword(); try { - s_logger.info("Getting guest os names in the hypervisor"); + logger.info("Getting guest os names in the hypervisor"); final Set vms = VM.getAll(conn); if (CollectionUtils.isEmpty(vms)) { return new GetHypervisorGuestOsNamesAnswer(command, "Guest os names not found in the hypervisor"); @@ -69,7 +66,7 @@ public Answer execute(final GetHypervisorGuestOsNamesCommand command, final Citr } return new GetHypervisorGuestOsNamesAnswer(command, hypervisorGuestOsNames); } catch (final Exception e) { - s_logger.error("Failed to fetch hypervisor guest os names due to: " + e.getLocalizedMessage(), e); + logger.error("Failed to fetch hypervisor guest os names due to: " + e.getLocalizedMessage(), e); return new GetHypervisorGuestOsNamesAnswer(command, e.getLocalizedMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetStorageStatsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetStorageStatsCommandWrapper.java index c99d90e0608a..d6dd6c5fcacd 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetStorageStatsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetStorageStatsCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import 
org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = GetStorageStatsCommand.class) public final class CitrixGetStorageStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetStorageStatsCommandWrapper.class); @Override public Answer execute(final GetStorageStatsCommand command, final CitrixResourceBase citrixResourceBase) { @@ -46,7 +44,7 @@ public Answer execute(final GetStorageStatsCommand command, final CitrixResource final Set srs = SR.getByNameLabel(conn, command.getStorageId()); if (srs.size() != 1) { final String msg = "There are " + srs.size() + " storageid: " + command.getStorageId(); - s_logger.warn(msg); + logger.warn(msg); return new GetStorageStatsAnswer(command, msg); } final SR sr = srs.iterator().next(); @@ -56,15 +54,15 @@ public Answer execute(final GetStorageStatsCommand command, final CitrixResource return new GetStorageStatsAnswer(command, capacity, used); } catch (final XenAPIException e) { final String msg = "GetStorageStats Exception:" + e.toString() + "host:" + citrixResourceBase.getHost().getUuid() + "storageid: " + command.getStorageId(); - s_logger.warn(msg); + logger.warn(msg); return new GetStorageStatsAnswer(command, msg); } catch (final XmlRpcException e) { final String msg = "GetStorageStats Exception:" + e.getMessage() + "host:" + citrixResourceBase.getHost().getUuid() + "storageid: " + command.getStorageId(); - s_logger.warn(msg); + logger.warn(msg); return new GetStorageStatsAnswer(command, msg); } catch (final Exception e) { final String msg = "GetStorageStats Exception:" + e.getMessage() + "host:" + citrixResourceBase.getHost().getUuid() + "storageid: " + command.getStorageId(); - s_logger.warn(msg); + logger.warn(msg); return new GetStorageStatsAnswer(command, msg); } } diff --git 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmIpAddressCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmIpAddressCommandWrapper.java index b67ef0850baf..a324ec1bdada 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmIpAddressCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmIpAddressCommandWrapper.java @@ -27,7 +27,6 @@ import com.xensource.xenapi.VM; import com.xensource.xenapi.VMGuestMetrics; import com.xensource.xenapi.Types; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = GetVmIpAddressCommand.class) public final class CitrixGetVmIpAddressCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetVmIpAddressCommandWrapper.class); @Override public Answer execute(final GetVmIpAddressCommand command, final CitrixResourceBase citrixResourceBase) { @@ -63,16 +61,16 @@ public Answer execute(final GetVmIpAddressCommand command, final CitrixResourceB } if (vmIp != null) { - s_logger.debug("VM " +vmName + " ip address got retrieved "+vmIp); + logger.debug("VM " +vmName + " ip address got retrieved "+vmIp); result = true; return new Answer(command, result, vmIp); } }catch (Types.XenAPIException e) { - s_logger.debug("Got exception in GetVmIpAddressCommand "+ e.getMessage()); + logger.debug("Got exception in GetVmIpAddressCommand "+ e.getMessage()); errorMsg = "Failed to retrived vm ip addr, exception: "+e.getMessage(); }catch (XmlRpcException e) { - s_logger.debug("Got exception in GetVmIpAddressCommand "+ e.getMessage()); + logger.debug("Got exception in GetVmIpAddressCommand "+ 
e.getMessage()); errorMsg = "Failed to retrived vm ip addr, exception: "+e.getMessage(); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmStatsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmStatsCommandWrapper.java index 329ce4977b44..b2c06c0365bd 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmStatsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVmStatsCommandWrapper.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -41,7 +40,6 @@ @ResourceWrapper(handles = GetVmStatsCommand.class) public final class CitrixGetVmStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetVmStatsCommandWrapper.class); @Override public Answer execute(final GetVmStatsCommand command, final CitrixResourceBase citrixResourceBase) { @@ -73,11 +71,11 @@ public Answer execute(final GetVmStatsCommand command, final CitrixResourceBase return new GetVmStatsAnswer(command, vmStatsNameMap); } catch (final XenAPIException e) { final String msg = "Unable to get VM stats" + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new GetVmStatsAnswer(command, vmStatsNameMap); } catch (final XmlRpcException e) { final String msg = "Unable to get VM stats" + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new GetVmStatsAnswer(command, vmStatsNameMap); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVncPortCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVncPortCommandWrapper.java index e95430aeebb4..362b6b02b2b2 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVncPortCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVncPortCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.GetVncPortAnswer; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = GetVncPortCommand.class) public final class CitrixGetVncPortCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetVncPortCommandWrapper.class); @Override public Answer execute(final GetVncPortCommand command, final CitrixResourceBase citrixResourceBase) { @@ -51,7 +49,7 @@ public Answer execute(final GetVncPortCommand command, final CitrixResourceBase } } catch (final Exception e) { final String msg = "Unable to get vnc port due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new GetVncPortAnswer(command, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVolumeStatsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVolumeStatsCommandWrapper.java index bb959621a43c..f516d2545f94 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVolumeStatsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixGetVolumeStatsCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.HashMap; -import org.apache.log4j.Logger; import 
com.cloud.agent.api.Answer; import com.cloud.agent.api.GetVolumeStatsAnswer; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = GetVolumeStatsCommand.class) public final class CitrixGetVolumeStatsCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixGetVolumeStatsCommandWrapper.class); @Override public Answer execute(final GetVolumeStatsCommand cmd, final CitrixResourceBase citrixResourceBase) { @@ -48,11 +46,11 @@ public Answer execute(final GetVolumeStatsCommand cmd, final CitrixResourceBase VolumeStatsEntry vse = new VolumeStatsEntry(volumeUuid, vdi.getPhysicalUtilisation(conn), vdi.getVirtualSize(conn)); statEntry.put(volumeUuid, vse); } catch (Exception e) { - s_logger.warn("Unable to get volume stats", e); + logger.warn("Unable to get volume stats", e); statEntry.put(volumeUuid, new VolumeStatsEntry(volumeUuid, -1, -1)); } } else { - s_logger.warn("VDI not found for path " + volumeUuid); + logger.warn("VDI not found for path " + volumeUuid); statEntry.put(volumeUuid, new VolumeStatsEntry(volumeUuid, -1L, -1L)); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixListDataStoreObjectsCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixListDataStoreObjectsCommandWrapper.java index 1be7879d602e..2367f6d608bf 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixListDataStoreObjectsCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixListDataStoreObjectsCommandWrapper.java @@ -25,25 +25,22 @@ import com.cloud.resource.ResourceWrapper; import com.xensource.xenapi.Types.XenAPIException; import org.apache.cloudstack.storage.command.browser.ListDataStoreObjectsCommand; -import org.apache.log4j.Logger; import 
org.apache.xmlrpc.XmlRpcException; @ResourceWrapper(handles = ListDataStoreObjectsCommand.class) public final class CitrixListDataStoreObjectsCommandWrapper extends CommandWrapper { - private static final Logger LOGGER = Logger.getLogger(CitrixListDataStoreObjectsCommandWrapper.class); - @Override public Answer execute(final ListDataStoreObjectsCommand command, final CitrixResourceBase citrixResourceBase) { try { return citrixResourceBase.listFilesAtPath(command); } catch (XenAPIException e) { - LOGGER.warn("XenAPI exception", e); + logger.warn("XenAPI exception", e); } catch (XmlRpcException e) { - LOGGER.warn("Xml Rpc Exception", e); + logger.warn("Xml Rpc Exception", e); } catch (Exception e) { - LOGGER.warn("Caught exception", e); + logger.warn("Caught exception", e); } return null; } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMaintainCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMaintainCommandWrapper.java index 84c043ab1719..065fd9fe0ab4 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMaintainCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMaintainCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Iterator; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -38,7 +37,6 @@ @ResourceWrapper(handles = MaintainCommand.class) public final class CitrixMaintainCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixMaintainCommandWrapper.class); @Override public Answer execute(final MaintainCommand command, final CitrixResourceBase citrixResourceBase) { @@ -53,7 +51,7 @@ public Answer execute(final MaintainCommand command, final CitrixResourceBase 
ci // Adding this check because could not get the mock to work. Will push the code and fix it afterwards. if (hr == null) { - s_logger.warn("Host.Record is null."); + logger.warn("Host.Record is null."); return new MaintainAnswer(command, false, "Host.Record is null"); } @@ -67,10 +65,10 @@ public Answer execute(final MaintainCommand command, final CitrixResourceBase ci host.setTags(conn, hr.tags); return new MaintainAnswer(command); } catch (final XenAPIException e) { - s_logger.warn("Unable to put server in maintainence mode", e); + logger.warn("Unable to put server in maintainence mode", e); return new MaintainAnswer(command, false, e.getMessage()); } catch (final XmlRpcException e) { - s_logger.warn("Unable to put server in maintainence mode", e); + logger.warn("Unable to put server in maintainence mode", e); return new MaintainAnswer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMigrateCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMigrateCommandWrapper.java index 68ee19a8058c..269eb5ceb8bf 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMigrateCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixMigrateCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.Set; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -42,7 +41,6 @@ @ResourceWrapper(handles = MigrateCommand.class) public class CitrixMigrateCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixMigrateCommandWrapper.class); @Override public Answer execute(final MigrateCommand command, final CitrixResourceBase 
citrixResourceBase) { @@ -65,7 +63,7 @@ public Answer execute(final MigrateCommand command, final CitrixResourceBase cit } if (dsthost == null) { final String msg = "Migration failed due to unable to find host " + dstHostIpAddr + " in XenServer pool " + citrixResourceBase.getHost().getPool(); - s_logger.warn(msg); + logger.warn(msg); return new MigrateAnswer(command, false, msg, null); } for (final VM vm : vms) { @@ -93,12 +91,12 @@ public Answer execute(final MigrateCommand command, final CitrixResourceBase cit // Attach the config drive iso device to VM VM vm = vms.iterator().next(); if (!citrixResourceBase.attachConfigDriveIsoToVm(conn, vm)) { - s_logger.debug("Config drive ISO attach failed after migration for vm "+vmName); + logger.debug("Config drive ISO attach failed after migration for vm "+vmName); } return new MigrateAnswer(command, true, "migration succeeded", null); } catch (final Exception e) { - s_logger.warn(e.getMessage(), e); + logger.warn(e.getMessage(), e); return new MigrateAnswer(command, false, e.getMessage(), null); } } @@ -111,9 +109,9 @@ protected void destroyMigratedVmNetworkRulesOnSourceHost(final MigrateCommand co if (citrixResourceBase.canBridgeFirewall()) { final String result = citrixResourceBase.callHostPlugin(conn, "vmops", "destroy_network_rules_for_vm", "vmName", command.getVmName()); if (BooleanUtils.toBoolean(result)) { - s_logger.debug(String.format("Removed network rules from source host [%s] for migrated vm [%s]", dsthost.getHostname(conn), command.getVmName())); + logger.debug(String.format("Removed network rules from source host [%s] for migrated vm [%s]", dsthost.getHostname(conn), command.getVmName())); } else { - s_logger.warn(String.format("Failed to remove network rules from source host [%s] for migrated vm [%s]", dsthost.getHostname(conn), command.getVmName())); + logger.warn(String.format("Failed to remove network rules from source host [%s] for migrated vm [%s]", dsthost.getHostname(conn), command.getVmName())); } } 
} diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java index 07fe32a4a803..63cb675c6143 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixModifyStoragePoolCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyStoragePoolAnswer; @@ -41,7 +40,6 @@ @ResourceWrapper(handles = ModifyStoragePoolCommand.class) public final class CitrixModifyStoragePoolCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixModifyStoragePoolCommandWrapper.class); @Override public Answer execute(final ModifyStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) { @@ -60,7 +58,7 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour final long available = capacity - sr.getPhysicalUtilisation(conn); if (capacity == -1) { final String msg = "Pool capacity is -1! 
pool: " + pool.getHost() + pool.getPath(); - s_logger.warn(msg); + logger.warn(msg); return new Answer(command, false, msg); } final Map tInfo = new HashMap(); @@ -69,12 +67,12 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour } catch (final XenAPIException e) { final String msg = "ModifyStoragePoolCommand add XenAPIException:" + e.toString() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } catch (final Exception e) { final String msg = "ModifyStoragePoolCommand add XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } } else { @@ -91,12 +89,12 @@ public Answer execute(final ModifyStoragePoolCommand command, final CitrixResour } catch (final XenAPIException e) { final String msg = "ModifyStoragePoolCommand remove XenAPIException:" + e.toString() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } catch (final Exception e) { final String msg = "ModifyStoragePoolCommand remove XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new Answer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixNetworkElementCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixNetworkElementCommandWrapper.java index 184187a051c8..2efe38417754 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixNetworkElementCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixNetworkElementCommandWrapper.java @@ -25,11 +25,9 @@ import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; -import org.apache.log4j.Logger; @ResourceWrapper(handles = NetworkElementCommand.class) public final class CitrixNetworkElementCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixNetworkElementCommandWrapper.class); @Override public Answer execute(final NetworkElementCommand command, final CitrixResourceBase citrixResourceBase) { final VirtualRoutingResource routingResource = citrixResourceBase.getVirtualRoutingResource(); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateGreTunnelCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateGreTunnelCommandWrapper.java index 45ddda278150..2e87f03c7e93 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateGreTunnelCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateGreTunnelCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = OvsCreateGreTunnelCommand.class) public final class CitrixOvsCreateGreTunnelCommandWrapper extends CommandWrapper { - private static final Logger s_logger = 
Logger.getLogger(CitrixOvsCreateGreTunnelCommandWrapper.class); @Override public Answer execute(final OvsCreateGreTunnelCommand command, final CitrixResourceBase citrixResourceBase) { @@ -57,11 +55,11 @@ public Answer execute(final OvsCreateGreTunnelCommand command, final CitrixResou return new OvsCreateGreTunnelAnswer(command, true, result, citrixResourceBase.getHost().getIp(), bridge, Integer.parseInt(res[1])); } } catch (final BadServerResponse e) { - s_logger.error("An error occurred while creating a GRE tunnel to " + command.getRemoteIp() + " on host " + citrixResourceBase.getHost().getIp(), e); + logger.error("An error occurred while creating a GRE tunnel to " + command.getRemoteIp() + " on host " + citrixResourceBase.getHost().getIp(), e); } catch (final XenAPIException e) { - s_logger.error("An error occurred while creating a GRE tunnel to " + command.getRemoteIp() + " on host " + citrixResourceBase.getHost().getIp(), e); + logger.error("An error occurred while creating a GRE tunnel to " + command.getRemoteIp() + " on host " + citrixResourceBase.getHost().getIp(), e); } catch (final XmlRpcException e) { - s_logger.error("An error occurred while creating a GRE tunnel to " + command.getRemoteIp() + " on host " + citrixResourceBase.getHost().getIp(), e); + logger.error("An error occurred while creating a GRE tunnel to " + command.getRemoteIp() + " on host " + citrixResourceBase.getHost().getIp(), e); } return new OvsCreateGreTunnelAnswer(command, false, "EXCEPTION", citrixResourceBase.getHost().getIp(), bridge); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateTunnelCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateTunnelCommandWrapper.java index f051b5cc1d4d..98888c248e1a 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateTunnelCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsCreateTunnelCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsCreateTunnelAnswer; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = OvsCreateTunnelCommand.class) public final class CitrixOvsCreateTunnelCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsCreateTunnelCommandWrapper.class); @Override public Answer execute(final OvsCreateTunnelCommand command, final CitrixResourceBase citrixResourceBase) { @@ -42,7 +40,7 @@ public Answer execute(final OvsCreateTunnelCommand command, final CitrixResource try { final Network nw = citrixResourceBase.findOrCreateTunnelNetwork(conn, command.getNetworkName()); if (nw == null) { - s_logger.debug("Error during bridge setup"); + logger.debug("Error during bridge setup"); return new OvsCreateTunnelAnswer(command, false, "Cannot create network", bridge); } @@ -61,8 +59,8 @@ public Answer execute(final OvsCreateTunnelCommand command, final CitrixResource return new OvsCreateTunnelAnswer(command, false, result, bridge); } } catch (final Exception e) { - s_logger.debug("Error during tunnel setup"); - s_logger.warn("Caught execption when creating ovs tunnel", e); + logger.debug("Error during tunnel setup"); + logger.warn("Caught execption when creating ovs tunnel", e); return new OvsCreateTunnelAnswer(command, false, e.getMessage(), bridge); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDeleteFlowCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDeleteFlowCommandWrapper.java index 511b870ffba6..bcf7170e908a 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDeleteFlowCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDeleteFlowCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -35,7 +34,6 @@ @ResourceWrapper(handles = OvsDeleteFlowCommand.class) public final class CitrixOvsDeleteFlowCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsDeleteFlowCommandWrapper.class); @Override public Answer execute(final OvsDeleteFlowCommand command, final CitrixResourceBase citrixResourceBase) { @@ -53,11 +51,11 @@ public Answer execute(final OvsDeleteFlowCommand command, final CitrixResourceBa return new Answer(command, false, result); } } catch (final BadServerResponse e) { - s_logger.error("Failed to delete flow", e); + logger.error("Failed to delete flow", e); } catch (final XenAPIException e) { - s_logger.error("Failed to delete flow", e); + logger.error("Failed to delete flow", e); } catch (final XmlRpcException e) { - s_logger.error("Failed to delete flow", e); + logger.error("Failed to delete flow", e); } return new Answer(command, false, "failed to delete flow for " + command.getVmName()); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyBridgeCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyBridgeCommandWrapper.java index 4aaa9c845de6..ceac995b5588 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyBridgeCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyBridgeCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsDestroyBridgeCommand; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = OvsDestroyBridgeCommand.class) public final class CitrixOvsDestroyBridgeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsDestroyBridgeCommandWrapper.class); @Override public Answer execute(final OvsDestroyBridgeCommand command, final CitrixResourceBase citrixResourceBase) { @@ -44,11 +42,11 @@ public Answer execute(final OvsDestroyBridgeCommand command, final CitrixResourc citrixResourceBase.destroyTunnelNetwork(conn, nw, command.getHostId()); - s_logger.debug("OVS Bridge destroyed"); + logger.debug("OVS Bridge destroyed"); return new Answer(command, true, null); } catch (final Exception e) { - s_logger.warn("caught execption when destroying ovs bridge", e); + logger.warn("caught execption when destroying ovs bridge", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyTunnelCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyTunnelCommandWrapper.java index dffeeda91e65..c54c27db4bf5 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyTunnelCommandWrapper.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsDestroyTunnelCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsDestroyTunnelCommand; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = OvsDestroyTunnelCommand.class) public final class CitrixOvsDestroyTunnelCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsDestroyTunnelCommandWrapper.class); @Override public Answer execute(final OvsDestroyTunnelCommand command, final CitrixResourceBase citrixResourceBase) { @@ -40,7 +38,7 @@ public Answer execute(final OvsDestroyTunnelCommand command, final CitrixResourc try { final Network nw = citrixResourceBase.findOrCreateTunnelNetwork(conn, command.getBridgeName()); if (nw == null) { - s_logger.warn("Unable to find tunnel network for GRE key:" + command.getBridgeName()); + logger.warn("Unable to find tunnel network for GRE key:" + command.getBridgeName()); return new Answer(command, false, "No network found"); } @@ -53,7 +51,7 @@ public Answer execute(final OvsDestroyTunnelCommand command, final CitrixResourc return new Answer(command, false, result); } } catch (final Exception e) { - s_logger.warn("caught execption when destroy ovs tunnel", e); + logger.warn("caught execption when destroy ovs tunnel", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsFetchInterfaceCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsFetchInterfaceCommandWrapper.java index 4a03acfc1a7e..3a1f3971d32e 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsFetchInterfaceCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsFetchInterfaceCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -38,7 +37,6 @@ @ResourceWrapper(handles = OvsFetchInterfaceCommand.class) public final class CitrixOvsFetchInterfaceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsFetchInterfaceCommandWrapper.class); @Override public Answer execute(final OvsFetchInterfaceCommand command, final CitrixResourceBase citrixResourceBase) { @@ -47,26 +45,26 @@ public Answer execute(final OvsFetchInterfaceCommand command, final CitrixResour if (citrixResourceBase.isXcp()) { label = citrixResourceBase.getLabel(); } - s_logger.debug("Will look for network with name-label:" + label + " on host " + citrixResourceBase.getHost().getIp()); + logger.debug("Will look for network with name-label:" + label + " on host " + citrixResourceBase.getHost().getIp()); final Connection conn = citrixResourceBase.getConnection(); try { final XsLocalNetwork nw = citrixResourceBase.getNetworkByName(conn, label); if(nw == null) { throw new CloudRuntimeException("Unable to locate the network with name-label: " + label + " on host: " + citrixResourceBase.getHost().getIp()); } - s_logger.debug("Network object:" + nw.getNetwork().getUuid(conn)); + logger.debug("Network object:" + nw.getNetwork().getUuid(conn)); final PIF pif = nw.getPif(conn); final PIF.Record pifRec = pif.getRecord(conn); - s_logger.debug("PIF object:" + pifRec.uuid + "(" + pifRec.device + ")"); + logger.debug("PIF object:" + pifRec.uuid + "(" + pifRec.device + ")"); return new OvsFetchInterfaceAnswer(command, 
true, "Interface " + pifRec.device + " retrieved successfully", pifRec.IP, pifRec.netmask, pifRec.MAC); } catch (final BadServerResponse e) { - s_logger.error("An error occurred while fetching the interface for " + label + " on host " + citrixResourceBase.getHost().getIp(), e); + logger.error("An error occurred while fetching the interface for " + label + " on host " + citrixResourceBase.getHost().getIp(), e); return new OvsFetchInterfaceAnswer(command, false, "EXCEPTION:" + e.getMessage()); } catch (final XenAPIException e) { - s_logger.error("An error occurred while fetching the interface for " + label + " on host " + citrixResourceBase.getHost().getIp(), e); + logger.error("An error occurred while fetching the interface for " + label + " on host " + citrixResourceBase.getHost().getIp(), e); return new OvsFetchInterfaceAnswer(command, false, "EXCEPTION:" + e.getMessage()); } catch (final XmlRpcException e) { - s_logger.error("An error occurred while fetching the interface for " + label + " on host " + citrixResourceBase.getHost().getIp(), e); + logger.error("An error occurred while fetching the interface for " + label + " on host " + citrixResourceBase.getHost().getIp(), e); return new OvsFetchInterfaceAnswer(command, false, "EXCEPTION:" + e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetTagAndFlowCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetTagAndFlowCommandWrapper.java index 14e43f301dcb..d389056f2aa0 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetTagAndFlowCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetTagAndFlowCommandWrapper.java @@ -19,7 +19,6 @@ package 
com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = OvsSetTagAndFlowCommand.class) public final class CitrixOvsSetTagAndFlowCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsSetTagAndFlowCommandWrapper.class); @Override public Answer execute(final OvsSetTagAndFlowCommand command, final CitrixResourceBase citrixResourceBase) { @@ -54,7 +52,7 @@ public Answer execute(final OvsSetTagAndFlowCommand command, final CitrixResourc */ final String result = citrixResourceBase.callHostPlugin(conn, "ovsgre", "ovs_set_tag_and_flow", "bridge", bridge, "vmName", command.getVmName(), "tag", command.getTag(), "vlans", command.getVlans(), "seqno", command.getSeqNo()); - s_logger.debug("set flow for " + command.getVmName() + " " + result); + logger.debug("set flow for " + command.getVmName() + " " + result); if (result != null && result.equalsIgnoreCase("SUCCESS")) { return new OvsSetTagAndFlowAnswer(command, true, result); @@ -62,11 +60,11 @@ public Answer execute(final OvsSetTagAndFlowCommand command, final CitrixResourc return new OvsSetTagAndFlowAnswer(command, false, result); } } catch (final BadServerResponse e) { - s_logger.error("Failed to set tag and flow", e); + logger.error("Failed to set tag and flow", e); } catch (final XenAPIException e) { - s_logger.error("Failed to set tag and flow", e); + logger.error("Failed to set tag and flow", e); } catch (final XmlRpcException e) { - s_logger.error("Failed to set tag and flow", e); + logger.error("Failed to set tag and flow", e); } return new OvsSetTagAndFlowAnswer(command, false, "EXCEPTION"); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetupBridgeCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetupBridgeCommandWrapper.java index c3a54a0ceed9..0eb57c4f7e71 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetupBridgeCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsSetupBridgeCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsSetupBridgeCommand; @@ -31,7 +30,6 @@ @ResourceWrapper(handles = OvsSetupBridgeCommand.class) public final class CitrixOvsSetupBridgeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsSetupBridgeCommandWrapper.class); @Override public Answer execute(final OvsSetupBridgeCommand command, final CitrixResourceBase citrixResourceBase) { @@ -40,7 +38,7 @@ public Answer execute(final OvsSetupBridgeCommand command, final CitrixResourceB citrixResourceBase.findOrCreateTunnelNetwork(conn, command.getBridgeName()); citrixResourceBase.configureTunnelNetwork(conn, command.getNetworkId(), command.getHostId(), command.getBridgeName()); - s_logger.debug("OVS Bridge configured"); + logger.debug("OVS Bridge configured"); return new Answer(command, true, null); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcPhysicalTopologyConfigCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcPhysicalTopologyConfigCommandWrapper.java index d95a1fd5891f..034d35087300 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcPhysicalTopologyConfigCommandWrapper.java +++ 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcPhysicalTopologyConfigCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsVpcPhysicalTopologyConfigCommand; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = OvsVpcPhysicalTopologyConfigCommand.class) public final class CitrixOvsVpcPhysicalTopologyConfigCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsVpcPhysicalTopologyConfigCommandWrapper.class); @Override public Answer execute(final OvsVpcPhysicalTopologyConfigCommand command, final CitrixResourceBase citrixResourceBase) { @@ -52,7 +50,7 @@ public Answer execute(final OvsVpcPhysicalTopologyConfigCommand command, final C return new Answer(command, false, result); } } catch (final Exception e) { - s_logger.warn("caught exception while updating host with latest VPC topology", e); + logger.warn("caught exception while updating host with latest VPC topology", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcRoutingPolicyConfigCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcRoutingPolicyConfigCommandWrapper.java index 9193e029e14c..da6c7bef5554 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcRoutingPolicyConfigCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixOvsVpcRoutingPolicyConfigCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import 
com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsVpcRoutingPolicyConfigCommand; @@ -32,7 +31,6 @@ @ResourceWrapper(handles = OvsVpcRoutingPolicyConfigCommand.class) public final class CitrixOvsVpcRoutingPolicyConfigCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixOvsVpcRoutingPolicyConfigCommandWrapper.class); @Override public Answer execute(final OvsVpcRoutingPolicyConfigCommand command, final CitrixResourceBase citrixResourceBase) { @@ -52,7 +50,7 @@ public Answer execute(final OvsVpcRoutingPolicyConfigCommand command, final Citr return new Answer(command, false, result); } } catch (final Exception e) { - s_logger.warn("caught exception while updating host with latest routing policies", e); + logger.warn("caught exception while updating host with latest routing policies", e); return new Answer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPatchSystemVmCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPatchSystemVmCommandWrapper.java index 0f37bea15cb8..02f332654adb 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPatchSystemVmCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPatchSystemVmCommandWrapper.java @@ -29,13 +29,11 @@ import com.cloud.utils.validation.ChecksumUtil; import com.xensource.xenapi.Connection; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.File; @ResourceWrapper(handles = PatchSystemVmCommand.class) public class CitrixPatchSystemVmCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixPatchSystemVmCommandWrapper.class); private static int sshPort = 
CitrixResourceBase.DEFAULTDOMRSSHPORT; private static File pemFile = new File(CitrixResourceBase.SSHPRVKEYPATH); @@ -62,7 +60,7 @@ public Answer execute(PatchSystemVmCommand command, CitrixResourceBase serverRes String checksum = ChecksumUtil.calculateCurrentChecksum(sysVMName, "vms/cloud-scripts.tgz").trim(); if (!StringUtils.isEmpty(checksum) && checksum.equals(scriptChecksum) && !command.isForced()) { String msg = String.format("No change in the scripts checksum, not patching systemVM %s", sysVMName); - s_logger.info(msg); + logger.info(msg); return new PatchSystemVmAnswer(command, msg, lines[0], lines[1]); } @@ -79,7 +77,7 @@ public Answer execute(PatchSystemVmCommand command, CitrixResourceBase serverRes String res = patchResult.replace("\n", " "); String[] output = res.split(":"); if (output.length != 2) { - s_logger.warn("Failed to get the latest script version"); + logger.warn("Failed to get the latest script version"); } else { scriptVersion = output[1].split(" ")[0]; } @@ -96,12 +94,12 @@ private ExecutionResult getSystemVmVersionAndChecksum(CitrixResourceBase serverR result = serverResource.executeInVR(controlIp, VRScripts.VERSION, null); if (!result.isSuccess()) { String errMsg = String.format("GetSystemVMVersionCmd on %s failed, message %s", controlIp, result.getDetails()); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } } catch (final Exception e) { final String msg = "GetSystemVMVersionCmd failed due to " + e; - s_logger.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(msg, e); } return result; diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPlugNicCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPlugNicCommandWrapper.java index 6e954be805f5..2fc3aa517d8d 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPlugNicCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPlugNicCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.PlugNicAnswer; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = PlugNicCommand.class) public final class CitrixPlugNicCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixPlugNicCommandWrapper.class); @Override public Answer execute(final PlugNicCommand command, final CitrixResourceBase citrixResourceBase) { @@ -67,7 +65,7 @@ public Answer execute(final PlugNicCommand command, final CitrixResourceBase cit // redundant. if (counter > 2) { final String msg = " Plug Nic failed due to a VIF with the same mac " + nic.getMac() + " exists in more than 2 routers."; - s_logger.error(msg); + logger.error(msg); return new PlugNicAnswer(command, false, msg); } @@ -75,7 +73,7 @@ public Answer execute(final PlugNicCommand command, final CitrixResourceBase cit // VIF vif = getVifByMac(conn, vm, nic.getMac()); // if (vif != null) { // final String msg = " Plug Nic failed due to a VIF with the same mac " + nic.getMac() + " exists"; - // s_logger.warn(msg); + // logger.warn(msg); // return new PlugNicAnswer(cmd, false, msg); // } @@ -87,7 +85,7 @@ public Answer execute(final PlugNicCommand command, final CitrixResourceBase cit return new PlugNicAnswer(command, true, "success"); } catch (final Exception e) { final String msg = " Plug Nic failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new PlugNicAnswer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java index 8a8ebb418058..806016a21d9f 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrepareForMigrationCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.PrepareForMigrationAnswer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = PrepareForMigrationCommand.class) public final class CitrixPrepareForMigrationCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixPrepareForMigrationCommandWrapper.class); @Override public Answer execute(final PrepareForMigrationCommand command, final CitrixResourceBase citrixResourceBase) { @@ -50,8 +48,8 @@ public Answer execute(final PrepareForMigrationCommand command, final CitrixReso configDriveLabel = "config-2"; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preparing host for migrating " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Preparing host for migrating " + vm); } final NicTO[] nics = vm.getNics(); @@ -61,11 +59,11 @@ public Answer execute(final PrepareForMigrationCommand command, final CitrixReso for (final NicTO nic : nics) { citrixResourceBase.getNetwork(conn, nic); } - s_logger.debug("4. The VM " + vm.getName() + " is in Migrating state"); + logger.debug("4. 
The VM " + vm.getName() + " is in Migrating state"); return new PrepareForMigrationAnswer(command); } catch (final Exception e) { - s_logger.warn("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to " + e.toString(), e); + logger.warn("Catch Exception " + e.getClass().getName() + " prepare for migration failed due to " + e.toString(), e); return new PrepareForMigrationAnswer(command, e); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrimaryStorageDownloadCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrimaryStorageDownloadCommandWrapper.java index 23be5eb1dcdb..b5a145a9ecab 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrimaryStorageDownloadCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPrimaryStorageDownloadCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; @@ -38,7 +37,6 @@ @ResourceWrapper(handles = PrimaryStorageDownloadCommand.class) public final class CitrixPrimaryStorageDownloadCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixPrimaryStorageDownloadCommandWrapper.class); @Override public Answer execute(final PrimaryStorageDownloadCommand command, final CitrixResourceBase citrixResourceBase) { @@ -53,7 +51,7 @@ public Answer execute(final PrimaryStorageDownloadCommand command, final CitrixR final Set srs = SR.getByNameLabel(conn, poolName); if (srs.size() != 1) { final String msg = "There are " + srs.size() + " SRs with same name: " + poolName; - s_logger.warn(msg); + logger.warn(msg); return new 
PrimaryStorageDownloadAnswer(msg); } else { poolsr = srs.iterator().next(); @@ -78,7 +76,7 @@ public Answer execute(final PrimaryStorageDownloadCommand command, final CitrixR } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + " on host:" + citrixResourceBase.getHost().getUuid() + " for template: " + tmplturl + " due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new PrimaryStorageDownloadAnswer(msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPvlanSetupCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPvlanSetupCommandWrapper.java index 313cb4e34e4a..2873f3f99a84 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPvlanSetupCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixPvlanSetupCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = PvlanSetupCommand.class) public final class CitrixPvlanSetupCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixPvlanSetupCommandWrapper.class); @Override public Answer execute(final PvlanSetupCommand command, final CitrixResourceBase citrixResourceBase) { @@ -55,15 +53,15 @@ public Answer execute(final PvlanSetupCommand command, final CitrixResourceBase try { final XsLocalNetwork nw = citrixResourceBase.getNativeNetworkForTraffic(conn, TrafficType.Guest, networkTag); if (nw == null) { - s_logger.error("Network is not configured on the backend for pvlan " + primaryPvlan); + logger.error("Network is not 
configured on the backend for pvlan " + primaryPvlan); throw new CloudRuntimeException("Network for the backend is not configured correctly for pvlan primary: " + primaryPvlan); } nwNameLabel = nw.getNetwork().getNameLabel(conn); } catch (final XenAPIException e) { - s_logger.warn("Fail to get network", e); + logger.warn("Fail to get network", e); return new Answer(command, false, e.toString()); } catch (final XmlRpcException e) { - s_logger.warn("Fail to get network", e); + logger.warn("Fail to get network", e); return new Answer(command, false, e.toString()); } @@ -73,20 +71,20 @@ public Answer execute(final PvlanSetupCommand command, final CitrixResourceBase isolatedPvlan, "dhcp-name", dhcpName, "dhcp-ip", dhcpIp, "dhcp-mac", dhcpMac); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { - s_logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac); + logger.warn("Failed to program pvlan for dhcp server with mac " + dhcpMac); return new Answer(command, false, result); } else { - s_logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac); + logger.info("Programmed pvlan for dhcp server with mac " + dhcpMac); } } else if (command.getType() == PvlanSetupCommand.Type.VM) { result = citrixResourceBase.callHostPlugin(conn, "ovs-pvlan", "setup-pvlan-vm", "op", op, "nw-label", nwNameLabel, "primary-pvlan", primaryPvlan, "isolated-pvlan", isolatedPvlan, "vm-mac", vmMac); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { - s_logger.warn("Failed to program pvlan for vm with mac " + vmMac); + logger.warn("Failed to program pvlan for vm with mac " + vmMac); return new Answer(command, false, result); } else { - s_logger.info("Programmed pvlan for vm with mac " + vmMac); + logger.info("Programmed pvlan for vm with mac " + vmMac); } } return new Answer(command, true, result); diff --git 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java index c276aff5fc2e..e7f7e00e6ae7 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixReadyCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.Set; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import static com.cloud.hypervisor.xenserver.discoverer.XcpServerDiscoverer.isUefiSupported; @@ -43,7 +42,6 @@ @ResourceWrapper(handles = ReadyCommand.class) public final class CitrixReadyCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixReadyCommandWrapper.class); @Override public Answer execute(final ReadyCommand command, final CitrixResourceBase citrixResourceBase) { @@ -74,10 +72,10 @@ public Answer execute(final ReadyCommand command, final CitrixResourceBase citri return new ReadyAnswer(command, "Unable to cleanup halted vms"); } } catch (final XenAPIException e) { - s_logger.warn("Unable to cleanup halted vms", e); + logger.warn("Unable to cleanup halted vms", e); return new ReadyAnswer(command, "Unable to cleanup halted vms"); } catch (final XmlRpcException e) { - s_logger.warn("Unable to cleanup halted vms", e); + logger.warn("Unable to cleanup halted vms", e); return new ReadyAnswer(command, "Unable to cleanup halted vms"); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRebootCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRebootCommandWrapper.java index 6d5b9f7992d1..3ea832cadf7c 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRebootCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRebootCommandWrapper.java @@ -21,7 +21,6 @@ import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.RebootAnswer; @@ -36,21 +35,20 @@ @ResourceWrapper(handles = RebootCommand.class) public final class CitrixRebootCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixRebootCommandWrapper.class); @Override public Answer execute(final RebootCommand command, final CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); - s_logger.debug("7. The VM " + command.getVmName() + " is in Starting state"); + logger.debug("7. 
The VM " + command.getVmName() + " is in Starting state"); try { Set vms = null; try { vms = VM.getByNameLabel(conn, command.getVmName()); } catch (final XenAPIException e0) { - s_logger.debug("getByNameLabel failed " + e0.toString()); + logger.debug("getByNameLabel failed " + e0.toString()); return new RebootAnswer(command, "getByNameLabel failed " + e0.toString(), false); } catch (final Exception e0) { - s_logger.debug("getByNameLabel failed " + e0.getMessage()); + logger.debug("getByNameLabel failed " + e0.getMessage()); return new RebootAnswer(command, "getByNameLabel failed", false); } for (final VM vm : vms) { @@ -58,13 +56,13 @@ public Answer execute(final RebootCommand command, final CitrixResourceBase citr citrixResourceBase.rebootVM(conn, vm, vm.getNameLabel(conn)); } catch (final Exception e) { final String msg = e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new RebootAnswer(command, msg, false); } } return new RebootAnswer(command, "reboot succeeded", true); } finally { - s_logger.debug("8. The VM " + command.getVmName() + " is in Running state"); + logger.debug("8. 
The VM " + command.getVmName() + " is in Running state"); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java index e7505cc2f34e..2ddf1bd18511 100755 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixResizeVolumeCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ResizeVolumeAnswer; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = ResizeVolumeCommand.class) public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixResizeVolumeCommandWrapper.class); @Override public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBase citrixResourceBase) { @@ -52,7 +50,7 @@ public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBas try { if (command.getCurrentSize() >= newSize) { - s_logger.info("No need to resize volume: " + volId +", current size " + toHumanReadableSize(command.getCurrentSize()) + " is same as new size " + toHumanReadableSize(newSize)); + logger.info("No need to resize volume: " + volId +", current size " + toHumanReadableSize(command.getCurrentSize()) + " is same as new size " + toHumanReadableSize(newSize)); return new ResizeVolumeAnswer(command, true, "success", newSize); } if (command.isManaged()) { @@ -65,7 +63,7 @@ public Answer execute(final ResizeVolumeCommand command, final CitrixResourceBas return new ResizeVolumeAnswer(command, true, 
"success", newSize); } catch (Exception ex) { - s_logger.warn("Unable to resize volume", ex); + logger.warn("Unable to resize volume", ex); String error = "Failed to resize volume: " + ex; @@ -91,7 +89,7 @@ private void resizeSr(Connection conn, ResizeVolumeCommand command) { Set pbds = sr.getPBDs(conn); if (pbds.size() <= 0) { - s_logger.debug("No PBDs found for the following SR: " + sr.getNameLabel(conn)); + logger.debug("No PBDs found for the following SR: " + sr.getNameLabel(conn)); } allPbds.addAll(pbds); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRevertToVMSnapshotCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRevertToVMSnapshotCommandWrapper.java index f8bb1b892420..be5139390c9e 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRevertToVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixRevertToVMSnapshotCommandWrapper.java @@ -25,7 +25,6 @@ import java.util.Set; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.RevertToVMSnapshotAnswer; @@ -44,7 +43,6 @@ @ResourceWrapper(handles = RevertToVMSnapshotCommand.class) public final class CitrixRevertToVMSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixRevertToVMSnapshotCommandWrapper.class); @Override public Answer execute(final RevertToVMSnapshotCommand command, final CitrixResourceBase citrixResourceBase) { @@ -105,7 +103,7 @@ public Answer execute(final RevertToVMSnapshotCommand command, final CitrixResou return new RevertToVMSnapshotAnswer(command, listVolumeTo, vmState); } catch (final Exception e) { - s_logger.error("revert vm " + 
vmName + " to snapshot " + command.getTarget().getSnapshotName() + " failed due to " + e.getMessage()); + logger.error("revert vm " + vmName + " to snapshot " + command.getTarget().getSnapshotName() + " failed due to " + e.getMessage()); return new RevertToVMSnapshotAnswer(command, false, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixScaleVmCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixScaleVmCommandWrapper.java index 8aa77277c786..d1ca2ee437b5 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixScaleVmCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixScaleVmCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.Iterator; import java.util.Set; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -42,7 +41,6 @@ @ResourceWrapper(handles = ScaleVmCommand.class) public final class CitrixScaleVmCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixScaleVmCommandWrapper.class); @Override public Answer execute(final ScaleVmCommand command, final CitrixResourceBase citrixResourceBase) { @@ -60,7 +58,7 @@ public Answer execute(final ScaleVmCommand command, final CitrixResourceBase cit } if (vms == null || vms.size() == 0) { - s_logger.info("No running VM " + vmName + " exists on XenServer" + citrixResourceBase.getHost().getUuid()); + logger.info("No running VM " + vmName + " exists on XenServer" + citrixResourceBase.getHost().getUuid()); return new ScaleVmAnswer(command, false, "VM does not exist"); } @@ -82,26 +80,26 @@ public Answer execute(final ScaleVmCommand command, final CitrixResourceBase cit citrixResourceBase.scaleVM(conn, vm, vmSpec, 
host); } catch (final Exception e) { final String msg = "Catch exception " + e.getClass().getName() + " when scaling VM:" + vmName + " due to " + e.toString(); - s_logger.debug(msg); + logger.debug(msg); return new ScaleVmAnswer(command, false, msg); } } final String msg = "scaling VM " + vmName + " is successful on host " + host; - s_logger.debug(msg); + logger.debug(msg); return new ScaleVmAnswer(command, true, msg); } catch (final XenAPIException e) { final String msg = "Upgrade Vm " + vmName + " fail due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new ScaleVmAnswer(command, false, msg); } catch (final XmlRpcException e) { final String msg = "Upgrade Vm " + vmName + " fail due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new ScaleVmAnswer(command, false, msg); } catch (final Exception e) { final String msg = "Unable to upgrade " + vmName + " due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new ScaleVmAnswer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSecurityGroupRulesCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSecurityGroupRulesCommandWrapper.java index 00974d75b72f..816d9703b98d 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSecurityGroupRulesCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSecurityGroupRulesCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.SecurityGroupRuleAnswer; @@ -32,17 +31,16 @@ @ResourceWrapper(handles = SecurityGroupRulesCmd.class) public final class 
CitrixSecurityGroupRulesCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixSecurityGroupRulesCommandWrapper.class); @Override public Answer execute(final SecurityGroupRulesCmd command, final CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Sending network rules command to " + citrixResourceBase.getHost().getIp()); + if (logger.isTraceEnabled()) { + logger.trace("Sending network rules command to " + citrixResourceBase.getHost().getIp()); } if (!citrixResourceBase.canBridgeFirewall()) { - s_logger.warn("Host " + citrixResourceBase.getHost().getIp() + " cannot do bridge firewalling"); + logger.warn("Host " + citrixResourceBase.getHost().getIp() + " cannot do bridge firewalling"); return new SecurityGroupRuleAnswer(command, false, "Host " + citrixResourceBase.getHost().getIp() + " cannot do bridge firewalling", SecurityGroupRuleAnswer.FailureReason.CANNOT_BRIDGE_FIREWALL); } @@ -52,10 +50,10 @@ public Answer execute(final SecurityGroupRulesCmd command, final CitrixResourceB "true", "rules", command.compressStringifiedRules(), "secIps", command.getSecIpsString()); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { - s_logger.warn("Failed to program network rules for vm " + command.getVmName()); + logger.warn("Failed to program network rules for vm " + command.getVmName()); return new SecurityGroupRuleAnswer(command, false, "programming network rules failed"); } else { - s_logger.info("Programmed network rules for vm " + command.getVmName() + " guestIp=" + command.getGuestIp() + ", ingress numrules=" + logger.info("Programmed network rules for vm " + command.getVmName() + " guestIp=" + command.getGuestIp() + ", ingress numrules=" + command.getIngressRuleSet().size() + ", egress numrules=" + command.getEgressRuleSet().size()); return new SecurityGroupRuleAnswer(command); } diff --git 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupCommandWrapper.java index 263dade328eb..57daba4da41d 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupCommandWrapper.java @@ -22,7 +22,6 @@ import java.util.Map; import java.util.Set; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -45,7 +44,6 @@ @ResourceWrapper(handles = SetupCommand.class) public final class CitrixSetupCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixSetupCommandWrapper.class); @Override public Answer execute(final SetupCommand command, final CitrixResourceBase citrixResourceBase) { @@ -61,7 +59,7 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri citrixResourceBase.setupServer(conn, host); if (!citrixResourceBase.setIptables(conn)) { - s_logger.warn("set xenserver Iptable failed"); + logger.warn("set xenserver Iptable failed"); return null; } @@ -70,8 +68,8 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri citrixResourceBase.setCanBridgeFirewall(canBridgeFirewall); if (!canBridgeFirewall) { final String msg = "Failed to configure bridge firewall"; - s_logger.warn(msg); - s_logger.warn("Check host " + citrixResourceBase.getHost().getIp() +" for CSP is installed or not and check network mode for bridge"); + logger.warn(msg); + logger.warn("Check host " + citrixResourceBase.getHost().getIp() +" for CSP is installed or not and check network mode for bridge"); return new SetupAnswer(command, msg); } @@ -90,14 +88,14 
@@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri } } catch (final Types.MapDuplicateKey e) { - s_logger.debug("multipath is already set"); + logger.debug("multipath is already set"); } if (command.needSetup() ) { final String result = citrixResourceBase.callHostPlugin(conn, "vmops", "setup_iscsi", "uuid", citrixResourceBase.getHost().getUuid()); if (!result.contains("> DONE <")) { - s_logger.warn("Unable to setup iscsi: " + result); + logger.warn("Unable to setup iscsi: " + result); return new SetupAnswer(command, result); } @@ -114,11 +112,11 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri .append("; vlan=") .append(rec.VLAN) .toString(); - s_logger.warn(msg); + logger.warn(msg); return new SetupAnswer(command, msg); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Management network is on pif=" + rec.uuid); + if (logger.isDebugEnabled()) { + logger.debug("Management network is on pif=" + rec.uuid); } mgmtPif = new Pair(pif, rec); break; @@ -127,14 +125,14 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri if (mgmtPif == null) { final String msg = "Unable to find management network for " + citrixResourceBase.getHost().getUuid(); - s_logger.warn(msg); + logger.warn(msg); return new SetupAnswer(command, msg); } final Map networks = Network.getAllRecords(conn); if(networks == null) { final String msg = "Unable to setup as there are no networks in the host: " + citrixResourceBase.getHost().getUuid(); - s_logger.warn(msg); + logger.warn(msg); return new SetupAnswer(command, msg); } for (final Network.Record network : networks.values()) { @@ -142,8 +140,8 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri for (final PIF pif : network.PIFs) { final PIF.Record pr = pif.getRecord(conn); if (citrixResourceBase.getHost().getUuid().equals(pr.host.getUuid(conn))) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a network 
called cloud-private. host=" + citrixResourceBase.getHost().getUuid() + "; Network=" + network.uuid + "; pif=" + pr.uuid); + if (logger.isDebugEnabled()) { + logger.debug("Found a network called cloud-private. host=" + citrixResourceBase.getHost().getUuid() + "; Network=" + network.uuid + "; pif=" + pr.uuid); } if (pr.VLAN != null && pr.VLAN != -1) { final String msg = @@ -151,7 +149,7 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri .append(" ; pif=") .append(pr.uuid) .toString(); - s_logger.warn(msg); + logger.warn(msg); return new SetupAnswer(command, msg); } if (!pr.management && pr.bondMasterOf != null && pr.bondMasterOf.size() > 0) { @@ -161,7 +159,7 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri .append("; pif=") .append(pr.uuid) .toString(); - s_logger.warn(msg); + logger.warn(msg); return new SetupAnswer(command, msg); } final Bond bond = pr.bondMasterOf.iterator().next(); @@ -173,7 +171,7 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri final String msg = new StringBuilder("Unable to transfer management network. 
slave=" + spr.uuid + "; master=" + pr.uuid + "; host=" + citrixResourceBase.getHost().getUuid()).toString(); - s_logger.warn(msg); + logger.warn(msg); return new SetupAnswer(command, msg); } break; @@ -188,13 +186,13 @@ public Answer execute(final SetupCommand command, final CitrixResourceBase citri return new SetupAnswer(command, false); } catch (final XmlRpcException e) { - s_logger.warn("Unable to setup", e); + logger.warn("Unable to setup", e); return new SetupAnswer(command, e.getMessage()); } catch (final XenAPIException e) { - s_logger.warn("Unable to setup", e); + logger.warn("Unable to setup", e); return new SetupAnswer(command, e.getMessage()); } catch (final Exception e) { - s_logger.warn("Unable to setup", e); + logger.warn("Unable to setup", e); return new SetupAnswer(command, e.getMessage()); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupPersistentNetworkCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupPersistentNetworkCommandWrapper.java index cab5a080949c..7c84d44c49f8 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupPersistentNetworkCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixSetupPersistentNetworkCommandWrapper.java @@ -18,7 +18,6 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.SetupPersistentNetworkAnswer; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = SetupPersistentNetworkCommand.class) public class CitrixSetupPersistentNetworkCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixSetupPersistentNetworkCommandWrapper.class); @Override public Answer 
execute(SetupPersistentNetworkCommand command, CitrixResourceBase citrixResourceBase) { @@ -47,7 +45,7 @@ public Answer execute(SetupPersistentNetworkCommand command, CitrixResourceBase return new SetupPersistentNetworkAnswer(command, true, "Successfully setup network on host: "+ host.getIp()); } catch (final Exception e) { final String msg = " Failed to setup network on host: " + host.getIp() + " due to: " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); return new SetupPersistentNetworkAnswer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java index ad76b7f4541a..33d4eaf6e777 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStartCommandWrapper.java @@ -28,7 +28,6 @@ import com.cloud.agent.resource.virtualnetwork.VRScripts; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.OvsSetTagAndFlowAnswer; @@ -57,7 +56,6 @@ @ResourceWrapper(handles = StartCommand.class) public final class CitrixStartCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixStartCommandWrapper.class); @Override public Answer execute(final StartCommand command, final CitrixResourceBase citrixResourceBase) { @@ -79,22 +77,22 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri } else if (vRec.powerState == VmPowerState.RUNNING) { final String host = vRec.residentOn.getUuid(conn); final String 
msg = "VM " + vmName + " is runing on host " + host; - s_logger.debug(msg); + logger.debug(msg); return new StartAnswer(command, msg, host); } else { final String msg = "There is already a VM having the same name " + vmName + " vm record " + vRec.toString(); - s_logger.warn(msg); + logger.warn(msg); return new StartAnswer(command, msg); } } } - s_logger.debug("1. The VM " + vmName + " is in Starting state."); + logger.debug("1. The VM " + vmName + " is in Starting state."); final Host host = Host.getByUuid(conn, citrixResourceBase.getHost().getUuid()); vm = citrixResourceBase.createVmFromTemplate(conn, vmSpec, host); final GPUDeviceTO gpuDevice = vmSpec.getGpuDevice(); if (gpuDevice != null) { - s_logger.debug("Creating VGPU for of VGPU type: " + gpuDevice.getVgpuType() + " in GPU group " + gpuDevice.getGpuGroup() + " for VM " + vmName); + logger.debug("Creating VGPU for of VGPU type: " + gpuDevice.getVgpuType() + " in GPU group " + gpuDevice.getGpuGroup() + " for VM " + vmName); citrixResourceBase.createVGPU(conn, command, vm, gpuDevice); } @@ -123,9 +121,9 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri final OvsSetTagAndFlowAnswer r = (OvsSetTagAndFlowAnswer) citrixRequestWrapper.execute(flowCmd, citrixResourceBase); if (!r.getResult()) { - s_logger.warn("Failed to set flow for VM " + r.getVmId()); + logger.warn("Failed to set flow for VM " + r.getVmId()); } else { - s_logger.info("Success to set flow for VM " + r.getVmId()); + logger.info("Success to set flow for VM " + r.getVmId()); } } } @@ -145,9 +143,9 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri if (secGrpEnabled) { result = citrixResourceBase.callHostPlugin(conn, "vmops", "default_network_rules_systemvm", "vmName", vmName); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { - s_logger.warn("Failed to program default network rules for " + vmName); + logger.warn("Failed to program default network rules for " + 
vmName); } else { - s_logger.info("Programmed default network rules for " + vmName); + logger.info("Programmed default network rules for " + vmName); } } @@ -172,9 +170,9 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri "vmID", Long.toString(vmSpec.getId()), "secIps", secIpsStr); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { - s_logger.warn("Failed to program default network rules for " + vmName + " on nic with ip:" + nic.getIp() + " mac:" + nic.getMac()); + logger.warn("Failed to program default network rules for " + vmName + " on nic with ip:" + nic.getIp() + " mac:" + nic.getMac()); } else { - s_logger.info("Programmed default network rules for " + vmName + " on nic with ip:" + nic.getIp() + " mac:" + nic.getMac()); + logger.info("Programmed default network rules for " + vmName + " on nic with ip:" + nic.getIp() + " mac:" + nic.getMac()); } } } @@ -194,7 +192,7 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri String result2 = citrixResourceBase.connect(conn, vmName, controlIp, 1000); if (StringUtils.isEmpty(result2)) { - s_logger.info(String.format("Connected to SystemVM: %s", vmName)); + logger.info(String.format("Connected to SystemVM: %s", vmName)); } try { @@ -202,12 +200,12 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri VirtualRoutingResource vrResource = citrixResourceBase.getVirtualRoutingResource(); if (!vrResource.isSystemVMSetup(vmName, controlIp)) { String errMsg = "Failed to patch systemVM"; - s_logger.error(errMsg); + logger.error(errMsg); return new StartAnswer(command, errMsg); } } catch (Exception e) { String errMsg = "Failed to scp files to system VM. 
Patching of systemVM failed"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return new StartAnswer(command, String.format("%s due to: %s", errMsg, e.getMessage())); } } @@ -218,7 +216,7 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri return startAnswer; } catch (final Exception e) { - s_logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e); + logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e); final String msg = citrixResourceBase.handleVmStartFailure(conn, vmName, vm, "", e); final StartAnswer startAnswer = new StartAnswer(command, msg); @@ -228,9 +226,9 @@ public Answer execute(final StartCommand command, final CitrixResourceBase citri return startAnswer; } finally { if (state != VmPowerState.HALTED) { - s_logger.debug("2. The VM " + vmName + " is in " + state + " state."); + logger.debug("2. The VM " + vmName + " is in " + state + " state."); } else { - s_logger.debug("The VM is in stopped state, detected problem during startup : " + vmName); + logger.debug("The VM is in stopped state, detected problem during startup : " + vmName); } } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java index 45171a49f9ff..c464fcc16246 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixStopCommandWrapper.java @@ -27,7 +27,6 @@ import java.util.Set; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.StopAnswer; @@ -50,7 +49,6 @@ 
@ResourceWrapper(handles = StopCommand.class) public final class CitrixStopCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixStopCommandWrapper.class); @Override public Answer execute(final StopCommand command, final CitrixResourceBase citrixResourceBase) { @@ -85,23 +83,23 @@ public Answer execute(final StopCommand command, final CitrixResourceBase citrix platformstring = StringUtils.mapToString(vmr.platform); if (vmr.isControlDomain) { final String msg = "Tring to Shutdown control domain"; - s_logger.warn(msg); + logger.warn(msg); return new StopAnswer(command, msg, false); } if (vmr.powerState == VmPowerState.RUNNING && !citrixResourceBase.isRefNull(vmr.residentOn) && !vmr.residentOn.getUuid(conn).equals(citrixResourceBase.getHost().getUuid())) { final String msg = "Stop Vm " + vmName + " failed due to this vm is not running on this host: " + citrixResourceBase.getHost().getUuid() + " but host:" + vmr.residentOn.getUuid(conn); - s_logger.warn(msg); + logger.warn(msg); return new StopAnswer(command, msg, platformstring, false); } if (command.checkBeforeCleanup() && vmr.powerState == VmPowerState.RUNNING) { final String msg = "Vm " + vmName + " is running on host and checkBeforeCleanup flag is set, so bailing out"; - s_logger.debug(msg); + logger.debug(msg); return new StopAnswer(command, msg, false); } - s_logger.debug("9. The VM " + vmName + " is in Stopping state"); + logger.debug("9. 
The VM " + vmName + " is in Stopping state"); try { if (vmr.powerState == VmPowerState.RUNNING) { @@ -111,16 +109,16 @@ public Answer execute(final StopCommand command, final CitrixResourceBase citrix if (citrixResourceBase.canBridgeFirewall()) { final String result = citrixResourceBase.callHostPlugin(conn, "vmops", "destroy_network_rules_for_vm", "vmName", command.getVmName()); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { - s_logger.warn("Failed to remove network rules for vm " + command.getVmName()); + logger.warn("Failed to remove network rules for vm " + command.getVmName()); } else { - s_logger.info("Removed network rules for vm " + command.getVmName()); + logger.info("Removed network rules for vm " + command.getVmName()); } } citrixResourceBase.shutdownVM(conn, vm, vmName, command.isForceStop()); } } catch (final Exception e) { final String msg = "Catch exception " + e.getClass().getName() + " when stop VM:" + command.getVmName() + " due to " + e.toString(); - s_logger.debug(msg); + logger.debug(msg); return new StopAnswer(command, msg, platformstring, false); } finally { @@ -131,7 +129,7 @@ public Answer execute(final StopCommand command, final CitrixResourceBase citrix try { vGPUs = vm.getVGPUs(conn); } catch (final XenAPIException e2) { - s_logger.debug("VM " + vmName + " does not have GPU support."); + logger.debug("VM " + vmName + " does not have GPU support."); } if (vGPUs != null && !vGPUs.isEmpty()) { final HashMap> groupDetails = citrixResourceBase.getGPUGroupDetails(conn); @@ -162,16 +160,16 @@ public Answer execute(final StopCommand command, final CitrixResourceBase citrix } } catch (final Exception e) { final String msg = "VM destroy failed in Stop " + vmName + " Command due to " + e.getMessage(); - s_logger.warn(msg, e); + logger.warn(msg, e); } finally { - s_logger.debug("10. The VM " + vmName + " is in Stopped state"); + logger.debug("10. 
The VM " + vmName + " is in Stopped state"); } } } } catch (final Exception e) { final String msg = "Stop Vm " + vmName + " fail due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new StopAnswer(command, msg, platformstring, false); } return new StopAnswer(command, "Stop VM failed", platformstring, false); diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUnPlugNicCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUnPlugNicCommandWrapper.java index 28981413a723..0e7a0629ed01 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUnPlugNicCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUnPlugNicCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.Set; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import com.cloud.agent.api.Answer; @@ -42,7 +41,6 @@ @ResourceWrapper(handles = UnPlugNicCommand.class) public final class CitrixUnPlugNicCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixUnPlugNicCommandWrapper.class); @Override public Answer execute(final UnPlugNicCommand command, final CitrixResourceBase citrixResourceBase) { @@ -73,7 +71,7 @@ public Answer execute(final UnPlugNicCommand command, final CitrixResourceBase c return new UnPlugNicAnswer(command, true, "success"); } catch (final Exception e) { final String msg = " UnPlug Nic failed due to " + e.toString(); - s_logger.warn(msg, e); + logger.warn(msg, e); return new UnPlugNicAnswer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpdateHostPasswordCommandWrapper.java 
b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpdateHostPasswordCommandWrapper.java index 39110b12a25d..1acc292b450b 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpdateHostPasswordCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpdateHostPasswordCommandWrapper.java @@ -21,7 +21,6 @@ import static com.cloud.hypervisor.xenserver.resource.wrapper.xenbase.XenServerUtilitiesHelper.SCRIPT_CMD_PATH; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.UpdateHostPasswordCommand; @@ -34,7 +33,6 @@ @ResourceWrapper(handles = UpdateHostPasswordCommand.class) public final class CitrixUpdateHostPasswordCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixUpdateHostPasswordCommandWrapper.class); @Override public Answer execute(final UpdateHostPasswordCommand command, final CitrixResourceBase citrixResourceBase) { @@ -47,7 +45,7 @@ public Answer execute(final UpdateHostPasswordCommand command, final CitrixResou Pair result; try { - s_logger.debug("Executing command in Host: " + cmdLine); + logger.debug("Executing command in Host: " + cmdLine); final String hostPassword = citrixResourceBase.getPwdFromQueue(); result = xenServerUtilitiesHelper.executeSshWrapper(hostIp, 22, username, null, hostPassword, cmdLine.toString()); } catch (final Exception e) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpgradeSnapshotCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpgradeSnapshotCommandWrapper.java index c5feef042162..9022e51ef6ba 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpgradeSnapshotCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixUpgradeSnapshotCommandWrapper.java @@ -21,7 +21,6 @@ import java.net.URI; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.UpgradeSnapshotCommand; @@ -33,7 +32,6 @@ @ResourceWrapper(handles = UpgradeSnapshotCommand.class) public final class CitrixUpgradeSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixUpgradeSnapshotCommandWrapper.class); @Override public Answer execute(final UpgradeSnapshotCommand command, final CitrixResourceBase citrixResourceBase) { @@ -58,7 +56,7 @@ public Answer execute(final UpgradeSnapshotCommand command, final CitrixResource return new Answer(command, true, "success"); } catch (final Exception e) { final String details = "upgrading snapshot " + backedUpSnapshotUuid + " failed due to " + e.toString(); - s_logger.error(details, e); + logger.error(details, e); } return new Answer(command, false, "failure"); diff --git a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/hypervisor/xenserver/ExtraConfigurationUtility.java b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/hypervisor/xenserver/ExtraConfigurationUtility.java index 46d0a39cf6f1..a2702efeb723 100644 --- a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/hypervisor/xenserver/ExtraConfigurationUtility.java +++ b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/hypervisor/xenserver/ExtraConfigurationUtility.java @@ -19,7 +19,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.xmlrpc.XmlRpcException; import 
com.cloud.exception.InvalidParameterValueException; @@ -29,7 +30,7 @@ import com.xensource.xenapi.VM; public class ExtraConfigurationUtility { - private static final Logger LOG = Logger.getLogger(ExtraConfigurationUtility.class); + protected static Logger LOGGER = LogManager.getLogger(ExtraConfigurationUtility.class); public static void setExtraConfigurationToVm(Connection conn, VM.Record vmr, VM vm, Map extraConfig) { Map recordMap = vmr.toMap(); @@ -63,7 +64,7 @@ private static void applyConfigWithNestedKeyValue(Connection conn, VM vm, Map recordMap, String paramKey, String paramValue) { if (!isValidOperation(recordMap, paramKey)) { - LOG.error("Unsupported extra configuration has been passed: " + paramKey); + LOGGER.error("Unsupported extra configuration has been passed: " + paramKey); throw new InvalidParameterValueException("Unsupported extra configuration parameter key has been passed: " + paramKey); } @@ -161,10 +162,10 @@ private static void applyConfigWithKeyValue(Connection conn, VM vm, Map classes = new HashSet(); @@ -112,14 +110,14 @@ protected void waitForTask2(final Connection c, final Task task, final long poll Set events = map.events; if (events.size() == 0) { final String msg = "No event for task " + task.toWireString(); - s_logger.warn(msg); + logger.warn(msg); task.cancel(c); throw new TimeoutException(msg); } for (final Event.Record rec : events) { if (!(rec.snapshot instanceof Task.Record)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skipping over " + rec); + if (logger.isDebugEnabled()) { + logger.debug("Skipping over " + rec); } continue; } @@ -127,20 +125,20 @@ protected void waitForTask2(final Connection c, final Task task, final long poll final Task.Record taskRecord = (Task.Record)rec.snapshot; if (taskRecord.status != Types.TaskStatusType.PENDING) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Task, ref:" + task.toWireString() + ", UUID:" + taskRecord.uuid + " is done " + taskRecord.status); + if 
(logger.isDebugEnabled()) { + logger.debug("Task, ref:" + task.toWireString() + ", UUID:" + taskRecord.uuid + " is done " + taskRecord.status); } return; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Task: ref:" + task.toWireString() + ", UUID:" + taskRecord.uuid + " progress: " + taskRecord.progress); + if (logger.isDebugEnabled()) { + logger.debug("Task: ref:" + task.toWireString() + ", UUID:" + taskRecord.uuid + " progress: " + taskRecord.progress); } } } if (System.currentTimeMillis() - beginTime > timeout) { final String msg = "Async " + timeout / 1000 + " seconds timeout for task " + task.toString(); - s_logger.warn(msg); + logger.warn(msg); task.cancel(c); throw new TimeoutException(msg); } @@ -171,7 +169,7 @@ public void run() { try { results = Event.from(conn, _classes, _token, new Double(30)); } catch (final Exception e) { - s_logger.error("Retrying the waiting on VM events due to: ", e); + logger.error("Retrying the waiting on VM events due to: ", e); continue; } @@ -182,8 +180,8 @@ public void run() { for (final Event.Record event : events) { try { if (!(event.snapshot instanceof VM.Record)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("The snapshot is not a VM: " + event); + if (logger.isDebugEnabled()) { + logger.debug("The snapshot is not a VM: " + event); } continue; } @@ -195,11 +193,11 @@ public void run() { } recordChanges(conn, vm, hostUuid); } catch (final Exception e) { - s_logger.error("Skipping over " + event, e); + logger.error("Skipping over " + event, e); } } } catch (final Throwable th) { - s_logger.error("Exception caught in eventlistener thread: ", th); + logger.error("Exception caught in eventlistener thread: ", th); } } } @@ -217,11 +215,11 @@ public void start() { try { results = Event.from(conn, _classes, _token, new Double(30)); } catch (final Exception e) { - s_logger.error("Retrying the waiting on VM events due to: ", e); + logger.error("Retrying the waiting on VM events due to: ", e); throw new 
CloudRuntimeException("Unable to start a listener thread to listen to VM events", e); } _token = results.token; - s_logger.debug("Starting the event listener thread for " + _host.getUuid()); + logger.debug("Starting the event listener thread for " + _host.getUuid()); super.start(); } } diff --git a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java index dbfcfe987fc5..caf28e849a0e 100644 --- a/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java +++ b/plugins/hypervisors/xenserver/src/main/java/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java @@ -37,7 +37,8 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -75,7 +76,7 @@ @Component public class XenServerStorageMotionStrategy implements DataMotionStrategy { - private static final Logger s_logger = Logger.getLogger(XenServerStorageMotionStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AgentManager agentMgr; @Inject @@ -126,7 +127,7 @@ public void copyAsync(Map volumeMap, VirtualMachineTO vmT throw new CloudRuntimeException("Unsupported operation requested for moving data."); } } catch (Exception e) { - s_logger.error("copy failed", e); + logger.error("copy failed", e); errMsg = e.toString(); } @@ -198,7 +199,7 @@ private String handleManagedVolumePreMigration(VolumeInfo volumeInfo, StoragePoo String errMsg = "Error interacting with host (related to CreateStoragePoolCommand)" + 
(StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -240,7 +241,7 @@ private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHos String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -283,7 +284,7 @@ private void handleManagedVolumesAfterFailedMigration(Map String errMsg = "Error interacting with host (related to handleManagedVolumesAfterFailedMigration)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); - s_logger.error(errMsg); + logger.error(errMsg); // no need to throw an exception here as the calling code is responsible for doing so // regardless of the success or lack thereof concerning this method @@ -342,10 +343,10 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageReceiveAnswer receiveAnswer = (MigrateWithStorageReceiveAnswer)agentMgr.send(destHost.getId(), receiveCmd); if (receiveAnswer == null) { - s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); + logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!receiveAnswer.getResult()) { - s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + receiveAnswer.getDetails()); + logger.error("Migration with storage of vm " + vm + " failed. 
Details: " + receiveAnswer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } @@ -356,12 +357,12 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine if (sendAnswer == null) { handleManagedVolumesAfterFailedMigration(volumeToPool, destHost); - s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); + logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!sendAnswer.getResult()) { handleManagedVolumesAfterFailedMigration(volumeToPool, destHost); - s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails()); + logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } @@ -369,10 +370,10 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer)agentMgr.send(destHost.getId(), command); if (answer == null) { - s_logger.error("Migration with storage of vm " + vm + " failed."); + logger.error("Migration with storage of vm " + vm + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else { // Update the volume details after migration. 
@@ -381,7 +382,7 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine return answer; } catch (OperationTimedoutException e) { - s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + logger.error("Error while migrating vm " + vm + " to host " + destHost, e); throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); } } @@ -402,10 +403,10 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto); MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)agentMgr.send(destHost.getId(), command); if (answer == null) { - s_logger.error("Migration with storage of vm " + vm + " failed."); + logger.error("Migration with storage of vm " + vm + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!answer.getResult()) { - s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); + logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails()); } else { // Update the volume details after migration. 
@@ -414,7 +415,7 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine return answer; } catch (OperationTimedoutException e) { - s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e); + logger.error("Error while migrating vm " + vm + " to host " + destHost, e); throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId()); } } @@ -451,7 +452,7 @@ private void updateVolumePathsAfterMigration(Map volumeTo } if (!updated) { - s_logger.error("The volume path wasn't updated for volume '" + volumeInfo + "' after it was migrated."); + logger.error("The volume path wasn't updated for volume '" + volumeInfo + "' after it was migrated."); } } } diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java index cfb23da28462..3c1f161dd202 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/CloudianConnectorImpl.java @@ -41,7 +41,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; -import org.apache.log4j.Logger; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; @@ -56,7 +55,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class CloudianConnectorImpl extends ComponentLifecycleBase implements CloudianConnector, Configurable { - private static final Logger LOG = Logger.getLogger(CloudianConnectorImpl.class); @Inject private UserDao userDao; @@ -80,7 +78,7 @@ private CloudianClient getClient() { CloudianAdminUser.value(), CloudianAdminPassword.value(), CloudianValidateSSLSecurity.value(), 
CloudianAdminApiRequestTimeout.value()); } catch (final KeyStoreException | NoSuchAlgorithmException | KeyManagementException e) { - LOG.error("Failed to create Cloudian API client due to: ", e); + logger.error("Failed to create Cloudian API client due to: ", e); } throw new CloudRuntimeException("Failed to create and return Cloudian API client instance"); } @@ -104,17 +102,17 @@ private boolean removeGroup(final Domain domain) { final CloudianClient client = getClient(); for (final CloudianUser user: client.listUsers(domain.getUuid())) { if (client.removeUser(user.getUserId(), domain.getUuid())) { - LOG.error(String.format("Failed to remove Cloudian user id=%s, while removing Cloudian group id=%s", user.getUserId(), domain.getUuid())); + logger.error(String.format("Failed to remove Cloudian user id=%s, while removing Cloudian group id=%s", user.getUserId(), domain.getUuid())); } } for (int retry = 0; retry < 3; retry++) { if (client.removeGroup(domain.getUuid())) { return true; } else { - LOG.warn("Failed to remove Cloudian group id=" + domain.getUuid() + ", retrying count=" + retry+1); + logger.warn("Failed to remove Cloudian group id=" + domain.getUuid() + ", retrying count=" + retry+1); } } - LOG.warn("Failed to remove Cloudian group id=" + domain.getUuid() + ", please remove manually"); + logger.warn("Failed to remove Cloudian group id=" + domain.getUuid() + ", please remove manually"); return false; } @@ -164,10 +162,10 @@ private boolean removeUserAccount(final Account account) { if (client.removeUser(account.getUuid(), domain.getUuid())) { return true; } else { - LOG.warn("Failed to remove Cloudian user id=" + account.getUuid() + " in group id=" + domain.getUuid() + ", retrying count=" + retry+1); + logger.warn("Failed to remove Cloudian user id=" + account.getUuid() + " in group id=" + domain.getUuid() + ", retrying count=" + retry+1); } } - LOG.warn("Failed to remove Cloudian user id=" + account.getUuid() + " in group id=" + domain.getUuid() + ", please 
remove manually"); + logger.warn("Failed to remove Cloudian user id=" + account.getUuid() + " in group id=" + domain.getUuid() + ", please remove manually"); return false; } @@ -199,21 +197,21 @@ public String generateSsoUrl() { group = "0"; } - LOG.debug(String.format("Attempting Cloudian SSO with user id=%s, group id=%s", user, group)); + logger.debug(String.format("Attempting Cloudian SSO with user id=%s, group id=%s", user, group)); final CloudianUser ssoUser = getClient().listUser(user, group); if (ssoUser == null || !ssoUser.getActive()) { - LOG.debug(String.format("Failed to find existing Cloudian user id=%s in group id=%s", user, group)); + logger.debug(String.format("Failed to find existing Cloudian user id=%s in group id=%s", user, group)); final CloudianGroup ssoGroup = getClient().listGroup(group); if (ssoGroup == null) { - LOG.debug(String.format("Failed to find existing Cloudian group id=%s, trying to add it", group)); + logger.debug(String.format("Failed to find existing Cloudian group id=%s, trying to add it", group)); if (!addGroup(domain)) { - LOG.error("Failed to add missing Cloudian group id=" + group); + logger.error("Failed to add missing Cloudian group id=" + group); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Aborting Cloudian SSO, failed to add group to Cloudian."); } } if (!addUserAccount(caller, domain)) { - LOG.error("Failed to add missing Cloudian group id=" + group); + logger.error("Failed to add missing Cloudian group id=" + group); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Aborting Cloudian SSO, failed to add user to Cloudian."); } final CloudianUser addedSsoUser = getClient().listUser(user, group); @@ -224,7 +222,7 @@ public String generateSsoUrl() { updateUserAccount(caller, domain, ssoUser); } - LOG.debug(String.format("Validated Cloudian SSO for Cloudian user id=%s, group id=%s", user, group)); + logger.debug(String.format("Validated Cloudian SSO for Cloudian user id=%s, group id=%s", user, 
group)); return CloudianUtils.generateSSOUrl(getCmcUrl(), user, group, CloudianSsoKey.value()); } @@ -237,11 +235,11 @@ public boolean configure(String name, Map params) throws Configu super.configure(name, params); if (!isEnabled()) { - LOG.debug("Cloudian connector is disabled, skipping configuration"); + logger.debug("Cloudian connector is disabled, skipping configuration"); return true; } - LOG.debug(String.format("Cloudian connector is enabled, completed configuration, integration is ready. " + + logger.debug(String.format("Cloudian connector is enabled, completed configuration, integration is ready. " + "Cloudian admin host:%s, port:%s, user:%s", CloudianAdminHost.value(), CloudianAdminPort.value(), CloudianAdminUser.value())); @@ -255,10 +253,10 @@ public void onPublishMessage(String senderAddress, String subject, Object args) final Domain domain = domainDao.findById(account.getDomainId()); if (!addUserAccount(account, domain)) { - LOG.warn(String.format("Failed to add account in Cloudian while adding CloudStack account=%s in domain=%s", account.getAccountName(), domain.getPath())); + logger.warn(String.format("Failed to add account in Cloudian while adding CloudStack account=%s in domain=%s", account.getAccountName(), domain.getPath())); } } catch (final Exception e) { - LOG.error("Caught exception while adding account in Cloudian: ", e); + logger.error("Caught exception while adding account in Cloudian: ", e); } } }); @@ -269,10 +267,10 @@ public void onPublishMessage(String senderAddress, String subject, Object args) try { final Account account = accountDao.findByIdIncludingRemoved((Long) args); if(!removeUserAccount(account)) { - LOG.warn(String.format("Failed to remove account to Cloudian while removing CloudStack account=%s, id=%s", account.getAccountName(), account.getId())); + logger.warn(String.format("Failed to remove account to Cloudian while removing CloudStack account=%s, id=%s", account.getAccountName(), account.getId())); } } catch (final 
Exception e) { - LOG.error("Caught exception while removing account in Cloudian: ", e); + logger.error("Caught exception while removing account in Cloudian: ", e); } } }); @@ -283,10 +281,10 @@ public void onPublishMessage(String senderAddress, String subject, Object args) try { final Domain domain = domainDao.findById((Long) args); if (!addGroup(domain)) { - LOG.warn(String.format("Failed to add group in Cloudian while adding CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); + logger.warn(String.format("Failed to add group in Cloudian while adding CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); } } catch (final Exception e) { - LOG.error("Caught exception adding domain/group in Cloudian: ", e); + logger.error("Caught exception adding domain/group in Cloudian: ", e); } } }); @@ -297,10 +295,10 @@ public void onPublishMessage(String senderAddress, String subject, Object args) try { final DomainVO domain = (DomainVO) args; if (!removeGroup(domain)) { - LOG.warn(String.format("Failed to remove group in Cloudian while removing CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); + logger.warn(String.format("Failed to remove group in Cloudian while removing CloudStack domain=%s id=%s", domain.getPath(), domain.getId())); } } catch (final Exception e) { - LOG.error("Caught exception while removing domain/group in Cloudian: ", e); + logger.error("Caught exception while removing domain/group in Cloudian: ", e); } } }); diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianClient.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianClient.java index 644a3c68a01c..9deddbe38a34 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianClient.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianClient.java @@ -56,14 +56,15 @@ import 
org.apache.http.impl.client.BasicAuthCache; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.nio.TrustAllManager; import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.lang3.StringUtils; public class CloudianClient { - private static final Logger LOG = Logger.getLogger(CloudianClient.class); + protected Logger logger = LogManager.getLogger(getClass()); private final HttpClient httpClient; private final HttpClientContext httpContext; @@ -107,14 +108,14 @@ public CloudianClient(final String host, final Integer port, final String scheme private void checkAuthFailure(final HttpResponse response) { if (response != null && response.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { final Credentials credentials = httpContext.getCredentialsProvider().getCredentials(AuthScope.ANY); - LOG.error("Cloudian admin API authentication failed, please check Cloudian configuration. Admin auth principal=" + credentials.getUserPrincipal() + ", password=" + credentials.getPassword() + ", API url=" + adminApiUrl); + logger.error("Cloudian admin API authentication failed, please check Cloudian configuration. 
Admin auth principal=" + credentials.getUserPrincipal() + ", password=" + credentials.getPassword() + ", API url=" + adminApiUrl); throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, "Cloudian backend API call unauthorized, please ask your administrator to fix integration issues."); } } private void checkResponseOK(final HttpResponse response) { if (response.getStatusLine().getStatusCode() == HttpStatus.SC_NO_CONTENT) { - LOG.debug("Requested Cloudian resource does not exist"); + logger.debug("Requested Cloudian resource does not exist"); return; } if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK && response.getStatusLine().getStatusCode() != HttpStatus.SC_NO_CONTENT) { @@ -178,12 +179,12 @@ public boolean addUser(final CloudianUser user) { if (user == null) { return false; } - LOG.debug("Adding Cloudian user: " + user); + logger.debug("Adding Cloudian user: " + user); try { final HttpResponse response = put("/user", user); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (final IOException e) { - LOG.error("Failed to add Cloudian user due to:", e); + logger.error("Failed to add Cloudian user due to:", e); checkResponseTimeOut(e); } return false; @@ -193,7 +194,7 @@ public CloudianUser listUser(final String userId, final String groupId) { if (StringUtils.isAnyEmpty(userId, groupId)) { return null; } - LOG.debug("Trying to find Cloudian user with id=" + userId + " and group id=" + groupId); + logger.debug("Trying to find Cloudian user with id=" + userId + " and group id=" + groupId); try { final HttpResponse response = get(String.format("/user?userId=%s&groupId=%s", userId, groupId)); checkResponseOK(response); @@ -203,7 +204,7 @@ public CloudianUser listUser(final String userId, final String groupId) { final ObjectMapper mapper = new ObjectMapper(); return mapper.readValue(response.getEntity().getContent(), CloudianUser.class); } catch (final IOException e) { - LOG.error("Failed to list Cloudian user due to:", e); + 
logger.error("Failed to list Cloudian user due to:", e); checkResponseTimeOut(e); } return null; @@ -213,7 +214,7 @@ public List listUsers(final String groupId) { if (StringUtils.isEmpty(groupId)) { return new ArrayList<>(); } - LOG.debug("Trying to list Cloudian users in group id=" + groupId); + logger.debug("Trying to list Cloudian users in group id=" + groupId); try { final HttpResponse response = get(String.format("/user/list?groupId=%s&userType=all&userStatus=active", groupId)); checkResponseOK(response); @@ -223,7 +224,7 @@ public List listUsers(final String groupId) { final ObjectMapper mapper = new ObjectMapper(); return Arrays.asList(mapper.readValue(response.getEntity().getContent(), CloudianUser[].class)); } catch (final IOException e) { - LOG.error("Failed to list Cloudian users due to:", e); + logger.error("Failed to list Cloudian users due to:", e); checkResponseTimeOut(e); } return new ArrayList<>(); @@ -233,12 +234,12 @@ public boolean updateUser(final CloudianUser user) { if (user == null) { return false; } - LOG.debug("Updating Cloudian user: " + user); + logger.debug("Updating Cloudian user: " + user); try { final HttpResponse response = post("/user", user); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (final IOException e) { - LOG.error("Failed to update Cloudian user due to:", e); + logger.error("Failed to update Cloudian user due to:", e); checkResponseTimeOut(e); } return false; @@ -248,12 +249,12 @@ public boolean removeUser(final String userId, final String groupId) { if (StringUtils.isAnyEmpty(userId, groupId)) { return false; } - LOG.debug("Removing Cloudian user with user id=" + userId + " in group id=" + groupId); + logger.debug("Removing Cloudian user with user id=" + userId + " in group id=" + groupId); try { final HttpResponse response = delete(String.format("/user?userId=%s&groupId=%s", userId, groupId)); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (final IOException 
e) { - LOG.error("Failed to remove Cloudian user due to:", e); + logger.error("Failed to remove Cloudian user due to:", e); checkResponseTimeOut(e); } return false; @@ -267,12 +268,12 @@ public boolean addGroup(final CloudianGroup group) { if (group == null) { return false; } - LOG.debug("Adding Cloudian group: " + group); + logger.debug("Adding Cloudian group: " + group); try { final HttpResponse response = put("/group", group); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (final IOException e) { - LOG.error("Failed to add Cloudian group due to:", e); + logger.error("Failed to add Cloudian group due to:", e); checkResponseTimeOut(e); } return false; @@ -282,7 +283,7 @@ public CloudianGroup listGroup(final String groupId) { if (StringUtils.isEmpty(groupId)) { return null; } - LOG.debug("Trying to find Cloudian group with id=" + groupId); + logger.debug("Trying to find Cloudian group with id=" + groupId); try { final HttpResponse response = get(String.format("/group?groupId=%s", groupId)); checkResponseOK(response); @@ -292,14 +293,14 @@ public CloudianGroup listGroup(final String groupId) { final ObjectMapper mapper = new ObjectMapper(); return mapper.readValue(response.getEntity().getContent(), CloudianGroup.class); } catch (final IOException e) { - LOG.error("Failed to list Cloudian group due to:", e); + logger.error("Failed to list Cloudian group due to:", e); checkResponseTimeOut(e); } return null; } public List listGroups() { - LOG.debug("Trying to list Cloudian groups"); + logger.debug("Trying to list Cloudian groups"); try { final HttpResponse response = get("/group/list"); checkResponseOK(response); @@ -309,7 +310,7 @@ public List listGroups() { final ObjectMapper mapper = new ObjectMapper(); return Arrays.asList(mapper.readValue(response.getEntity().getContent(), CloudianGroup[].class)); } catch (final IOException e) { - LOG.error("Failed to list Cloudian groups due to:", e); + logger.error("Failed to list Cloudian groups 
due to:", e); checkResponseTimeOut(e); } return new ArrayList<>(); @@ -319,12 +320,12 @@ public boolean updateGroup(final CloudianGroup group) { if (group == null) { return false; } - LOG.debug("Updating Cloudian group: " + group); + logger.debug("Updating Cloudian group: " + group); try { final HttpResponse response = post("/group", group); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (final IOException e) { - LOG.error("Failed to update group due to:", e); + logger.error("Failed to update group due to:", e); checkResponseTimeOut(e); } return false; @@ -334,12 +335,12 @@ public boolean removeGroup(final String groupId) { if (StringUtils.isEmpty(groupId)) { return false; } - LOG.debug("Removing Cloudian group id=" + groupId); + logger.debug("Removing Cloudian group id=" + groupId); try { final HttpResponse response = delete(String.format("/group?groupId=%s", groupId)); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (final IOException e) { - LOG.error("Failed to remove group due to:", e); + logger.error("Failed to remove group due to:", e); checkResponseTimeOut(e); } return false; diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianUtils.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianUtils.java index 0ef0fc9b5e02..882d615ca0b2 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianUtils.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/client/CloudianUtils.java @@ -24,14 +24,15 @@ import javax.crypto.spec.SecretKeySpec; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.HttpUtils; import org.apache.commons.lang3.StringUtils; public class CloudianUtils { - private static final Logger LOG = 
Logger.getLogger(CloudianUtils.class); + protected static final Logger LOGGER = LogManager.getLogger(CloudianUtils.class); private static final String HMAC_SHA1_ALGORITHM = "HmacSHA1"; /** @@ -51,7 +52,7 @@ public static String generateHMACSignature(final String data, final String key) byte[] rawHmac = mac.doFinal(data.getBytes()); return Base64.encodeBase64String(rawHmac); } catch (final Exception e) { - LOG.error("Failed to generate HMAC signature from provided data and key, due to: ", e); + LOGGER.error("Failed to generate HMAC signature from provided data and key, due to: ", e); } return null; } diff --git a/plugins/integrations/kubernetes-service/pom.xml b/plugins/integrations/kubernetes-service/pom.xml index 4f5e1bcf1891..397a3b43b31d 100644 --- a/plugins/integrations/kubernetes-service/pom.xml +++ b/plugins/integrations/kubernetes-service/pom.xml @@ -86,9 +86,12 @@ ${cs.guava.version} - ch.qos.reload4j - reload4j - ${cs.reload4j.version} + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api org.springframework diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 281fe84089fe..fb95a35a6feb 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -73,8 +73,6 @@ import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.NetworkOfferingJoinDao; @@ -179,10 +177,10 @@ import com.cloud.vm.VMInstanceVO; import 
com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.logging.log4j.Level; public class KubernetesClusterManagerImpl extends ManagerBase implements KubernetesClusterService { - private static final Logger LOGGER = Logger.getLogger(KubernetesClusterManagerImpl.class); private static final String DEFAULT_NETWORK_OFFERING_FOR_KUBERNETES_SERVICE_NAME = "DefaultNetworkOfferingforKubernetesService"; protected StateMachine2 _stateMachine = KubernetesCluster.State.getStateMachine(); @@ -263,15 +261,15 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne private void logMessage(final Level logLevel, final String message, final Exception e) { if (logLevel == Level.WARN) { if (e != null) { - LOGGER.warn(message, e); + logger.warn(message, e); } else { - LOGGER.warn(message); + logger.warn(message); } } else { if (e != null) { - LOGGER.error(message, e); + logger.error(message, e); } else { - LOGGER.error(message); + logger.error(message); } } } @@ -299,25 +297,25 @@ private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { // Check network offering String networkOfferingName = KubernetesClusterNetworkOffering.value(); if (networkOfferingName == null || networkOfferingName.isEmpty()) { - LOGGER.warn(String.format("Global setting %s is empty. Admin has not yet specified the network offering to be used for provisioning isolated network for the cluster", KubernetesClusterNetworkOffering.key())); + logger.warn(String.format("Global setting %s is empty. 
Admin has not yet specified the network offering to be used for provisioning isolated network for the cluster", KubernetesClusterNetworkOffering.key())); return false; } NetworkOfferingVO networkOffering = networkOfferingDao.findByUniqueName(networkOfferingName); if (networkOffering == null) { - LOGGER.warn(String.format("Unable to find the network offering %s to be used for provisioning Kubernetes cluster", networkOfferingName)); + logger.warn(String.format("Unable to find the network offering %s to be used for provisioning Kubernetes cluster", networkOfferingName)); return false; } if (networkOffering.getState() == NetworkOffering.State.Disabled) { - LOGGER.warn(String.format("Network offering ID: %s is not enabled", networkOffering.getUuid())); + logger.warn(String.format("Network offering ID: %s is not enabled", networkOffering.getUuid())); return false; } List services = networkOfferingServiceMapDao.listServicesForNetworkOffering(networkOffering.getId()); if (services == null || services.isEmpty() || !services.contains("SourceNat")) { - LOGGER.warn(String.format("Network offering ID: %s does not have necessary services to provision Kubernetes cluster", networkOffering.getUuid())); + logger.warn(String.format("Network offering ID: %s does not have necessary services to provision Kubernetes cluster", networkOffering.getUuid())); return false; } if (!networkOffering.isEgressDefaultPolicy()) { - LOGGER.warn(String.format("Network offering ID: %s has egress default policy turned off should be on to provision Kubernetes cluster", networkOffering.getUuid())); + logger.warn(String.format("Network offering ID: %s has egress default policy turned off should be on to provision Kubernetes cluster", networkOffering.getUuid())); return false; } boolean offeringAvailableForZone = false; @@ -329,13 +327,13 @@ private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { } } if (!offeringAvailableForZone) { - LOGGER.warn(String.format("Network offering ID: %s 
is not available for zone ID: %s", networkOffering.getUuid(), zone.getUuid())); + logger.warn(String.format("Network offering ID: %s is not available for zone ID: %s", networkOffering.getUuid(), zone.getUuid())); return false; } long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType()); PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId); if (physicalNetwork == null) { - LOGGER.warn(String.format("Unable to find physical network with tag: %s", networkOffering.getTags())); + logger.warn(String.format("Unable to find physical network with tag: %s", networkOffering.getTags())); return false; } return true; @@ -364,7 +362,7 @@ private IpAddress getSourceNatIp(Network network) { public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) { VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType); if (DataCenter.Type.Edge.equals(dataCenter.getType()) && template != null && !template.isDirectDownload()) { - LOGGER.debug(String.format("Template %s can not be used for edge zone %s", template, dataCenter)); + logger.debug(String.format("Template %s can not be used for edge zone %s", template, dataCenter)); template = templateDao.findRoutingTemplate(hypervisorType, networkHelper.getHypervisorRouterTemplateConfigMap().get(hypervisorType).valueIn(dataCenter.getId())); } if (template == null) { @@ -378,8 +376,8 @@ protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose pu for (FirewallRuleVO rule : rules) { Integer startPort = rule.getSourcePortStart(); Integer endPort = rule.getSourcePortEnd(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Validating rule with purpose: %s for network: %s with ports: %d-%d", purpose.toString(), network.getUuid(), startPort, endPort)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Validating 
rule with purpose: %s for network: %s with ports: %d-%d", purpose.toString(), network.getUuid(), startPort, endPort)); } if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for API access", network.getUuid(), purpose.toString().toLowerCase())); @@ -508,12 +506,12 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking host ID: %s for capacity already reserved %d", hostVO.getUuid(), reserved)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Checking host ID: %s for capacity already reserved %d", hostVO.getUuid(), reserved)); } if (capacityManager.checkIfHostHasCapacity(hostVO.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%s", hostVO.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found host ID: %s for with enough capacity, CPU=%d RAM=%s", hostVO.getUuid(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved))); } hostEntry.setValue(new Pair(hostVO, reserved)); suitable_host_found = true; @@ -522,21 +520,21 @@ private DeployDestination plan(final long nodesCount, final DataCenter zone, fin } } if (!suitable_host_found) { - if (LOGGER.isInfoEnabled()) { - 
LOGGER.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d with offering ID: %s", zone.getUuid(), i, offering.getUuid())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Suitable hosts not found in datacenter ID: %s for node %d with offering ID: %s", zone.getUuid(), i, offering.getUuid())); } break; } } if (suitable_host_found) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Suitable hosts found in datacenter ID: %s, creating deployment destination", zone.getUuid())); } return new DeployDestination(zone, null, planCluster, null); } String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering ID: %s", cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getUuid()); - LOGGER.warn(msg); + logger.warn(msg); throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } @@ -859,8 +857,8 @@ private Network getKubernetesClusterNetworkIfMissing(final String clusterName, f long physicalNetworkId = networkModel.findPhysicalNetworkId(zone.getId(), networkOffering.getTags(), networkOffering.getTrafficType()); PhysicalNetwork physicalNetwork = physicalNetworkDao.findById(physicalNetworkId); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Creating network for account ID: %s from the network offering ID: %s as part of Kubernetes cluster: %s deployment process", owner.getUuid(), networkOffering.getUuid(), clusterName)); + if (logger.isInfoEnabled()) { + logger.info(String.format("Creating network for account ID: %s from the network offering ID: %s as part of Kubernetes cluster: %s deployment process", owner.getUuid(), networkOffering.getUuid(), clusterName)); } try { @@ -1132,7 +1130,7 @@ protected boolean stateTransitTo(long kubernetesClusterId, 
KubernetesCluster.Eve try { return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao); } catch (NoTransitionException nte) { - LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster : %s in state %s on event %s", kubernetesCluster.getName(), kubernetesCluster.getState().toString(), e.toString()), nte); + logger.warn(String.format("Failed to transition state of the Kubernetes cluster : %s in state %s on event %s", kubernetesCluster.getName(), kubernetesCluster.getState().toString(), e.toString()), nte); return false; } } @@ -1181,8 +1179,8 @@ public KubernetesClusterVO doInTransaction(TransactionStatus status) { addKubernetesClusterDetails(cluster, network, cmd); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster with name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Kubernetes cluster with name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); } return cluster; } @@ -1241,8 +1239,8 @@ public KubernetesClusterVO doInTransaction(TransactionStatus status) { addKubernetesClusterDetails(cluster, defaultNetwork, cmd); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Kubernetes cluster name: %s and ID: %s has been created", cluster.getName(), cluster.getUuid())); } return cluster; } @@ -1296,14 +1294,14 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate } accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); if (kubernetesCluster.getState().equals(KubernetesCluster.State.Running)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Kubernetes cluster : %s is in running state", 
kubernetesCluster.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Kubernetes cluster : %s is in running state", kubernetesCluster.getName())); } return true; } if (kubernetesCluster.getState().equals(KubernetesCluster.State.Starting)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Kubernetes cluster : %s is already in starting state", kubernetesCluster.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Kubernetes cluster : %s is already in starting state", kubernetesCluster.getName())); } return true; } @@ -1367,14 +1365,14 @@ public boolean stopKubernetesCluster(StopKubernetesClusterCmd cmd) throws CloudR } accountManager.checkAccess(CallContext.current().getCallingAccount(), SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Kubernetes cluster : %s is already stopped", kubernetesCluster.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Kubernetes cluster : %s is already stopped", kubernetesCluster.getName())); } return true; } if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopping)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Kubernetes cluster : %s is getting stopped", kubernetesCluster.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Kubernetes cluster : %s is getting stopped", kubernetesCluster.getName())); } return true; } @@ -1719,26 +1717,26 @@ public void reallyRun() { try { List kubernetesClusters = kubernetesClusterDao.findKubernetesClustersToGarbageCollect(); for (KubernetesCluster kubernetesCluster : kubernetesClusters) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Running Kubernetes cluster garbage collector on Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + 
logger.info(String.format("Running Kubernetes cluster garbage collector on Kubernetes cluster : %s", kubernetesCluster.getName())); } try { KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); destroyWorker = ComponentContext.inject(destroyWorker); if (destroyWorker.destroy()) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Garbage collection complete for Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Garbage collection complete for Kubernetes cluster : %s", kubernetesCluster.getName())); } } else { - LOGGER.warn(String.format("Garbage collection failed for Kubernetes cluster : %s, it will be attempted to garbage collected in next run", kubernetesCluster.getName())); + logger.warn(String.format("Garbage collection failed for Kubernetes cluster : %s, it will be attempted to garbage collected in next run", kubernetesCluster.getName())); } } catch (CloudRuntimeException e) { - LOGGER.warn(String.format("Failed to destroy Kubernetes cluster : %s during GC", kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to destroy Kubernetes cluster : %s during GC", kubernetesCluster.getName()), e); // proceed further with rest of the Kubernetes cluster garbage collection } } } catch (Exception e) { - LOGGER.warn("Caught exception while running Kubernetes cluster gc: ", e); + logger.warn("Caught exception while running Kubernetes cluster gc: ", e); } } } @@ -1776,38 +1774,38 @@ public void reallyRun() { // run through Kubernetes clusters in 'Running' state and ensure all the VM's are Running in the cluster List runningKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Running); for (KubernetesCluster kubernetesCluster : runningKubernetesClusters) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Running Kubernetes cluster state 
scanner on Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s", kubernetesCluster.getName())); } try { if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); } } catch (Exception e) { - LOGGER.warn(String.format("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to run Kubernetes cluster Running state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); } } // run through Kubernetes clusters in 'Stopped' state and ensure all the VM's are Stopped in the cluster List stoppedKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Stopped); for (KubernetesCluster kubernetesCluster : stoppedKubernetesClusters) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Stopped.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Stopped.toString())); } try { if (!isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Stopped)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.FaultsDetected); } } catch (Exception e) { - LOGGER.warn(String.format("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to run Kubernetes cluster Stopped state scanner on Kubernetes cluster : %s status scanner", 
kubernetesCluster.getName()), e); } } // run through Kubernetes clusters in 'Alert' state and reconcile state as 'Running' if the VM's are running or 'Stopped' if VM's are stopped List alertKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Alert); for (KubernetesClusterVO kubernetesCluster : alertKubernetesClusters) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Alert.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Alert.toString())); } try { if (isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { @@ -1820,7 +1818,7 @@ public void reallyRun() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); } } catch (Exception e) { - LOGGER.warn(String.format("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to run Kubernetes cluster Alert state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); } } @@ -1832,8 +1830,8 @@ public void reallyRun() { if ((new Date()).getTime() - kubernetesCluster.getCreated().getTime() < 10*60*1000) { continue; } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Starting.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Starting.toString())); } try { if 
(isClusterVMsInDesiredState(kubernetesCluster, VirtualMachine.State.Running)) { @@ -1842,25 +1840,25 @@ public void reallyRun() { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (Exception e) { - LOGGER.warn(String.format("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to run Kubernetes cluster Starting state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); } } List destroyingKubernetesClusters = kubernetesClusterDao.findManagedKubernetesClustersInState(KubernetesCluster.State.Destroying); for (KubernetesCluster kubernetesCluster : destroyingKubernetesClusters) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Destroying.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Running Kubernetes cluster state scanner on Kubernetes cluster : %s for state: %s", kubernetesCluster.getName(), KubernetesCluster.State.Destroying.toString())); } try { KubernetesClusterDestroyWorker destroyWorker = new KubernetesClusterDestroyWorker(kubernetesCluster, KubernetesClusterManagerImpl.this); destroyWorker = ComponentContext.inject(destroyWorker); destroyWorker.destroy(); } catch (Exception e) { - LOGGER.warn(String.format("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to run Kubernetes cluster Destroying state scanner on Kubernetes cluster : %s status scanner", kubernetesCluster.getName()), e); } } } } catch (Exception e) { - LOGGER.warn("Caught exception while running Kubernetes cluster state scanner", e); + logger.warn("Caught exception while running Kubernetes cluster state scanner", 
e); } firstRun = false; } @@ -1872,8 +1870,8 @@ boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualM // check cluster is running at desired capacity include control nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s", clusterVMs.size(), kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), state.toString())); } return false; @@ -1882,8 +1880,8 @@ boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualM for (KubernetesClusterVmMapVO clusterVm : clusterVMs) { VMInstanceVO vm = vmInstanceDao.findByIdIncludingRemoved(clusterVm.getVmId()); if (vm.getState() != state) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Found VM : %s in the Kubernetes cluster : %s in state: %s while expected to be in state: %s. So moving the cluster to Alert state for reconciliation", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found VM : %s in the Kubernetes cluster : %s in state: %s while expected to be in state: %s. 
So moving the cluster to Alert state for reconciliation", vm.getUuid(), kubernetesCluster.getName(), vm.getState().toString(), state.toString())); } return false; @@ -1924,7 +1922,7 @@ public boolean start() { new NetworkOfferingServiceMapVO(defaultKubernetesServiceNetworkOffering.getId(), service, defaultKubernetesServiceNetworkOfferingProviders.get(service)); networkOfferingServiceMapDao.persist(offService); - LOGGER.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } _gcExecutor.scheduleWithFixedDelay(new KubernetesClusterGarbageCollector(), 300, 300, TimeUnit.SECONDS); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index a84320e4d7f5..f9a1d5dd48f6 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -31,6 +31,9 @@ import javax.inject.Inject; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.config.ApiServiceConfiguration; @@ -39,8 +42,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; @@ -108,7 +109,7 @@ public class KubernetesClusterActionWorker { public static final String 
CKS_CLUSTER_SECURITY_GROUP_NAME = "CKSSecurityGroup"; public static final String CKS_SECURITY_GROUP_DESCRIPTION = "Security group for CKS nodes"; - protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterActionWorker.class); + protected Logger logger = LogManager.getLogger(getClass()); protected StateMachine2 _stateMachine = KubernetesCluster.State.getStateMachine(); @@ -227,32 +228,32 @@ protected String getControlNodeLoginUser() { protected void logMessage(final Level logLevel, final String message, final Exception e) { if (logLevel == Level.INFO) { - if (LOGGER.isInfoEnabled()) { + if (logger.isInfoEnabled()) { if (e != null) { - LOGGER.info(message, e); + logger.info(message, e); } else { - LOGGER.info(message); + logger.info(message); } } } else if (logLevel == Level.DEBUG) { - if (LOGGER.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (e != null) { - LOGGER.debug(message, e); + logger.debug(message, e); } else { - LOGGER.debug(message); + logger.debug(message); } } } else if (logLevel == Level.WARN) { if (e != null) { - LOGGER.warn(message, e); + logger.warn(message, e); } else { - LOGGER.warn(message); + logger.warn(message); } } else { if (e != null) { - LOGGER.error(message, e); + logger.error(message, e); } else { - LOGGER.error(message); + logger.error(message); } } } @@ -270,7 +271,7 @@ protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final Stri protected void deleteTemplateLaunchPermission() { if (clusterTemplate != null && owner != null) { - LOGGER.info("Revoking launch permission for systemVM template"); + logger.info("Revoking launch permission for systemVM template"); launchPermissionDao.removePermissions(clusterTemplate.getId(), Collections.singletonList(owner.getId())); } } @@ -325,7 +326,7 @@ private UserVm fetchControlVmIfMissing(final UserVm controlVm) { } List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); if (CollectionUtils.isEmpty(clusterVMs)) { - 
LOGGER.warn(String.format("Unable to retrieve VMs for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.warn(String.format("Unable to retrieve VMs for Kubernetes cluster : %s", kubernetesCluster.getName())); return null; } List vmIds = new ArrayList<>(); @@ -350,7 +351,7 @@ protected IpAddress getNetworkSourceNatIp(Network network) { if (CollectionUtils.isNotEmpty(addresses)) { return addresses.get(0); } - LOGGER.warn(String.format("No public IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + logger.warn(String.format("No public IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); return null; } @@ -361,7 +362,7 @@ protected IpAddress getVpcTierKubernetesPublicIp(Network network) { } IpAddress address = ipAddressDao.findByUuid(detailsVO.getValue()); if (address == null || network.getVpcId() != address.getVpcId()) { - LOGGER.warn(String.format("Public IP with ID: %s linked to the Kubernetes cluster: %s is not usable", detailsVO.getValue(), kubernetesCluster.getName())); + logger.warn(String.format("Public IP with ID: %s linked to the Kubernetes cluster: %s is not usable", detailsVO.getValue(), kubernetesCluster.getName())); return null; } return address; @@ -392,7 +393,7 @@ protected Pair getKubernetesClusterServerIpSshPortForSharedNetw int port = DEFAULT_SSH_PORT; controlVm = fetchControlVmIfMissing(controlVm); if (controlVm == null) { - LOGGER.warn(String.format("Unable to retrieve control VM for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.warn(String.format("Unable to retrieve control VM for Kubernetes cluster : %s", kubernetesCluster.getName())); return new Pair<>(null, port); } return new Pair<>(controlVm.getPrivateIpAddress(), port); @@ -412,7 +413,7 @@ protected Pair getKubernetesClusterServerIpSshPortForVpcTier(Ne return new Pair<>(address.getAddress().addr(), port); } } - 
LOGGER.warn(String.format("No public IP found for the VPC tier: %s, Kubernetes cluster : %s", network, kubernetesCluster.getName())); + logger.warn(String.format("No public IP found for the VPC tier: %s, Kubernetes cluster : %s", network, kubernetesCluster.getName())); return new Pair<>(null, port); } @@ -425,7 +426,7 @@ protected Pair getKubernetesClusterServerIpSshPort(UserVm contr } Network network = networkDao.findById(kubernetesCluster.getNetworkId()); if (network == null) { - LOGGER.warn(String.format("Network for Kubernetes cluster : %s cannot be found", kubernetesCluster.getName())); + logger.warn(String.format("Network for Kubernetes cluster : %s cannot be found", kubernetesCluster.getName())); return new Pair<>(null, port); } if (network.getVpcId() != null) { @@ -436,7 +437,7 @@ protected Pair getKubernetesClusterServerIpSshPort(UserVm contr } else if (Network.GuestType.Shared.equals(network.getGuestType())) { return getKubernetesClusterServerIpSshPortForSharedNetwork(controlVm); } - LOGGER.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster : %s", kubernetesCluster.getName())); return new Pair<>(null, port); } @@ -444,7 +445,7 @@ protected Pair getKubernetesClusterServerIpSshPort(UserVm contr try { return getKubernetesClusterServerIpSshPort(controlVm, false); } catch (InsufficientAddressCapacityException | ResourceAllocationException | ResourceUnavailableException e) { - LOGGER.debug("This exception should not have occurred", e); + logger.debug("This exception should not have occurred", e); } return new Pair<>(null, CLUSTER_NODES_DEFAULT_START_SSH_PORT); } @@ -476,8 +477,8 @@ protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesS for (UserVm vm : clusterVMs) { try { templateService.attachIso(iso.getId(), vm.getId(), true); - if (LOGGER.isInfoEnabled()) { - 
LOGGER.info(String.format("Attached binaries ISO for VM : %s in cluster: %s", vm.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Attached binaries ISO for VM : %s in cluster: %s", vm.getDisplayName(), kubernetesCluster.getName())); } } catch (CloudRuntimeException ex) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to attach binaries ISO for VM : %s in the Kubernetes cluster name: %s", vm.getDisplayName(), kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent, ex); @@ -495,15 +496,15 @@ protected void detachIsoKubernetesVMs(List clusterVMs) { try { result = templateService.detachIso(vm.getId(), true); } catch (CloudRuntimeException ex) { - LOGGER.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName()), ex); + logger.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName()), ex); } if (result) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Detached Kubernetes binaries from VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Detached Kubernetes binaries from VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } continue; } - LOGGER.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName())); + logger.warn(String.format("Failed to detach binaries ISO from VM : %s in the Kubernetes cluster : %s ", vm.getDisplayName(), kubernetesCluster.getName())); } } @@ -546,7 +547,7 @@ protected boolean stateTransitTo(long kubernetesClusterId, KubernetesCluster.Eve try { return _stateMachine.transitTo(kubernetesCluster, e, null, kubernetesClusterDao); } catch 
(NoTransitionException nte) { - LOGGER.warn(String.format("Failed to transition state of the Kubernetes cluster : %s in state %s on event %s", + logger.warn(String.format("Failed to transition state of the Kubernetes cluster : %s in state %s on event %s", kubernetesCluster.getName(), kubernetesCluster.getState().toString(), e.toString()), nte); return false; } @@ -571,7 +572,7 @@ protected boolean createCloudStackSecret(String[] keys) { return result.first(); } catch (Exception e) { String msg = String.format("Failed to add cloudstack-secret to Kubernetes cluster: %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); + logger.warn(msg, e); } return false; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java index 29da3ffb59d3..6e87d2071cdc 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.context.CallContext; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Level; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; @@ -55,6 +54,7 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import org.apache.logging.log4j.Level; public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceModifierActionWorker { @@ -77,7 +77,7 @@ private void validateClusterSate() { || kubernetesCluster.getState().equals(KubernetesCluster.State.Destroying))) { String msg = 
String.format("Cannot perform delete operation on cluster : %s in state: %s", kubernetesCluster.getName(), kubernetesCluster.getState()); - LOGGER.warn(msg); + logger.warn(msg); throw new PermissionDeniedException(msg); } } @@ -96,15 +96,15 @@ private boolean destroyClusterVMs() { try { UserVm vm = userVmService.destroyVm(vmID, true); if (!userVmManager.expunge(userVM)) { - LOGGER.warn(String.format("Unable to expunge VM %s : %s, destroying Kubernetes cluster will probably fail", + logger.warn(String.format("Unable to expunge VM %s : %s, destroying Kubernetes cluster will probably fail", vm.getInstanceName() , vm.getUuid())); } kubernetesClusterVmMapDao.expunge(clusterVM.getId()); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Destroyed VM : %s as part of Kubernetes cluster : %s cleanup", vm.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Destroyed VM : %s as part of Kubernetes cluster : %s cleanup", vm.getDisplayName(), kubernetesCluster.getName())); } } catch (ResourceUnavailableException | ConcurrentOperationException e) { - LOGGER.warn(String.format("Failed to destroy VM : %s part of the Kubernetes cluster : %s cleanup. Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getDisplayName(), kubernetesCluster.getName()), e); + logger.warn(String.format("Failed to destroy VM : %s part of the Kubernetes cluster : %s cleanup. 
Moving on with destroying remaining resources provisioned for the Kubernetes cluster", userVM.getDisplayName(), kubernetesCluster.getName()), e); return false; } } @@ -127,11 +127,11 @@ private void destroyKubernetesClusterNetwork() throws ManagementServerException boolean networkDestroyed = networkMgr.destroyNetwork(kubernetesCluster.getNetworkId(), context, true); if (!networkDestroyed) { String msg = String.format("Failed to destroy network : %s as part of Kubernetes cluster : %s cleanup", network.getName(), kubernetesCluster.getName()); - LOGGER.warn(msg); + logger.warn(msg); throw new ManagementServerException(msg); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Destroyed network : %s as part of Kubernetes cluster : %s cleanup", + if (logger.isInfoEnabled()) { + logger.info(String.format("Destroyed network : %s as part of Kubernetes cluster : %s cleanup", network.getName(), kubernetesCluster.getName())); } } @@ -264,11 +264,11 @@ public boolean destroy() throws CloudRuntimeException { } } } else { - LOGGER.error(String.format("Failed to find network for Kubernetes cluster : %s", kubernetesCluster.getName())); + logger.error(String.format("Failed to find network for Kubernetes cluster : %s", kubernetesCluster.getName())); } } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Destroying Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Destroying Kubernetes cluster : %s", kubernetesCluster.getName())); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.DestroyRequested); boolean vmsDestroyed = destroyClusterVMs(); @@ -280,7 +280,7 @@ public boolean destroy() throws CloudRuntimeException { destroyKubernetesClusterNetwork(); } catch (ManagementServerException e) { String msg = String.format("Failed to destroy network of Kubernetes cluster : %s cleanup", kubernetesCluster.getName()); - LOGGER.warn(msg, e); + logger.warn(msg, e); 
updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); } @@ -289,7 +289,7 @@ public boolean destroy() throws CloudRuntimeException { checkForRulesToDelete(); } catch (ManagementServerException e) { String msg = String.format("Failed to remove network rules of Kubernetes cluster : %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); + logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); } @@ -297,14 +297,14 @@ public boolean destroy() throws CloudRuntimeException { releaseVpcTierPublicIpIfNeeded(); } catch (InsufficientAddressCapacityException e) { String msg = String.format("Failed to release public IP for VPC tier used by Kubernetes cluster : %s", kubernetesCluster.getName()); - LOGGER.warn(msg, e); + logger.warn(msg, e); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); } } } else { String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster : %s cleanup", kubernetesCluster.getName()); - LOGGER.warn(msg); + logger.warn(msg); updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg); } @@ -317,8 +317,8 @@ public boolean destroy() throws CloudRuntimeException { updateKubernetesClusterEntryForGC(); return false; } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s is successfully deleted", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Kubernetes cluster : %s is successfully deleted", kubernetesCluster.getName())); } return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index 0ae22bf8c8d5..c7451adf502d 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -40,7 +40,6 @@ import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; import com.cloud.capacity.CapacityManager; import com.cloud.dc.ClusterDetailsDao; @@ -110,6 +109,7 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.logging.log4j.Level; public class KubernetesClusterResourceModifierActionWorker extends KubernetesClusterActionWorker { @@ -255,12 +255,12 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f ClusterDetailsVO cluster_detail_ram = clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking host : %s for capacity already reserved %d", h.getName(), reserved)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Checking host : %s for capacity already reserved %d", h.getName(), reserved)); } if (capacityManager.checkIfHostHasCapacity(h.getId(), cpu_requested * reserved, ram_requested * reserved, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Found host : %s for with enough capacity, CPU=%d RAM=%s", h.getName(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found host : %s for with enough capacity, CPU=%d 
RAM=%s", h.getName(), cpu_requested * reserved, toHumanReadableSize(ram_requested * reserved))); } hostEntry.setValue(new Pair(h, reserved)); suitable_host_found = true; @@ -268,31 +268,31 @@ protected DeployDestination plan(final long nodesCount, final DataCenter zone, f } } if (!suitable_host_found) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Suitable hosts not found in datacenter : %s for node %d, with offering : %s and hypervisor: %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Suitable hosts not found in datacenter : %s for node %d, with offering : %s and hypervisor: %s", zone.getName(), i, offering.getName(), clusterTemplate.getHypervisorType().toString())); } break; } } if (suitable_host_found) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Suitable hosts found in datacenter : %s, creating deployment destination", zone.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Suitable hosts found in datacenter : %s, creating deployment destination", zone.getName())); } return new DeployDestination(zone, null, null, null); } String msg = String.format("Cannot find enough capacity for Kubernetes cluster(requested cpu=%d memory=%s) with offering : %s and hypervisor: %s", cpu_requested * nodesCount, toHumanReadableSize(ram_requested * nodesCount), offering.getName(), clusterTemplate.getHypervisorType().toString()); - LOGGER.warn(msg); + logger.warn(msg); throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } protected DeployDestination plan() throws InsufficientServerCapacityException { ServiceOffering offering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Checking deployment destination for Kubernetes cluster : %s in zone : %s", kubernetesCluster.getName(), zone.getName())); + if 
(logger.isDebugEnabled()) { + logger.debug(String.format("Checking deployment destination for Kubernetes cluster : %s in zone : %s", kubernetesCluster.getName(), zone.getName())); } return plan(kubernetesCluster.getTotalNodeCount(), zone, offering); } @@ -328,8 +328,8 @@ protected void startKubernetesVM(final UserVm vm) throws ManagementServerExcepti f.setAccessible(true); f.set(startVm, vm.getId()); itMgr.advanceStart(vm.getUuid(), null, null); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Started VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Started VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } } catch (IllegalAccessException | NoSuchFieldException | OperationTimedoutException | ResourceUnavailableException | InsufficientCapacityException ex) { throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster : %s", kubernetesCluster.getName()), ex); @@ -356,8 +356,8 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f throw new ManagementServerException(String.format("Failed to provision worker VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } nodes.add(vm); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned node VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned node VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } } return nodes; @@ -412,8 +412,8 @@ protected UserVm createKubernetesNode(String joinIp) throws ManagementServerExce Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } - 
if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); } return nodeVm; } @@ -471,8 +471,8 @@ protected void provisionPublicIpPortForwardingRule(IpAddress publicIp, Network n return newRule; }); rulesService.applyPortForwardingRules(publicIp.getId(), account); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned SSH port forwarding rule: %s from port %d to %d on %s to the VM IP : %s in Kubernetes cluster : %s", pfRule.getUuid(), sourcePort, destPort, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned SSH port forwarding rule: %s from port %d to %d on %s to the VM IP : %s in Kubernetes cluster : %s", pfRule.getUuid(), sourcePort, destPort, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); } } @@ -631,8 +631,8 @@ protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, try { int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | 
ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); @@ -643,8 +643,8 @@ protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, // Firewall rule for API access for control node VMs try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { @@ -691,8 +691,8 @@ protected void createVpcTierAclRules(Network network) throws ManagementServerExc // ACL rule for API access for control node VMs try { provisionVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { @@ -700,8 +700,8 @@ protected void createVpcTierAclRules(Network network) throws ManagementServerExc } try { provisionVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", + if 
(logger.isInfoEnabled()) { + logger.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { @@ -716,8 +716,8 @@ protected void removeVpcTierAclRules(Network network) throws ManagementServerExc // ACL rule for API access for control node VMs try { removeVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { @@ -726,8 +726,8 @@ protected void removeVpcTierAclRules(Network network) throws ManagementServerExc // ACL rule for SSH access for all node VMs try { removeVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index df94642a8810..ec04907cf9d4 100644 
--- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.InternalIdentity; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; import com.cloud.dc.DataCenter; import com.cloud.exception.InsufficientCapacityException; @@ -57,6 +56,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.logging.log4j.Level; public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModifierActionWorker { @@ -162,8 +162,8 @@ private void scaleKubernetesClusterVpcTierRules(final List clusterVMIds) t */ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) throws ManagementServerException { if (!Network.GuestType.Isolated.equals(network.getGuestType())) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); } return; } @@ -204,7 +204,7 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { - LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); + 
logger.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); } else { result = SshHelper.sshExecute(ipAddress, port, getControlNodeLoginUser(), pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), @@ -212,18 +212,18 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po if (result.first()) { return true; } else { - LOGGER.warn(String.format("Deleting node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); + logger.warn(String.format("Deleting node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); } } break; } catch (Exception e) { String msg = String.format("Failed to remove Kubernetes cluster : %s node: %s on VM : %s", kubernetesCluster.getName(), hostName, userVm.getDisplayName()); - LOGGER.warn(msg, e); + logger.warn(msg, e); } try { Thread.sleep(waitDuration); } catch (InterruptedException ie) { - LOGGER.error(String.format("Error while waiting for Kubernetes cluster : %s node: %s on VM : %s removal", kubernetesCluster.getName(), hostName, userVm.getDisplayName()), ie); + logger.error(String.format("Error while waiting for Kubernetes cluster : %s node: %s on VM : %s removal", kubernetesCluster.getName(), hostName, userVm.getDisplayName()), ie); } retryCounter++; } @@ -314,7 +314,7 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); - LOGGER.info(String.format("Removing vm : %s from cluster %s", userVM.getDisplayName(), kubernetesCluster.getName())); + logger.info(String.format("Removing vm : %s from cluster %s", userVM.getDisplayName(), 
kubernetesCluster.getName())); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } @@ -429,8 +429,8 @@ private boolean isAutoscalingChanged() { public boolean scaleCluster() throws CloudRuntimeException { init(); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Scaling Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Scaling Kubernetes cluster : %s", kubernetesCluster.getName())); } scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000; final long originalClusterSize = kubernetesCluster.getNodeCount(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 84ad9bdc0a68..a7cea8093c83 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -36,7 +36,6 @@ import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; import com.cloud.dc.DataCenter; import com.cloud.dc.Vlan; @@ -74,6 +73,7 @@ import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VmDetailConstants; +import org.apache.logging.log4j.Level; 
public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker { @@ -122,7 +122,7 @@ private boolean isKubernetesVersionSupportsHA() { haSupported = true; } } catch (IllegalArgumentException e) { - LOGGER.error(String.format("Unable to compare Kubernetes version for cluster version : %s with %s", version.getName(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT), e); + logger.error(String.format("Unable to compare Kubernetes version for cluster version : %s with %s", version.getName(), KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT), e); } } return haSupported; @@ -228,8 +228,8 @@ private UserVm createKubernetesControlNode(final Network network, String serverI Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs, requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); } return controlVm; } @@ -303,8 +303,8 @@ private UserVm createKubernetesAdditionalControlNode(final String joinIp, final null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); } return additionalControlVm; } @@ -322,8 +322,8 @@ 
private UserVm provisionKubernetesClusterControlVm(final Network network, final if (k8sControlVM == null) { throw new ManagementServerException(String.format("Failed to provision control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned the control VM : %s in to the Kubernetes cluster : %s", k8sControlVM.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned the control VM : %s in to the Kubernetes cluster : %s", k8sControlVM.getDisplayName(), kubernetesCluster.getName())); } return k8sControlVM; } @@ -345,8 +345,8 @@ private List provisionKubernetesClusterAdditionalControlVms(final String throw new ManagementServerException(String.format("Failed to provision additional control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } additionalControlVms.add(vm); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned additional control VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Provisioned additional control VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } } } @@ -358,18 +358,18 @@ private Network startKubernetesClusterNetwork(final DeployDestination destinatio Network network = networkDao.findById(kubernetesCluster.getNetworkId()); if (network == null) { String msg = String.format("Network for Kubernetes cluster : %s not found", kubernetesCluster.getName()); - LOGGER.warn(msg); + logger.warn(msg); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); throw new ManagementServerException(msg); } try { networkMgr.startNetwork(network.getId(), destination, context); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Network : %s is started for the Kubernetes cluster : %s", 
network.getName(), kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Network : %s is started for the Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); } } catch (ConcurrentOperationException | ResourceUnavailableException |InsufficientCapacityException e) { String msg = String.format("Failed to start Kubernetes cluster : %s as unable to start associated network : %s" , kubernetesCluster.getName(), network.getName()); - LOGGER.error(msg, e); + logger.error(msg, e); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); throw new ManagementServerException(msg, e); } @@ -378,8 +378,8 @@ private Network startKubernetesClusterNetwork(final DeployDestination destinatio protected void setupKubernetesClusterNetworkRules(Network network, List clusterVMs) throws ManagementServerException { if (!Network.GuestType.Isolated.equals(network.getGuestType())) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); } return; } @@ -410,7 +410,7 @@ private void startKubernetesClusterVMs() { resizeNodeVolume(vm); startKubernetesVM(vm); } catch (ManagementServerException ex) { - LOGGER.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex); + logger.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex); // don't bail out here. 
proceed further to stop the reset of the VM's } } @@ -464,8 +464,8 @@ private void updateKubernetesClusterEntryEndpoint() { public boolean startKubernetesClusterOnCreate() { init(); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); } final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); @@ -523,8 +523,8 @@ public boolean startKubernetesClusterOnCreate() { } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateAndThrow(Level.ERROR, String.format("Provisioning node VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s VMs successfully provisioned", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Kubernetes cluster : %s VMs successfully provisioned", kubernetesCluster.getName())); } try { setupKubernetesClusterNetworkRules(network, clusterVMs); @@ -570,8 +570,8 @@ public boolean startKubernetesClusterOnCreate() { public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { init(); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Starting Kubernetes cluster : %s", kubernetesCluster.getName())); } final long startTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterStartTimeout.value() * 1000; 
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StartRequested); @@ -597,8 +597,8 @@ public boolean startStoppedKubernetesCluster() throws CloudRuntimeException { logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Kubernetes cluster : %s successfully started", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Kubernetes cluster : %s successfully started", kubernetesCluster.getName())); } return true; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java index 682175047bf2..e77268b0654c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStopWorker.java @@ -19,7 +19,7 @@ import java.util.List; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -35,8 +35,8 @@ public KubernetesClusterStopWorker(final KubernetesCluster kubernetesCluster, fi public boolean stop() throws CloudRuntimeException { init(); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Stopping Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + 
logger.info(String.format("Stopping Kubernetes cluster : %s", kubernetesCluster.getName())); } stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.StopRequested); List clusterVMs = getKubernetesClusterVMs(); @@ -47,7 +47,7 @@ public boolean stop() throws CloudRuntimeException { try { userVmService.stopVirtualMachine(vm.getId(), false); } catch (ConcurrentOperationException ex) { - LOGGER.warn(String.format("Failed to stop VM : %s in Kubernetes cluster : %s", + logger.warn(String.format("Failed to stop VM : %s in Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName()), ex); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index d418e20f58f7..4fefa54a6d98 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -23,7 +23,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Level; import com.cloud.hypervisor.Hypervisor; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -84,8 +84,8 @@ private void upgradeKubernetesClusterNodes() { hostName = hostName.toLowerCase(); } Pair result; - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), 
upgradeVersion.getUuid())); } String errorMessage = String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()); @@ -98,13 +98,13 @@ private void upgradeKubernetesClusterNodes() { break; } if (retry > 0) { - LOGGER.error(String.format("%s, retries left: %s", errorMessage, retry)); + logger.error(String.format("%s, retries left: %s", errorMessage, retry)); } else { logTransitStateDetachIsoAndThrow(Level.ERROR, errorMessage, kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } } catch (Exception e) { if (retry > 0) { - LOGGER.error(String.format("%s due to %s, retries left: %s", errorMessage, e, retry)); + logger.error(String.format("%s due to %s, retries left: %s", errorMessage, e, retry)); } else { logTransitStateDetachIsoAndThrow(Level.ERROR, errorMessage, kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); } @@ -122,13 +122,13 @@ private void upgradeKubernetesClusterNodes() { break; } if (retry > 0) { - LOGGER.error(String.format("%s, retries left: %s", errorMessage, retry)); + logger.error(String.format("%s, retries left: %s", errorMessage, retry)); } else { logTransitStateDetachIsoAndThrow(Level.ERROR, errorMessage, kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } } catch (Exception e) { if (retry > 0) { - LOGGER.error(String.format("%s due to %s, retries left: %s", errorMessage, e, retry)); + logger.error(String.format("%s due to %s, retries left: %s", errorMessage, e, retry)); } else { logTransitStateDetachIsoAndThrow(Level.ERROR, errorMessage, kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); } @@ -148,8 +148,8 @@ private void upgradeKubernetesClusterNodes() { if (!KubernetesClusterUtil.clusterNodeVersionMatches(upgradeVersion.getSemanticVersion(), publicIpAddress, sshPort, getControlNodeLoginUser(), getManagementServerSshPublicKeyFile(), hostName, 
upgradeTimeoutTime, 15000)) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get Kubernetes node on VM : %s upgraded to version %s", kubernetesCluster.getName(), vm.getDisplayName(), upgradeVersion.getSemanticVersion()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", + if (logger.isInfoEnabled()) { + logger.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); } } @@ -157,8 +157,8 @@ private void upgradeKubernetesClusterNodes() { public boolean upgradeCluster() throws CloudRuntimeException { init(); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Upgrading Kubernetes cluster : %s", kubernetesCluster.getName())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Upgrading Kubernetes cluster : %s", kubernetesCluster.getName())); } upgradeTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterUpgradeTimeout.value() * 1000; Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index e1210a607e67..77f785d27473 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -33,7 +33,8 @@ import org.apache.cloudstack.utils.security.SSLUtils; 
import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.uservm.UserVm; @@ -43,7 +44,7 @@ public class KubernetesClusterUtil { - protected static final Logger LOGGER = Logger.getLogger(KubernetesClusterUtil.class); + protected static Logger LOGGER = LogManager.getLogger(KubernetesClusterUtil.class); public static final String CLUSTER_NODE_READY_COMMAND = "sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'"; public static final String CLUSTER_NODE_VERSION_COMMAND = "sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\") print $5}'"; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 3ea30291f431..4cdc30a5850d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.api.query.dao.TemplateJoinDao; import com.cloud.api.query.vo.TemplateJoinVO; @@ -60,7 +59,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class KubernetesVersionManagerImpl extends ManagerBase implements KubernetesVersionService { - public static final Logger LOGGER = Logger.getLogger(KubernetesVersionManagerImpl.class.getName()); @Inject private KubernetesSupportedVersionDao kubernetesSupportedVersionDao; @@ -141,7 +139,7 @@ private 
List filterKubernetesSupportedVersions(Li versions.remove(i); } } catch (IllegalArgumentException e) { - LOGGER.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion)); + logger.warn(String.format("Unable to compare Kubernetes version for supported version ID: %s with %s", version.getUuid(), minimumSemanticVersion)); versions.remove(i); } } @@ -325,7 +323,7 @@ public KubernetesSupportedVersionResponse addKubernetesSupportedVersion(final Ad VirtualMachineTemplate vmTemplate = registerKubernetesVersionIso(zoneId, name, isoUrl, isoChecksum, isDirectDownload); template = templateDao.findById(vmTemplate.getId()); } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException | ResourceAllocationException ex) { - LOGGER.error(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl), ex); + logger.error(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl), ex); throw new CloudRuntimeException(String.format("Unable to register binaries ISO for supported kubernetes version, %s, with url: %s", name, isoUrl)); } @@ -353,13 +351,13 @@ public boolean deleteKubernetesSupportedVersion(final DeleteKubernetesSupportedV VMTemplateVO template = templateDao.findByIdIncludingRemoved(version.getIsoId()); if (template == null) { - LOGGER.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid())); + logger.warn(String.format("Unable to find ISO associated with supported Kubernetes version ID: %s", version.getUuid())); } if (template != null && template.getRemoved() == null) { // Delete ISO try { deleteKubernetesVersionIso(template.getId()); } catch (IllegalAccessException | NoSuchFieldException | IllegalArgumentException ex) { - LOGGER.error(String.format("Unable to delete binaries ISO ID: %s associated with supported 
kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex); + logger.error(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid()), ex); throw new CloudRuntimeException(String.format("Unable to delete binaries ISO ID: %s associated with supported kubernetes version ID: %s", template.getUuid(), version.getUuid())); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java index 380c93cca200..5a86bc74fedc 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/AddKubernetesSupportedVersionCmd.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InvalidParameterValueException; @@ -47,7 +46,6 @@ entityType = {KubernetesSupportedVersion.class}, authorized = {RoleType.Admin}) public class AddKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd { - public static final Logger LOGGER = Logger.getLogger(AddKubernetesSupportedVersionCmd.class.getName()); @Inject private KubernetesVersionService kubernetesVersionService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java index 42ac28dd16ae..b70a46804e00 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/DeleteKubernetesSupportedVersionCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.version.KubernetesSupportedVersion; @@ -44,7 +43,6 @@ entityType = {KubernetesSupportedVersion.class}, authorized = {RoleType.Admin}) public class DeleteKubernetesSupportedVersionCmd extends BaseAsyncCmd implements AdminCmd { - public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesSupportedVersionCmd.class.getName()); @Inject private KubernetesVersionService kubernetesVersionService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java index f932e5a9ba16..35a9c068837e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/admin/kubernetes/version/UpdateKubernetesSupportedVersionCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.ServerApiException; 
import org.apache.cloudstack.api.command.admin.AdminCmd; import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.version.KubernetesSupportedVersion; @@ -43,7 +42,6 @@ entityType = {KubernetesSupportedVersion.class}, authorized = {RoleType.Admin}) public class UpdateKubernetesSupportedVersionCmd extends BaseCmd implements AdminCmd { - public static final Logger LOGGER = Logger.getLogger(UpdateKubernetesSupportedVersionCmd.class.getName()); @Inject private KubernetesVersionService kubernetesVersionService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddVirtualMachinesToKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddVirtualMachinesToKubernetesClusterCmd.java index a7134f501bc2..bd35794a3a5a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddVirtualMachinesToKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/AddVirtualMachinesToKubernetesClusterCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; @@ -40,7 +39,6 @@ since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class AddVirtualMachinesToKubernetesClusterCmd extends BaseCmd { - public static final Logger LOGGER = Logger.getLogger(AddVirtualMachinesToKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; 
diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index 12a50c9e88f7..c55510217721 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -41,7 +41,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.kubernetes.cluster.KubernetesClusterEventTypes; @@ -57,7 +56,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { - public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName()); private static final Long DEFAULT_NODE_ROOT_DISK_SIZE = 8L; @Inject diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java index 2b4a1283ce25..05080de9277b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/DeleteKubernetesClusterCmd.java @@ -28,7 
+28,6 @@ import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -42,7 +41,6 @@ entityType = {KubernetesCluster.class}, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class DeleteKubernetesClusterCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(DeleteKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java index 789e460ee809..42305245d506 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/GetKubernetesClusterConfigCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.KubernetesClusterConfigResponse; import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.kubernetes.cluster.KubernetesClusterService; import com.cloud.user.Account; @@ -44,7 +43,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class GetKubernetesClusterConfigCmd extends BaseCmd { - public static final Logger LOGGER = 
Logger.getLogger(GetKubernetesClusterConfigCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java index 33eab2cbb651..7ee663bd4c38 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ListKubernetesClustersCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; import com.cloud.kubernetes.cluster.KubernetesClusterService; import com.cloud.utils.exception.CloudRuntimeException; @@ -41,7 +40,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListKubernetesClustersCmd extends BaseListProjectAndAccountResourcesCmd { - public static final Logger LOGGER = Logger.getLogger(ListKubernetesClustersCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveVirtualMachinesFromKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveVirtualMachinesFromKubernetesClusterCmd.java index 704d0b2f1f0d..a6452b829c76 100644 --- 
a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveVirtualMachinesFromKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/RemoveVirtualMachinesFromKubernetesClusterCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.RemoveVirtualMachinesFromKubernetesClusterResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; @@ -42,7 +41,6 @@ since = "4.19.0", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class RemoveVirtualMachinesFromKubernetesClusterCmd extends BaseListCmd { - public static final Logger LOGGER = Logger.getLogger(RemoveVirtualMachinesFromKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index e5a5c902f4df..02865357b8a5 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import 
com.cloud.kubernetes.cluster.KubernetesCluster; @@ -51,7 +50,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(ScaleKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java index 7a7c1e822329..bb0111af2328 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StartKubernetesClusterCmd.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -44,7 +43,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class StartKubernetesClusterCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(StartKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java index 866a7a8fd7f6..5c7dc92ef67d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/StopKubernetesClusterCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -45,7 +44,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class StopKubernetesClusterCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(StopKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java index 2cbedf5608ab..18bdbb53527e 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/UpgradeKubernetesClusterCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.KubernetesClusterResponse; import 
org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -46,7 +45,6 @@ responseHasSensitiveInfo = true, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class UpgradeKubernetesClusterCmd extends BaseAsyncCmd { - public static final Logger LOGGER = Logger.getLogger(UpgradeKubernetesClusterCmd.class.getName()); @Inject public KubernetesClusterService kubernetesClusterService; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java index 15f83252c8af..f718d873842d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/version/ListKubernetesSupportedVersionsCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.kubernetes.version.KubernetesVersionService; @@ -41,7 +40,6 @@ responseView = ResponseObject.ResponseView.Restricted, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) public class ListKubernetesSupportedVersionsCmd extends BaseListCmd { - public static final Logger LOGGER = 
Logger.getLogger(ListKubernetesSupportedVersionsCmd.class.getName()); @Inject private KubernetesVersionService kubernetesVersionService; diff --git a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java index 17fbd48181ad..e02bc1102172 100644 --- a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java +++ b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java @@ -34,7 +34,6 @@ import com.cloud.user.dao.AccountDao; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; -import org.apache.log4j.Logger; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -73,7 +72,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class PrometheusExporterImpl extends ManagerBase implements PrometheusExporter, Manager { - private static final Logger LOG = Logger.getLogger(PrometheusExporterImpl.class); private static final String USED = "used"; private static final String ALLOCATED = "allocated"; @@ -493,7 +491,7 @@ public void updateMetrics() { addDomainLimits(latestMetricsItems); addDomainResourceCount(latestMetricsItems); } catch (Exception e) { - LOG.warn("Getting metrics failed ", e); + logger.warn("Getting metrics failed ", e); } metricsItems = latestMetricsItems; } diff --git a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterServerImpl.java b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterServerImpl.java index cc3b7d556538..d9f25d2f5772 100644 --- a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterServerImpl.java +++ 
b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterServerImpl.java @@ -22,7 +22,6 @@ import com.sun.net.httpserver.HttpServer; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.io.IOException; @@ -32,14 +31,13 @@ import java.util.Arrays; public class PrometheusExporterServerImpl extends ManagerBase implements PrometheusExporterServer, Configurable { - private static final Logger LOG = Logger.getLogger(PrometheusExporterServerImpl.class); private static HttpServer httpServer; @Inject private PrometheusExporter prometheusExporter; - private final static class ExporterHandler implements HttpHandler { + private final class ExporterHandler implements HttpHandler { private PrometheusExporter prometheusExporter; ExporterHandler(final PrometheusExporter prometheusExporter) { @@ -50,7 +48,7 @@ private final static class ExporterHandler implements HttpHandler { @Override public void handle(final HttpExchange httpExchange) throws IOException { final String remoteClientAddress = httpExchange.getRemoteAddress().getAddress().toString().replace("/", ""); - LOG.debug("Prometheus exporter received client request from: " + remoteClientAddress); + logger.debug("Prometheus exporter received client request from: " + remoteClientAddress); String response = "Forbidden"; int responseCode = 403; if (Arrays.asList(PrometheusExporterAllowedAddresses.value().split(",")).contains(remoteClientAddress)) { @@ -65,9 +63,9 @@ public void handle(final HttpExchange httpExchange) throws IOException { try { os.write(bytesToOutput); } catch (IOException e) { - LOG.error(String.format("could not export Prometheus data due to %s", e.getLocalizedMessage())); - if (LOG.isDebugEnabled()) { - LOG.debug("Error during Prometheus export: ", e); + logger.error(String.format("could not export Prometheus data due to %s", 
e.getLocalizedMessage())); + if (logger.isDebugEnabled()) { + logger.debug("Error during Prometheus export: ", e); } os.write("The system could not export Prometheus due to an internal error. Contact your operator to learn about the reason.".getBytes()); } finally { @@ -96,9 +94,9 @@ public void handle(HttpExchange httpExchange) throws IOException { } }); httpServer.start(); - LOG.debug("Started prometheus exporter http server"); + logger.debug("Started prometheus exporter http server"); } catch (final IOException e) { - LOG.info("Failed to start prometheus exporter http server due to: ", e); + logger.info("Failed to start prometheus exporter http server due to: ", e); } } return true; @@ -108,7 +106,7 @@ public void handle(HttpExchange httpExchange) throws IOException { public boolean stop() { if (httpServer != null) { httpServer.stop(0); - LOG.debug("Stopped Prometheus exporter http server"); + logger.debug("Stopped Prometheus exporter http server"); } return true; } } diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/api/MetricConstants.java b/plugins/metrics/src/main/java/org/apache/cloudstack/api/MetricConstants.java index 19a1cd405630..8c93f2e1f446 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/api/MetricConstants.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/api/MetricConstants.java @@ -29,7 +29,7 @@ public interface MetricConstants { String HEAP_MEMORY_TOTAL = "heapmemorytotal"; String LAST_HEARTBEAT = "lastheartbeat"; String LAST_SUCCESSFUL_JOB = "lastsuccessfuljob"; - String LOG_INFO = "loginfo"; + String LOG_INFO = "loginfo"; String REPLICAS = "replicas"; String SESSIONS = "sessions"; String SYSTEM = "system"; diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index 51c020fc2375..19c453cbd071 100644 ---
a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -79,7 +79,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.log4j.Logger; import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntryBase; @@ -138,7 +137,6 @@ import com.google.gson.Gson; public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements MetricsService { - private static final Logger LOGGER = Logger.getLogger(MetricsServiceImpl.class); @Inject private DataCenterDao dataCenterDao; @@ -833,19 +831,19 @@ private void addHostCpuMetricsToResponse(HostMetricsSummary metricsResponse, Lon @Override public List listManagementServerMetrics(List managementServerResponses) { final List metricsResponses = new ArrayList<>(); - if(LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Getting metrics for %d MS hosts.", managementServerResponses.size())); + if(logger.isDebugEnabled()) { + logger.debug(String.format("Getting metrics for %d MS hosts.", managementServerResponses.size())); } for (final ManagementServerResponse managementServerResponse: managementServerResponses) { - if(LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Processing metrics for MS hosts %s.", managementServerResponse.getId())); + if(logger.isDebugEnabled()) { + logger.debug(String.format("Processing metrics for MS hosts %s.", managementServerResponse.getId())); } ManagementServerMetricsResponse metricsResponse = new ManagementServerMetricsResponse(); try { BeanUtils.copyProperties(metricsResponse, managementServerResponse); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Bean copy result %s.", new ReflectionToStringBuilder(metricsResponse, ToStringStyle.SIMPLE_STYLE).toString())); + if (logger.isTraceEnabled()) { 
+ logger.trace(String.format("Bean copy result %s.", new ReflectionToStringBuilder(metricsResponse, ToStringStyle.SIMPLE_STYLE).toString())); } } catch (IllegalAccessException | InvocationTargetException e) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate zone metrics response."); @@ -862,15 +860,15 @@ public List listManagementServerMetrics(List 0) { copyManagementServerStatusToResponse(metricsResponse, status); @@ -1005,8 +1003,8 @@ public DbMetricsResponse listDbMetrics() { getQueryHistory(response); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(new ReflectionToStringBuilder(response)); + if (logger.isTraceEnabled()) { + logger.trace(new ReflectionToStringBuilder(response)); } response.setObjectName("dbMetrics"); @@ -1064,8 +1062,8 @@ protected boolean isUsageRunning() { boolean local = false; String usageStatus = Script.runSimpleBashScript("systemctl status cloudstack-usage | grep \" Active:\""); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("The current usage status is: %s.", usageStatus)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("The current usage status is: %s.", usageStatus)); } if (StringUtils.isNotBlank(usageStatus)) { diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/response/ManagementServerMetricsResponse.java b/plugins/metrics/src/main/java/org/apache/cloudstack/response/ManagementServerMetricsResponse.java index ae0f57b967b8..95c3fd09c072 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/response/ManagementServerMetricsResponse.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/response/ManagementServerMetricsResponse.java @@ -85,7 +85,7 @@ public class ManagementServerMetricsResponse extends ManagementServerResponse { @Param(description = "Virtual size of the fully loaded process") private String systemMemoryVirtualSize; - @SerializedName(MetricConstants.LOG_INFO) + @SerializedName(MetricConstants.LOG_INFO) @Param(description = "the 
log files and their usage on disk") private String logInfo; diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/api/commands/ListBigSwitchBcfDevicesCmd.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/api/commands/ListBigSwitchBcfDevicesCmd.java index 3e25848804de..f009f3bbc7f5 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/api/commands/ListBigSwitchBcfDevicesCmd.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/api/commands/ListBigSwitchBcfDevicesCmd.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -48,7 +47,6 @@ @APICommand(name = "listBigSwitchBcfDevices", responseObject = BigSwitchBcfDeviceResponse.class, description = "Lists BigSwitch BCF Controller devices", since = "4.6.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListBigSwitchBcfDevicesCmd extends BaseListCmd { - public static final Logger S_LOGGER = Logger.getLogger(ListBigSwitchBcfDevicesCmd.class.getName()); private static final String S_NAME = "listbigswitchbcfdeviceresponse"; @Inject private BigSwitchBcfElementService bigswitchBcfElementService; diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java index a7f0f05e7628..ba81b76022d8 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java @@ -48,13 +48,14 @@ import org.apache.commons.httpclient.protocol.Protocol; import org.apache.commons.httpclient.protocol.ProtocolSocketFactory; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; 
+import org.apache.logging.log4j.LogManager; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; public class BigSwitchBcfApi { - private static final Logger S_LOGGER = Logger.getLogger(BigSwitchBcfApi.class); + protected Logger logger = LogManager.getLogger(getClass()); private final static String S_PROTOCOL = "https"; private final static String S_NS_BASE_URL = "/networkService/v1.1"; private final static String CONTENT_TYPE = "content-type"; @@ -94,7 +95,7 @@ protected HttpMethod createMethod(final String type, final String uri, final int try { url = new URL(S_PROTOCOL, host, port, uri).toString(); } catch (MalformedURLException e) { - S_LOGGER.error("Unable to build Big Switch API URL", e); + logger.error("Unable to build Big Switch API URL", e); throw new BigSwitchBcfApiException("Unable to build Big Switch API URL", e); } @@ -119,7 +120,7 @@ public BigSwitchBcfApi() { // Cast to ProtocolSocketFactory to avoid the deprecated constructor with the SecureProtocolSocketFactory parameter Protocol.registerProtocol("https", new Protocol("https", (ProtocolSocketFactory) new TrustingProtocolSocketFactory(), _port)); } catch (IOException e) { - S_LOGGER.warn("Failed to register the TrustingProtocolSocketFactory, falling back to default SSLSocketFactory", e); + logger.warn("Failed to register the TrustingProtocolSocketFactory, falling back to default SSLSocketFactory", e); } } @@ -289,7 +290,7 @@ private String checkResponse(final HttpMethodBase m, final String errorMessageBa } String errorMessage = responseToErrorMessage(m); m.releaseConnection(); - S_LOGGER.error(errorMessageBase + errorMessage); + logger.error(errorMessageBase + errorMessage); throw new BigSwitchBcfApiException(errorMessageBase + errorMessage + customErrorMsg); } @@ -395,7 +396,7 @@ protected T executeRetrieveObject(final Type returnObjectType, // CAUTIOUS: Safety margin of 2048 characters - extend if needed. 
returnValue = (T)gson.fromJson(gm.getResponseBodyAsString(2048), returnObjectType); } catch (IOException e) { - S_LOGGER.error("IOException while retrieving response body", e); + logger.error("IOException while retrieving response body", e); throw new BigSwitchBcfApiException(e); } finally { gm.releaseConnection(); @@ -419,11 +420,11 @@ protected void executeMethod(final HttpMethodBase method) throws BigSwitchBcfApi method.releaseConnection(); } } catch (HttpException e) { - S_LOGGER.error("HttpException caught while trying to connect to the BigSwitch Controller", e); + logger.error("HttpException caught while trying to connect to the BigSwitch Controller", e); method.releaseConnection(); throw new BigSwitchBcfApiException("API call to BigSwitch Controller Failed", e); } catch (IOException e) { - S_LOGGER.error("IOException caught while trying to connect to the BigSwitch Controller", e); + logger.error("IOException caught while trying to connect to the BigSwitch Controller", e); method.releaseConnection(); throw new BigSwitchBcfApiException("API call to BigSwitch Controller Failed", e); } @@ -439,7 +440,7 @@ private String responseToErrorMessage(final HttpMethodBase method) { try { return method.getResponseBodyAsString(2048); } catch (IOException e) { - S_LOGGER.debug("Error while loading response body", e); + logger.debug("Error while loading response body", e); } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java index 35ca009a80c2..db2e1311b86c 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java @@ -26,7 +26,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.net.util.SubnetUtils; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.bouncycastle.util.IPAddress; import com.cloud.agent.AgentManager; @@ -76,7 +77,7 @@ import com.cloud.vm.dao.VMInstanceDao; public class BigSwitchBcfUtils { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfUtils.class); + protected Logger logger = LogManager.getLogger(getClass()); private final NetworkDao _networkDao; private final NicDao _nicDao; @@ -447,7 +448,7 @@ public String syncTopologyToBcfHost(HostVO bigswitchBcfHost){ } BcfAnswer syncAnswer = (BcfAnswer) _agentMgr.easySend(bigswitchBcfHost.getId(), syncCmd); if (syncAnswer == null || !syncAnswer.getResult()) { - s_logger.error("SyncBcfTopologyCommand failed"); + logger.error("SyncBcfTopologyCommand failed"); return null; } return syncAnswer.getHash(); @@ -462,7 +463,7 @@ public String syncTopologyToBcfHost(HostVO bigswitchBcfHost, boolean natEnabled) } BcfAnswer syncAnswer = (BcfAnswer) _agentMgr.easySend(bigswitchBcfHost.getId(), syncCmd); if (syncAnswer == null || !syncAnswer.getResult()) { - s_logger.error("SyncBcfTopologyCommand failed"); + logger.error("SyncBcfTopologyCommand failed"); return null; } return syncAnswer.getHash(); @@ -481,7 +482,7 @@ public BcfAnswer sendBcfCommandWithNetworkSyncCheck(BcfCommand cmd, Network netw BcfAnswer answer = (BcfAnswer) _agentMgr.easySend(cluster.getPrimary().getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error ("BCF API Command failed"); + logger.error ("BCF API Command failed"); throw new IllegalArgumentException("Failed API call to Big Switch Network plugin"); } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java index 776f76fabca0..95513df9c86e 100644 --- 
a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/element/BigSwitchBcfElement.java @@ -29,7 +29,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.commons.net.util.SubnetUtils; @@ -128,7 +127,6 @@ public class BigSwitchBcfElement extends AdapterBase implements BigSwitchBcfElementService, ConnectivityProvider, IpDeployer, SourceNatServiceProvider, StaticNatServiceProvider, NetworkACLServiceProvider, FirewallServiceProvider, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfElement.class); private static final Map> capabilities = setCapabilities(); @@ -194,18 +192,18 @@ public Provider getProvider() { } private boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if BigSwitchBcfElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug("Checking if BigSwitchBcfElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Vlan) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("BigSwitchBcfElement is not a provider for network " + network.getDisplayText()); + logger.debug("BigSwitchBcfElement is not a provider for network " + network.getDisplayText()); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, BcfConstants.BIG_SWITCH_BCF)) { - s_logger.debug("BigSwitchBcfElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("BigSwitchBcfElement can't provide the " + 
service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -298,7 +296,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri"); + logger.error("Nic has no broadcast Uri"); return false; } @@ -356,7 +354,7 @@ public boolean canEnableIndividualServices() { @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } return true; @@ -642,14 +640,14 @@ public boolean applyStaticNats(Network network, String dstIp = rule.getDestIpAddress(); String mac = rule.getSourceMacAddress(); if(!rule.isForRevoke()) { - s_logger.debug("BCF enables static NAT for public IP: " + srcIp + " private IP " + dstIp + logger.debug("BCF enables static NAT for public IP: " + srcIp + " private IP " + dstIp + " mac " + mac); CreateBcfStaticNatCommand cmd = new CreateBcfStaticNatCommand( tenantId, network.getUuid(), dstIp, srcIp, mac); _bcfUtils.sendBcfCommandWithNetworkSyncCheck(cmd, network); } else { - s_logger.debug("BCF removes static NAT for public IP: " + srcIp + " private IP " + dstIp + logger.debug("BCF removes static NAT for public IP: " + srcIp + " private IP " + dstIp + " mac " + mac); DeleteBcfStaticNatCommand cmd = new DeleteBcfStaticNatCommand(tenantId, srcIp); diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java index 7cb50edb4efc..e23395bcf65c 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java +++ 
b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/guru/BigSwitchBcfGuestNetworkGuru.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.CreateBcfAttachmentCommand; @@ -91,7 +90,6 @@ * removes them when the VM is destroyed. */ public class BigSwitchBcfGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfGuestNetworkGuru.class); @Inject PhysicalNetworkDao _physicalNetworkDao; @@ -139,7 +137,7 @@ protected boolean canHandle(NetworkOffering offering, NetworkType networkType, P isMyIsolationMethod(physicalNetwork)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -149,21 +147,21 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use // Check if the isolation type of the physical network is BCF_SEGMENT, then delegate GuestNetworkGuru to design PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); if (physnet == null || physnet.getIsolationMethods() == null || !physnet.getIsolationMethods().contains("BCF_SEGMENT")) { - s_logger.debug("Refusing to design this network, the physical isolation type is not BCF_SEGMENT"); + logger.debug("Refusing to design this network, the physical isolation type is not BCF_SEGMENT"); return null; } List devices = _bigswitchBcfDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - s_logger.error("No BigSwitch Controller on physical network " + physnet.getName()); + logger.error("No 
BigSwitch Controller on physical network " + physnet.getName()); return null; } for (BigSwitchBcfDeviceVO d: devices){ - s_logger.debug("BigSwitch Controller " + d.getUuid() + logger.debug("BigSwitch Controller " + d.getUuid() + " found on physical network " + physnet.getId()); } - s_logger.debug("Physical isolation type is BCF_SEGMENT, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is BCF_SEGMENT, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -311,7 +309,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vlan || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } @@ -355,7 +353,7 @@ public boolean prepareMigration(NicProfile nic, Network network, tenantId = vpc.getUuid(); tenantName = vpc.getName(); boolean released = _vpcDao.releaseFromLockTable(vpc.getId()); - s_logger.debug("BCF guru release lock vpc id: " + vpc.getId() + logger.debug("BCF guru release lock vpc id: " + vpc.getId() + " released? 
" + released); } else { // use network id in CS as tenant in BSN @@ -401,14 +399,14 @@ public boolean prepareMigration(NicProfile nic, Network network, public void rollbackMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { - s_logger.debug("BCF guru rollback migration"); + logger.debug("BCF guru rollback migration"); } @Override public void commitMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { - s_logger.debug("BCF guru commit migration"); + logger.debug("BCF guru commit migration"); } private void bcfUtilsInit(){ diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java index de33b8ae7b44..63e8206ec824 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -70,7 +69,6 @@ import com.cloud.utils.component.ManagerBase; public class BigSwitchBcfResource extends ManagerBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BigSwitchBcfResource.class); private String _name; private String _guid; @@ -176,20 +174,20 @@ public PingCommand getCurrentStatus(long id) { try{ executeRequest(new SyncBcfTopologyCommand(true, true), _numRetries); } catch(Exception e){ - s_logger.error("BigSwitch BCF sync error", e); + logger.error("BigSwitch BCF sync error", e); } } else { try{ executeRequest(new SyncBcfTopologyCommand(true, false), _numRetries); } catch (Exception e){ - s_logger.error("BigSwitch BCF sync error", e); + 
logger.error("BigSwitch BCF sync error", e); } } } try { ControlClusterStatus ccs = _bigswitchBcfApi.getControlClusterStatus(); if (!ccs.getStatus()) { - s_logger.error("ControlCluster state is not ready: " + ccs.getStatus()); + logger.error("ControlCluster state is not ready: " + ccs.getStatus()); return null; } if (ccs.isTopologySyncRequested()) { @@ -200,11 +198,11 @@ public PingCommand getCurrentStatus(long id) { executeRequest(new SyncBcfTopologyCommand(true, false), _numRetries); } } else { - s_logger.debug("topology sync needed but no topology history"); + logger.debug("topology sync needed but no topology history"); } } } catch (BigSwitchBcfApiException e) { - s_logger.error("getControlClusterStatus failed", e); + logger.error("getControlClusterStatus failed", e); return null; } try { @@ -222,7 +220,7 @@ public PingCommand getCurrentStatus(long id) { } } catch (BigSwitchBcfApiException e) { - s_logger.error("getCapabilities failed", e); + logger.error("getCapabilities failed", e); } return new PingCommand(Host.Type.L2Networking, id); } @@ -274,7 +272,7 @@ public Answer executeRequest(Command cmd, int numRetries) { } else if (cmd instanceof GetControllerDataCommand) { return executeRequest((GetControllerDataCommand)cmd, numRetries); } - s_logger.debug("Received unsupported command " + cmd.toString()); + logger.debug("Received unsupported command " + cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } @@ -575,7 +573,7 @@ private Answer executeRequest(MaintainCommand cmd) { } private Answer retry(Command cmd, int numRetries) { - s_logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetries); + logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetries); return executeRequest(cmd, numRetries); } diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/api/commands/ListBrocadeVcsDeviceNetworksCmd.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/api/commands/ListBrocadeVcsDeviceNetworksCmd.java index 584a6e665b74..707415b0f583 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/api/commands/ListBrocadeVcsDeviceNetworksCmd.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/api/commands/ListBrocadeVcsDeviceNetworksCmd.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseListCmd; @@ -48,7 +47,6 @@ @APICommand(name = "listBrocadeVcsDeviceNetworks", responseObject = NetworkResponse.class, description = "lists network that are using a brocade vcs switch", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListBrocadeVcsDeviceNetworksCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListBrocadeVcsDeviceNetworksCmd.class.getName()); private static final String s_name = "listbrocadevcsdevicenetworks"; @Inject protected BrocadeVcsElementService brocadeVcsElementService; diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/brocade/BrocadeVcsApi.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/brocade/BrocadeVcsApi.java index eb03515b5d5d..cc7f99bb64e3 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/brocade/BrocadeVcsApi.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/brocade/BrocadeVcsApi.java @@ -42,7 +42,8 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.DefaultHttpClient; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.network.schema.interfacevlan.InterfaceVlan; import com.cloud.network.schema.interfacevlan.Interface; @@ -61,7 +62,7 @@ import com.cloud.network.schema.showvcs.Output; public class BrocadeVcsApi { - private static final Logger s_logger = Logger.getLogger(BrocadeVcsApi.class); + protected Logger logger = LogManager.getLogger(getClass()); private final String _host; private final String _adminuser; @@ -74,7 +75,7 @@ protected HttpRequestBase createMethod(String type, String uri) throws BrocadeVc try { url = new URL(Constants.PROTOCOL, _host, Constants.PORT, uri).toString(); } catch (final MalformedURLException e) { - s_logger.error("Unable to build Brocade Switch API URL", e); + logger.error("Unable to build Brocade Switch API URL", e); throw new BrocadeVcsApiException("Unable to build Brocade Switch API URL", e); } @@ -338,12 +339,12 @@ protected boolean executeUpdateObject(T newObject, String uri) throws Brocad try { errorMessage = responseToErrorMessage(response); } catch (final IOException e) { - s_logger.error("Failed to update object : " + e.getMessage()); + logger.error("Failed to update object : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to update object : " + e.getMessage()); } pm.releaseConnection(); - s_logger.error("Failed to update object : " + errorMessage); + logger.error("Failed to update object : " + errorMessage); throw new BrocadeVcsApiException("Failed to update object : " + errorMessage); } @@ -363,12 +364,12 @@ protected String convertToString(T object) throws BrocadeVcsApiException { marshaller.marshal(object, stringWriter); } catch (final JAXBException e) { - s_logger.error("Failed to convert object to string : " + e.getMessage()); + logger.error("Failed to convert object to string : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to convert object to string : " + e.getMessage()); } 
final String str = stringWriter.toString(); - s_logger.info(str); + logger.info(str); return str; @@ -387,11 +388,11 @@ protected Output convertToXML(String object) throws BrocadeVcsApiException { if (result instanceof Output) { output = (Output)result; - s_logger.info(output); + logger.info(output); } } catch (final JAXBException e) { - s_logger.error("Failed to convert string to object : " + e.getMessage()); + logger.error("Failed to convert string to object : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to convert string to object : " + e.getMessage()); } @@ -417,12 +418,12 @@ protected boolean executeCreateObject(T newObject, String uri) throws Brocad try { errorMessage = responseToErrorMessage(response); } catch (final IOException e) { - s_logger.error("Failed to create object : " + e.getMessage()); + logger.error("Failed to create object : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to create object : " + e.getMessage()); } pm.releaseConnection(); - s_logger.error("Failed to create object : " + errorMessage); + logger.error("Failed to create object : " + errorMessage); throw new BrocadeVcsApiException("Failed to create object : " + errorMessage); } @@ -451,12 +452,12 @@ protected Output executeRetreiveStatus(String uri) throws BrocadeVcsApiException try { errorMessage = responseToErrorMessage(response); } catch (final IOException e) { - s_logger.error("Failed to retreive status : " + e.getMessage()); + logger.error("Failed to retreive status : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to retreive status : " + e.getMessage()); } pm.releaseConnection(); - s_logger.error("Failed to retreive status : " + errorMessage); + logger.error("Failed to retreive status : " + errorMessage); throw new BrocadeVcsApiException("Failed to retreive status : " + errorMessage); } @@ -464,12 +465,12 @@ protected Output executeRetreiveStatus(String uri) throws BrocadeVcsApiException sb = new StringBuffer(); while ((readLine = 
br.readLine()) != null) { - s_logger.debug(readLine); + logger.debug(readLine); sb.append(readLine); } } catch (final Exception e) { - s_logger.error("Failed to retreive status : " + e.getMessage()); + logger.error("Failed to retreive status : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to retreive status : " + e.getMessage()); } @@ -494,12 +495,12 @@ protected void executeDeleteObject(String uri) throws BrocadeVcsApiException { try { errorMessage = responseToErrorMessage(response); } catch (final IOException e) { - s_logger.error("Failed to delete object : " + e.getMessage()); + logger.error("Failed to delete object : " + e.getMessage()); throw new BrocadeVcsApiException("Failed to delete object : " + e.getMessage()); } dm.releaseConnection(); - s_logger.error("Failed to delete object : " + errorMessage); + logger.error("Failed to delete object : " + errorMessage); throw new BrocadeVcsApiException("Failed to delete object : " + errorMessage); } dm.releaseConnection(); @@ -514,7 +515,7 @@ protected HttpResponse executeMethod(HttpRequestBase method) throws BrocadeVcsAp response = _client.execute(method); } } catch (final IOException e) { - s_logger.error("IOException caught while trying to connect to the Brocade Switch", e); + logger.error("IOException caught while trying to connect to the Brocade Switch", e); method.releaseConnection(); throw new BrocadeVcsApiException("API call to Brocade Switch Failed", e); } diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java index f075b3202e6a..daf9c1c4e08d 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/element/BrocadeVcsElement.java @@ -29,7 +29,6 @@ import 
org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -92,7 +91,6 @@ @Component public class BrocadeVcsElement extends AdapterBase implements NetworkElement, ResourceStateAdapter, BrocadeVcsElementService { - private static final Logger s_logger = Logger.getLogger(BrocadeVcsElement.class); private static final Map> capabilities = setCapabilities(); @@ -138,18 +136,18 @@ public Provider getProvider() { } protected boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if BrocadeVcsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug("Checking if BrocadeVcsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Vcs) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("BrocadeVcsElement is not a provider for network " + network.getDisplayText()); + logger.debug("BrocadeVcsElement is not a provider for network " + network.getDisplayText()); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.BrocadeVcs)) { - s_logger.debug("BrocadeVcsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("BrocadeVcsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -166,7 +164,7 @@ public boolean configure(String name, Map params) throws Configu @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, 
ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("entering BrocadeVcsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug("entering BrocadeVcsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); if (!canHandle(network, Service.Connectivity)) { return false; @@ -234,7 +232,7 @@ public boolean canEnableIndividualServices() { public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } return true; diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java index 6a201cf988bb..1e13aaaa326b 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/guru/BrocadeVcsGuestNetworkGuru.java @@ -56,13 +56,11 @@ import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachineProfile; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; public class BrocadeVcsGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(BrocadeVcsGuestNetworkGuru.class); @Inject NetworkOfferingServiceMapDao _ntwkOfferingSrvcDao; @@ -91,7 +89,7 @@ protected boolean canHandle(NetworkOffering offering, final NetworkType networkT && isMyIsolationMethod(physicalNetwork) && _ntwkOfferingSrvcDao.areServicesSupportedByNetworkOffering(offering.getId(), Service.Connectivity)) { return 
true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -102,10 +100,10 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } - s_logger.debug("Physical isolation type is VCS, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is VCS, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -130,7 +128,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin List devices = _brocadeVcsDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + physicalNetworkId); + logger.error("No Brocade VCS Switch on physical network " + physicalNetworkId); return null; } @@ -142,8 +140,8 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin CreateNetworkAnswer answer = (CreateNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("CreateNetworkCommand failed"); - s_logger.error("Unable to create network " + network.getId()); + logger.error("CreateNetworkCommand failed"); + logger.error("Unable to create network " + network.getId()); return null; } @@ -167,7 +165,7 @@ public void reserve(NicProfile nic, Network network, 
VirtualMachineProfile vm, D List devices = _brocadeVcsDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); + logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); return; } for (BrocadeVcsDeviceVO brocadeVcsDevice : devices) { @@ -179,7 +177,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D AssociateMacToNetworkAnswer answer = (AssociateMacToNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("AssociateMacToNetworkCommand failed"); + logger.error("AssociateMacToNetworkCommand failed"); throw new InsufficientVirtualNetworkCapacityException("Unable to associate mac " + interfaceMac + " to network " + network.getId(), DataCenter.class, dc.getId()); } } @@ -193,7 +191,7 @@ public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm List devices = _brocadeVcsDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); + logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); return; } for (BrocadeVcsDeviceVO brocadeVcsDevice : devices) { @@ -204,8 +202,8 @@ public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm DisassociateMacFromNetworkAnswer answer = (DisassociateMacFromNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DisassociateMacFromNetworkCommand failed"); - s_logger.error("Unable to disassociate mac " + interfaceMac + " from network " + network.getId()); + logger.error("DisassociateMacFromNetworkCommand failed"); + logger.error("Unable to disassociate mac " + interfaceMac + " from network " + network.getId()); 
return; } } @@ -233,13 +231,13 @@ public boolean trash(Network network, NetworkOffering offering) { if (brocadeVcsNetworkVlanMapping != null) { vlanTag = brocadeVcsNetworkVlanMapping.getVlanId(); } else { - s_logger.error("Not able to find vlanId for network " + network.getId()); + logger.error("Not able to find vlanId for network " + network.getId()); return false; } List devices = _brocadeVcsDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); + logger.error("No Brocade VCS Switch on physical network " + network.getPhysicalNetworkId()); return false; } for (BrocadeVcsDeviceVO brocadeVcsDevice : devices) { @@ -250,8 +248,8 @@ public boolean trash(Network network, NetworkOffering offering) { DeleteNetworkAnswer answer = (DeleteNetworkAnswer)_agentMgr.easySend(brocadeVcsHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeleteNetworkCommand failed"); - s_logger.error("Unable to delete network " + network.getId()); + logger.error("DeleteNetworkCommand failed"); + logger.error("Unable to delete network " + network.getId()); return false; } } diff --git a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/resource/BrocadeVcsResource.java b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/resource/BrocadeVcsResource.java index 0a323992b867..845580b11563 100644 --- a/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/resource/BrocadeVcsResource.java +++ b/plugins/network-elements/brocade-vcs/src/main/java/com/cloud/network/resource/BrocadeVcsResource.java @@ -21,7 +21,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -50,7 +51,7 @@ import 
com.cloud.resource.ServerResource; public class BrocadeVcsResource implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BrocadeVcsResource.class); + protected Logger logger = LogManager.getLogger(getClass()); private String _name; private String _guid; @@ -143,7 +144,7 @@ public PingCommand getCurrentStatus(long id) { try { output = _brocadeVcsApi.getSwitchStatus(); } catch (BrocadeVcsApiException e) { - s_logger.error("getSwitchStatus failed", e); + logger.error("getSwitchStatus failed", e); return null; } @@ -151,7 +152,7 @@ public PingCommand getCurrentStatus(long id) { if (vcsNodes != null && !vcsNodes.isEmpty()) { for (VcsNodeInfo vcsNodeInfo : vcsNodes) { if (!"Online".equals(vcsNodeInfo.getNodeState())) { - s_logger.error("Brocade Switch is not ready: " + id); + logger.error("Brocade Switch is not ready: " + id); return null; } } @@ -179,7 +180,7 @@ public Answer executeRequest(Command cmd, int numRetries) { } else if (cmd instanceof DeleteNetworkCommand) { return executeRequest((DeleteNetworkCommand)cmd, numRetries); } - s_logger.debug("Received unsupported command " + cmd.toString()); + logger.debug("Received unsupported command " + cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } @@ -276,7 +277,7 @@ private Answer executeRequest(MaintainCommand cmd) { } private Answer retry(Command cmd, int numRetries) { - s_logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetries); + logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetries); return executeRequest(cmd, numRetries); } diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoAsa1000vResourceCmd.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoAsa1000vResourceCmd.java index 5c912a278f2f..b792637b3dbd 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoAsa1000vResourceCmd.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoAsa1000vResourceCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 @@ @APICommand(name = "addCiscoAsa1000vResource", responseObject = CiscoAsa1000vResourceResponse.class, description = "Adds a Cisco Asa 1000v appliance", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddCiscoAsa1000vResourceCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(AddCiscoAsa1000vResourceCmd.class.getName()); private static final String s_name = "addCiscoAsa1000vResource"; @Inject CiscoAsa1000vService _ciscoAsa1000vService; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoVnmcResourceCmd.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoVnmcResourceCmd.java index 15d69b68c30f..858b81423188 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoVnmcResourceCmd.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/AddCiscoVnmcResourceCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ @APICommand(name = "addCiscoVnmcResource", responseObject = 
CiscoVnmcResourceResponse.class, description = "Adds a Cisco Vnmc Controller", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddCiscoVnmcResourceCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(AddCiscoVnmcResourceCmd.class.getName()); private static final String s_name = "addCiscoVnmcResource"; @Inject CiscoVnmcElementService _ciscoVnmcElementService; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoAsa1000vResourceCmd.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoAsa1000vResourceCmd.java index cdd4fbaca0e6..c0c8101ed80c 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoAsa1000vResourceCmd.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoAsa1000vResourceCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ @APICommand(name = "deleteCiscoAsa1000vResource", responseObject = SuccessResponse.class, description = "Deletes a Cisco ASA 1000v appliance", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteCiscoAsa1000vResourceCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(DeleteCiscoAsa1000vResourceCmd.class.getName()); private static final String s_name = "deleteCiscoAsa1000vResource"; @Inject CiscoAsa1000vService _ciscoAsa1000vService; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoVnmcResourceCmd.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoVnmcResourceCmd.java index 2f1aecab98b8..456b8cf40088 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoVnmcResourceCmd.java +++ 
b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/DeleteCiscoVnmcResourceCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -41,7 +40,6 @@ @APICommand(name = "deleteCiscoVnmcResource", responseObject = SuccessResponse.class, description = "Deletes a Cisco Vnmc controller", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteCiscoVnmcResourceCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(DeleteCiscoVnmcResourceCmd.class.getName()); private static final String s_name = "deleteCiscoVnmcResource"; @Inject CiscoVnmcElementService _ciscoVnmcElementService; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoAsa1000vResourcesCmd.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoAsa1000vResourcesCmd.java index abf0bea11fa2..82974c270aaa 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoAsa1000vResourcesCmd.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoAsa1000vResourcesCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -46,7 +45,6 @@ @APICommand(name = "listCiscoAsa1000vResources", responseObject = CiscoAsa1000vResourceResponse.class, description = "Lists Cisco ASA 1000v appliances", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListCiscoAsa1000vResourcesCmd extends BaseListCmd { - private static final Logger s_logger = Logger.getLogger(ListCiscoAsa1000vResourcesCmd.class.getName()); private static final String s_name = "listCiscoAsa1000vResources"; @Inject CiscoAsa1000vService _ciscoAsa1000vService; diff --git 
a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoVnmcResourcesCmd.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoVnmcResourcesCmd.java index c5e05e8013c0..f2a364faa720 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoVnmcResourcesCmd.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/api/commands/ListCiscoVnmcResourcesCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -46,7 +45,6 @@ @APICommand(name = "listCiscoVnmcResources", responseObject = CiscoVnmcResourceResponse.class, description = "Lists Cisco VNMC controllers", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListCiscoVnmcResourcesCmd extends BaseListCmd { - private static final Logger s_logger = Logger.getLogger(ListCiscoVnmcResourcesCmd.class.getName()); private static final String s_name = "listCiscoVnmcResources"; @Inject CiscoVnmcElementService _ciscoVnmcElementService; diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java index 8b8e58958432..90597d7b1e19 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/cisco/CiscoVnmcConnectionImpl.java @@ -30,7 +30,8 @@ import org.apache.commons.httpclient.HttpStatus; import org.apache.commons.httpclient.contrib.ssl.EasySSLProtocolSocketFactory; import org.apache.commons.httpclient.methods.PostMethod; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import 
org.w3c.dom.Node; import org.w3c.dom.NodeList; @@ -46,7 +47,7 @@ public class CiscoVnmcConnectionImpl implements CiscoVnmcConnection { private final String _password; private String _cookie; - private static final Logger s_logger = Logger.getLogger(CiscoVnmcConnectionImpl.class); + protected static Logger LOGGER = LogManager.getLogger(CiscoVnmcConnectionImpl.class); private enum VnmcXml { LOGIN("login.xml", "mgmt-controller"), @@ -141,7 +142,7 @@ private String getXml(String filename) { return xml; } catch (Exception e) { - s_logger.debug(e); + LOGGER.debug(e); return null; } } @@ -1291,7 +1292,7 @@ private Document getDocument(String xml) throws ExecutionException { doc = ParserUtils.getSaferDocumentBuilderFactory().newDocumentBuilder().parse(xmlSource); } catch (Exception e) { - s_logger.error(e); + LOGGER.error(e); throw new ExecutionException(e.getMessage()); } diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java index ed650022d0cd..bea5a2c3f25e 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/element/CiscoVnmcElement.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -135,7 +134,6 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServiceProvider, FirewallServiceProvider, PortForwardingServiceProvider, IpDeployer, StaticNatServiceProvider, ResourceStateAdapter, NetworkElement, CiscoVnmcElementService, CiscoAsa1000vService { - private static final Logger 
s_logger = Logger.getLogger(CiscoVnmcElement.class); private static final Map> capabilities = setCapabilities(); @Inject @@ -272,7 +270,7 @@ public boolean implement(final Network network, final NetworkOffering offering, final DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); + logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); return false; } @@ -282,24 +280,24 @@ public boolean implement(final Network network, final NetworkOffering offering, final List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + network.getName()); return false; } List asaList = _ciscoAsa1000vDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (asaList.isEmpty()) { - s_logger.debug("No Cisco ASA 1000v device on network " + network.getName()); + logger.debug("No Cisco ASA 1000v device on network " + network.getName()); return false; } NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork != null) { - s_logger.debug("Cisco ASA 1000v device already associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device already associated with network " + network.getName()); return true; } if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.CiscoVnmc)) { - s_logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("SourceNat service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } @@ -307,20 +305,20 @@ public boolean implement(final Network network, final NetworkOffering offering, // 
ensure that there is an ASA 1000v assigned to this network CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network); if (assignedAsa == null) { - s_logger.error("Unable to assign ASA 1000v device to network " + network.getName()); + logger.error("Unable to assign ASA 1000v device to network " + network.getName()); throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName()); } ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId()); ClusterVSMMapVO clusterVsmMap = _clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId()); if (clusterVsmMap == null) { - s_logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); + logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); } CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId()); if (vsmDevice == null) { - s_logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); + logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); } @@ -355,14 +353,14 @@ public boolean implement(final Network network, final NetworkOffering offering, long callerUserId = CallContext.current().getCallingUserId(); outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone, true, null); } catch (ResourceAllocationException e) { - s_logger.error("Unable to allocate additional public Ip address. Exception details " + e); + logger.error("Unable to allocate additional public Ip address. 
Exception details " + e); throw new CloudRuntimeException("Unable to allocate additional public Ip address. Exception details " + e); } try { outsideIp = _ipAddrMgr.associateIPToGuestNetwork(outsideIp.getId(), network.getId(), true); } catch (ResourceAllocationException e) { - s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + + logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e); throw new CloudRuntimeException("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e); @@ -375,33 +373,33 @@ public boolean implement(final Network network, final NetworkOffering offering, // all public ip addresses must be from same subnet, this essentially means single public subnet in zone if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) { - s_logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); } // create stuff in VSM for ASA device if (!configureNexusVsmForAsa(vlanId, network.getGateway(), vsmDevice.getUserName(), vsmDevice.getPassword(), vsmDevice.getipaddr(), assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) { - s_logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); + logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); 
throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); } // configure source NAT if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, ciscoVnmcHost.getId())) { - s_logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); + logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); } // associate Asa 1000v instance with logical edge firewall if (!associateAsaWithLogicalEdgeFirewall(vlanId, assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) { - s_logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + + logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName()); throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName()); } } catch (CloudRuntimeException e) { unassignAsa1000vFromNetwork(network); - s_logger.error("CiscoVnmcElement failed", e); + logger.error("CiscoVnmcElement failed", e); return false; } catch (Exception e) { unassignAsa1000vFromNetwork(network); @@ -477,7 +475,7 @@ public boolean canEnableIndividualServices() { @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Firewall)) { - s_logger.warn("CiscoVnmc must be used as Firewall Service Provider in the network"); + logger.warn("CiscoVnmc must be used as Firewall Service Provider in the network"); return false; } return true; @@ -642,26 +640,26 @@ public IpDeployer getIpDeployer(Network network) { public boolean applyFWRules(Network network, List 
rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Firewall, Provider.CiscoVnmc)) { - s_logger.error("Firewall service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Firewall service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + network.getName()); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - s_logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); return true; } if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -688,7 +686,7 @@ public boolean applyFWRules(Network network, List rules) if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to apply firewall rules to Cisco ASA 1000v appliance due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } @@ -700,26 +698,26 @@ public boolean applyFWRules(Network network, List rules) public boolean applyPFRules(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.PortForwarding, Provider.CiscoVnmc)) { - s_logger.error("Port forwarding service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Port forwarding service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + network.getName()); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - s_logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); return true; } if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply port forwarding rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply port forwarding rules for network with ID " + network.getId() + "; this network is not implemented. 
Skipping backend commands."); return true; } @@ -743,7 +741,7 @@ public boolean applyPFRules(Network network, List rules) thr if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "Unable to apply port forwarding rules to Cisco ASA 1000v appliance due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } @@ -754,26 +752,26 @@ public boolean applyPFRules(Network network, List rules) thr @Override public boolean applyStaticNats(Network network, List rules) throws ResourceUnavailableException { if (!_networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.StaticNat, Provider.CiscoVnmc)) { - s_logger.error("Static NAT service is not provided by Cisco Vnmc device on network " + network.getName()); + logger.error("Static NAT service is not provided by Cisco Vnmc device on network " + network.getName()); return false; } // Find VNMC host for physical network List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Cisco Vnmc device on network " + network.getName()); + logger.error("No Cisco Vnmc device on network " + network.getName()); return true; } // Find if ASA 1000v is associated with network NetworkAsa1000vMapVO asaForNetwork = _networkAsa1000vMapDao.findByNetworkId(network.getId()); if (asaForNetwork == null) { - s_logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); + logger.debug("Cisco ASA 1000v device is not associated with network " + network.getName()); return true; } if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply static NAT rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply static NAT rules for network with ID " + network.getId() + "; 
this network is not implemented. Skipping backend commands."); return true; } @@ -798,7 +796,7 @@ public boolean applyStaticNats(Network network, List rules) if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "Unable to apply static NAT rules to Cisco ASA 1000v appliance due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } diff --git a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/resource/CiscoVnmcResource.java b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/resource/CiscoVnmcResource.java index 4b8ee6f42c47..bbecdaf05e02 100644 --- a/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/resource/CiscoVnmcResource.java +++ b/plugins/network-elements/cisco-vnmc/src/main/java/com/cloud/network/resource/CiscoVnmcResource.java @@ -23,7 +23,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -81,7 +82,7 @@ public void setConnection(CiscoVnmcConnectionImpl connection) { _connection = connection; } - private static final Logger s_logger = Logger.getLogger(CiscoVnmcResource.class); + protected Logger logger = LogManager.getLogger(getClass()); @Override public Answer executeRequest(Command cmd) { @@ -244,7 +245,7 @@ private boolean refreshVnmcConnection() { try { ret = _connection.login(); } catch (ExecutionException ex) { - s_logger.error("Login to Vnmc failed", ex); + logger.error("Login to Vnmc failed", ex); } return ret; } @@ -312,7 +313,7 @@ private Answer execute(SetSourceNatCommand cmd, int numRetries) { } } catch (ExecutionException e) { String msg = "SetSourceNatCommand failed due to " + e.getMessage(); - 
s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -404,7 +405,7 @@ private Answer execute(SetFirewallRulesCommand cmd, int numRetries) { } } catch (ExecutionException e) { String msg = "SetFirewallRulesCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -489,7 +490,7 @@ private Answer execute(SetStaticNatRulesCommand cmd, int numRetries) { } } catch (ExecutionException e) { String msg = "SetStaticNatRulesCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -579,7 +580,7 @@ private Answer execute(SetPortForwardingRulesCommand cmd, int numRetries) { } } catch (ExecutionException e) { String msg = "SetPortForwardingRulesCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -637,7 +638,7 @@ private Answer execute(CreateLogicalEdgeFirewallCommand cmd, int numRetries) { throw new ExecutionException("Failed to create edge firewall in VNMC for guest network with vlan " + cmd.getVlanId()); } catch (ExecutionException e) { String msg = "CreateLogicalEdgeFirewallCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -658,14 +659,14 @@ private Answer execute(ConfigureNexusVsmForAsaCommand cmd, int numRetries) { params.add(new Pair(OperationType.addvlanid, vlanId)); try { helper = new NetconfHelper(cmd.getVsmIp(), cmd.getVsmUsername(), cmd.getVsmPassword()); - s_logger.debug("Connected to Cisco VSM " + cmd.getVsmIp()); + logger.debug("Connected to Cisco VSM " + cmd.getVsmIp()); helper.addVServiceNode(vlanId, cmd.getIpAddress()); - s_logger.debug("Created vservice node for ASA appliance in Cisco VSM for vlan " + vlanId); + logger.debug("Created vservice node for ASA appliance in Cisco VSM for vlan " + vlanId); 
helper.updatePortProfile(cmd.getAsaInPortProfile(), SwitchPortMode.access, params); - s_logger.debug("Updated inside port profile for ASA appliance in Cisco VSM with new vlan " + vlanId); + logger.debug("Updated inside port profile for ASA appliance in Cisco VSM with new vlan " + vlanId); } catch (CloudRuntimeException e) { String msg = "ConfigureVSMForASACommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } finally { if( helper != null) { @@ -700,7 +701,7 @@ private Answer execute(AssociateAsaWithLogicalEdgeFirewallCommand cmd, int numRe } } catch (ExecutionException e) { String msg = "AssociateAsaWithLogicalEdgeFirewallCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } @@ -721,7 +722,7 @@ private Answer execute(CleanupLogicalEdgeFirewallCommand cmd, int numRetries) { _connection.deleteTenant(tenant); } catch (ExecutionException e) { String msg = "CleanupLogicalEdgeFirewallCommand failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new Answer(cmd, false, msg); } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java index 87ecf0071f10..6c0ac160ceb2 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -56,7 +55,6 @@ @Component public class 
ElasticLoadBalancerElement extends AdapterBase implements LoadBalancingServiceProvider, IpDeployer { - private static final Logger s_logger = Logger.getLogger(ElasticLoadBalancerElement.class); private static final Map> capabilities = setCapabilities(); @Inject NetworkModel _networkManager; @@ -74,7 +72,7 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan private boolean canHandle(Network network, List rules) { if (network.getGuestType() != Network.GuestType.Shared || network.getTrafficType() != TrafficType.Guest) { - s_logger.debug("Not handling network with type " + network.getGuestType() + " and traffic type " + network.getTrafficType()); + logger.debug("Not handling network with type " + network.getGuestType() + " and traffic type " + network.getTrafficType()); return false; } @@ -84,7 +82,7 @@ private boolean canHandle(Network network, List rules) { if (schemeCaps != null) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); return false; } } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index 6975f76e9681..b47b7aad28e1 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import 
org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -103,7 +102,6 @@ @Component public class ElasticLoadBalancerManagerImpl extends ManagerBase implements ElasticLoadBalancerManager, VirtualMachineGuru { - private static final Logger s_logger = Logger.getLogger(ElasticLoadBalancerManagerImpl.class); @Inject private AgentManager _agentMgr; @@ -162,7 +160,7 @@ private boolean sendCommandsToRouter(final DomainRouterVO elbVm, Commands cmds) try { answers = _agentMgr.send(elbVm.getHostId(), cmds); } catch (OperationTimedoutException e) { - s_logger.warn("ELB: Timed Out", e); + logger.warn("ELB: Timed Out", e); throw new AgentUnavailableException("Unable to send commands to virtual elbVm ", elbVm.getHostId(), e); } @@ -249,7 +247,7 @@ public boolean applyLoadBalancerRules(Network network, List r DomainRouterVO elbVm = findElbVmForLb(rules.get(0)); if (elbVm == null) { - s_logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network " + network.getId()); + logger.warn("Unable to apply lb rules, ELB vm doesn't exist in the network " + network.getId()); throw new ResourceUnavailableException("Unable to apply lb rules", DataCenter.class, network.getDataCenterId()); } @@ -269,10 +267,10 @@ public boolean applyLoadBalancerRules(Network network, List r } return applyLBRules(elbVm, lbRules, network.getId()); } else if (elbVm.getState() == State.Stopped || elbVm.getState() == State.Stopping) { - s_logger.debug("ELB VM is in " + elbVm.getState() + ", so not sending apply LoadBalancing rules commands to the backend"); + logger.debug("ELB VM is in " + elbVm.getState() + ", so not sending apply LoadBalancing rules commands to the backend"); return true; } else { - s_logger.warn("Unable to apply loadbalancing rules, ELB VM is not in the right state " + elbVm.getState()); + logger.warn("Unable to apply loadbalancing rules, ELB VM is not in 
the right state " + elbVm.getState()); throw new ResourceUnavailableException("Unable to apply loadbalancing rules, ELB VM is not in the right state", VirtualRouter.class, elbVm.getId()); } } @@ -296,13 +294,13 @@ public boolean configure(String name, Map params) throws Configu // this can sometimes happen, if DB is manually or programmatically manipulated if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Elastic LB VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } String enabled = _configDao.getValue(Config.ElasticLoadBalancerEnabled.key()); _enabled = (enabled == null) ? false : Boolean.parseBoolean(enabled); - s_logger.info("Elastic Load balancer enabled: " + _enabled); + logger.info("Elastic Load balancer enabled: " + _enabled); if (_enabled) { String traffType = _configDao.getValue(Config.ElasticLoadBalancerNetwork.key()); if ("guest".equalsIgnoreCase(traffType)) { @@ -311,11 +309,11 @@ public boolean configure(String name, Map params) throws Configu _frontendTrafficType = TrafficType.Public; } else throw new ConfigurationException("ELB: Traffic type for front end of load balancer has to be guest or public; found : " + traffType); - s_logger.info("ELB: Elastic Load Balancer: will balance on " + traffType); + logger.info("ELB: Elastic Load Balancer: will balance on " + traffType); int gcIntervalMinutes = NumbersUtil.parseInt(configs.get(Config.ElasticLoadBalancerVmGcInterval.key()), 5); if (gcIntervalMinutes < 5) gcIntervalMinutes = 5; - s_logger.info("ELB: Elastic Load Balancer: scheduling GC to run every " + gcIntervalMinutes + " minutes"); + logger.info("ELB: Elastic Load Balancer: scheduling GC to run every " + gcIntervalMinutes + " minutes"); _gcThreadPool = Executors.newScheduledThreadPool(1, new NamedThreadFactory("ELBVM-GC")); _gcThreadPool.scheduleAtFixedRate(new CleanupThread(), gcIntervalMinutes, gcIntervalMinutes, TimeUnit.MINUTES); 
_itMgr.registerGuru(VirtualMachine.Type.ElasticLoadBalancerVm, this); @@ -327,7 +325,7 @@ public boolean configure(String name, Map params) throws Configu } private DomainRouterVO stop(DomainRouterVO elbVm, boolean forced) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Stopping ELB vm " + elbVm); + logger.debug("Stopping ELB vm " + elbVm); try { _itMgr.advanceStop(elbVm.getUuid(), forced); return _routerDao.findById(elbVm.getId()); @@ -346,7 +344,7 @@ void garbageCollectUnusedElbVms() { List unusedElbVms = _elbVmMapDao.listUnusedElbVms(); if (unusedElbVms != null) { if (unusedElbVms.size() > 0) { - s_logger.info("Found " + unusedElbVms.size() + " unused ELB vms"); + logger.info("Found " + unusedElbVms.size() + " unused ELB vms"); } Set currentGcCandidates = new HashSet(); for (DomainRouterVO elbVm : unusedElbVms) { @@ -359,22 +357,22 @@ void garbageCollectUnusedElbVms() { boolean gceed = false; try { - s_logger.info("Attempting to stop ELB VM: " + elbVm); + logger.info("Attempting to stop ELB VM: " + elbVm); stop(elbVm, true); gceed = true; } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); + logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); + logger.warn("Unable to stop unused ELB vm " + elbVm + " due to ", e); continue; } if (gceed) { try { - s_logger.info("Attempting to destroy ELB VM: " + elbVm); + logger.info("Attempting to destroy ELB VM: " + elbVm); _itMgr.expunge(elbVm.getUuid()); _routerDao.remove(elbVm.getId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to destroy unused ELB vm " + elbVm + " due to ", e); + logger.warn("Unable to destroy unused ELB vm " + elbVm + " due to ", e); gceed = false; } } @@ -444,14 +442,14 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, 
Depl } else if (nic.getTrafficType() == TrafficType.Control) { // control command is sent over management network in VMware if (dest.getHost().getHypervisorType() == HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Check if we need to add management server explicit route to ELB vm. pod cidr: " + dest.getPod().getCidrAddress() + "/" + if (logger.isInfoEnabled()) { + logger.info("Check if we need to add management server explicit route to ELB vm. pod cidr: " + dest.getPod().getCidrAddress() + "/" + dest.getPod().getCidrSize() + ", pod gateway: " + dest.getPod().getGateway() + ", management host: " + ApiServiceConfiguration.ManagementServerAddresses.value()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added management server explicit route to ELB vm."); + if (logger.isDebugEnabled()) { + logger.debug("Added management server explicit route to ELB vm."); } // always add management explicit route, for basic networking setup buf.append(" mgmtcidr=").append(_mgmtCidr); @@ -478,8 +476,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl } String msPublicKey = _configDao.getValue("ssh.publickey"); buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + buf.toString()); } if (controlNic == null) { @@ -514,7 +512,7 @@ public boolean finalizeDeployment(Commands cmds, VirtualMachineProfile profile, public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) { CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to ssh to the ELB VM: " + (answer != null ? 
answer.getDetails() : "No answer (answer for \"checkSsh\" was null)")); + logger.warn("Unable to ssh to the ELB VM: " + (answer != null ? answer.getDetails() : "No answer (answer for \"checkSsh\" was null)")); return false; } @@ -549,7 +547,7 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof } if (controlNic == null) { - s_logger.error("Control network doesn't exist for the ELB vm " + elbVm); + logger.error("Control network doesn't exist for the ELB vm " + elbVm); return false; } @@ -567,7 +565,7 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof lbRules.add(loadBalancing); } - s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of ELB vm " + elbVm + " start."); + logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of ELB vm " + elbVm + " start."); if (!lbRules.isEmpty()) { createApplyLoadBalancingRulesCommands(lbRules, elbVm, cmds, guestNetworkId); } diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java index ed52174d7e0a..6812fa495324 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/LoadBalanceRuleHandler.java @@ -30,7 +30,8 @@ import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.configuration.ConfigurationManagerImpl; import com.cloud.dc.DataCenter; @@ -100,7 +101,7 @@ public class 
LoadBalanceRuleHandler { - private static final Logger s_logger = Logger.getLogger(LoadBalanceRuleHandler.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private IPAddressDao _ipAddressDao; @@ -162,7 +163,7 @@ public LoadBalanceRuleHandler(String instance, Account systemAcct) { public void handleDeleteLoadBalancerRule(final LoadBalancer lb, final long userId, final Account caller) { final List remainingLbs = _loadBalancerDao.listByIpAddress(lb.getSourceIpAddressId()); if (remainingLbs.size() == 0) { - s_logger.debug("ELB mgr: releasing ip " + lb.getSourceIpAddressId() + " since no LB rules remain for this ip address"); + logger.debug("ELB mgr: releasing ip " + lb.getSourceIpAddressId() + " since no LB rules remain for this ip address"); releaseIp(lb.getSourceIpAddressId(), userId, caller); } } @@ -181,7 +182,7 @@ public LoadBalancer handleCreateLoadBalancerRule(final CreateLoadBalancerRuleCmd account = _accountDao.acquireInLockTable(account.getId()); if (account == null) { - s_logger.warn("ELB: CreateLoadBalancer: Failed to acquire lock on account"); + logger.warn("ELB: CreateLoadBalancer: Failed to acquire lock on account"); throw new CloudRuntimeException("Failed to acquire lock on account"); } try { @@ -202,19 +203,19 @@ private DomainRouterVO deployLoadBalancerVM(final Long networkId, final IPAddres params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); final Account owner = _accountService.getActiveAccountByName("system", new Long(1)); final DeployDestination dest = new DeployDestination(dc, pod, null, null); - s_logger.debug("About to deploy ELB vm "); + logger.debug("About to deploy ELB vm "); try { final DomainRouterVO elbVm = deployELBVm(network, dest, owner, params); if (elbVm == null) { throw new InvalidParameterValueException("Could not deploy or find existing ELB VM"); } - s_logger.debug("Deployed ELB vm = " + elbVm); + logger.debug("Deployed ELB vm = " + elbVm); return elbVm; } catch (final Throwable t) { - 
s_logger.warn("Error while deploying ELB VM: ", t); + logger.warn("Error while deploying ELB VM: ", t); return null; } @@ -238,8 +239,8 @@ private DomainRouterVO deployELBVm(Network guestNetwork, final DeployDestination owner = _accountService.getSystemAccount(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Starting a ELB vm for network configurations: " + guestNetwork + " in " + dest); + if (logger.isDebugEnabled()) { + logger.debug("Starting a ELB vm for network configurations: " + guestNetwork + " in " + dest); } assert guestNetwork.getState() == Network.State.Implemented || guestNetwork.getState() == Network.State.Setup || guestNetwork.getState() == Network.State.Implementing : "Network is not yet fully implemented: " + guestNetwork; @@ -251,8 +252,8 @@ private DomainRouterVO deployELBVm(Network guestNetwork, final DeployDestination if (elbVm == null) { final long id = _routerDao.getNextInSequence(Long.class, "id"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating the ELB vm " + id); + if (logger.isDebugEnabled()) { + logger.debug("Creating the ELB vm " + id); } final List offerings = _networkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork); @@ -309,7 +310,7 @@ private DomainRouterVO deployELBVm(Network guestNetwork, final DeployDestination } private void releaseIp(final long ipId, final long userId, final Account caller) { - s_logger.info("ELB: Release public IP for loadbalancing " + ipId); + logger.info("ELB: Release public IP for loadbalancing " + ipId); final IPAddressVO ipvo = _ipAddressDao.findById(ipId); ipvo.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipvo.getId(), ipvo); @@ -337,17 +338,17 @@ private LoadBalancer handleCreateLoadBalancerRuleWithLock(final CreateLoadBalanc if (lb.getSourceIpAddressId() != null) { throwExceptionIfSuppliedlLbNameIsNotAssociatedWithIpAddress(lb); } else { - s_logger.debug("Could not find any existing frontend ips for this account for this LB rule, acquiring a 
new frontent IP for ELB"); + logger.debug("Could not find any existing frontend ips for this account for this LB rule, acquiring a new frontent IP for ELB"); final PublicIp ip = allocDirectIp(account, networkId); ipId = ip.getId(); newIp = true; } } else { ipId = existingLbs.get(0).getSourceIpAddressId(); - s_logger.debug("ELB: Found existing frontend ip for this account for this LB rule " + ipId); + logger.debug("ELB: Found existing frontend ip for this account for this LB rule " + ipId); } } else { - s_logger.warn("ELB: Found existing load balancers matching requested new LB"); + logger.warn("ELB: Found existing load balancers matching requested new LB"); throw new NetworkRuleConflictException("ELB: Found existing load balancers matching requested new LB"); } @@ -360,7 +361,7 @@ private LoadBalancer handleCreateLoadBalancerRuleWithLock(final CreateLoadBalanc result = _lbMgr.createPublicLoadBalancer(lb.getXid(), lb.getName(), lb.getDescription(), lb.getSourcePortStart(), lb.getDefaultPortStart(), ipId.longValue(), lb.getProtocol(), lb.getAlgorithm(), false, CallContext.current(), lb.getLbProtocol(), true, null); } catch (final NetworkRuleConflictException e) { - s_logger.warn("Failed to create LB rule, not continuing with ELB deployment"); + logger.warn("Failed to create LB rule, not continuing with ELB deployment"); if (newIp) { releaseIp(ipId, CallContext.current().getCallingUserId(), account); } @@ -375,7 +376,7 @@ private LoadBalancer handleCreateLoadBalancerRuleWithLock(final CreateLoadBalanc elbVm = deployLoadBalancerVM(networkId, ipAddr); if (elbVm == null) { final Network network = _networkModel.getNetwork(networkId); - s_logger.warn("Failed to deploy a new ELB vm for ip " + ipAddr + " in network " + network + "lb name=" + lb.getName()); + logger.warn("Failed to deploy a new ELB vm for ip " + ipAddr + " in network " + network + "lb name=" + lb.getName()); if (newIp) { releaseIp(ipId, CallContext.current().getCallingUserId(), account); } @@ -390,8 +391,8 @@ 
private LoadBalancer handleCreateLoadBalancerRuleWithLock(final CreateLoadBalanc } if (elbVm == null) { - s_logger.warn("No ELB VM can be found or deployed"); - s_logger.warn("Deleting LB since we failed to deploy ELB VM"); + logger.warn("No ELB VM can be found or deployed"); + logger.warn("Deleting LB since we failed to deploy ELB VM"); _lbDao.remove(result.getId()); return null; } @@ -450,7 +451,7 @@ public PublicIp doInTransaction(final TransactionStatus status) throws Insuffici final IPAddressVO ipvo = _ipAddressDao.findById(ip.getId()); ipvo.setAssociatedWithNetworkId(frontEndNetwork.getId()); _ipAddressDao.update(ipvo.getId(), ipvo); - s_logger.info("Acquired frontend IP for ELB " + ip); + logger.info("Acquired frontend IP for ELB " + ip); return ip; } @@ -476,7 +477,7 @@ protected static void addCandidateVmIsPodIpMatches(final DomainRouterVO candidat } protected DomainRouterVO start(final DomainRouterVO elbVm, final Map params) throws ConcurrentOperationException { - s_logger.debug("Starting ELB VM " + elbVm); + logger.debug("Starting ELB VM " + elbVm); _itMgr.start(elbVm.getUuid(), params); return _routerDao.findById(elbVm.getId()); } diff --git a/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/element/GloboDnsElement.java b/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/element/GloboDnsElement.java index 28b2988cf6ab..09830d9a810f 100644 --- a/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/element/GloboDnsElement.java +++ b/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/element/GloboDnsElement.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -80,7 +79,6 @@ @Component public class GloboDnsElement extends AdapterBase implements 
ResourceStateAdapter, NetworkElement, GloboDnsElementService, Configurable { - private static final Logger s_logger = Logger.getLogger(GloboDnsElement.class); private static final Map> capabilities = setCapabilities(); @@ -132,7 +130,7 @@ public boolean prepare(final Network network, final NicProfile nic, final Virtua throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (!isTypeSupported(vm.getType())) { - s_logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. VM " + vm + " is " + vm.getType()); + logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. VM " + vm + " is " + vm.getType()); return false; } @@ -162,7 +160,7 @@ public boolean release(final Network network, NicProfile nic, final VirtualMachi ResourceUnavailableException { if (!isTypeSupported(vm.getType())) { - s_logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. VM " + vm + " is " + vm.getType()); + logger.info("GloboDNS only manages records for VMs of type User, ConsoleProxy and DomainRouter. 
VM " + vm + " is " + vm.getType()); return false; } diff --git a/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/resource/GloboDnsResource.java b/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/resource/GloboDnsResource.java index 84c1b5b44e57..9f399a92e0d9 100644 --- a/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/resource/GloboDnsResource.java +++ b/plugins/network-elements/globodns/src/main/java/com/globo/globodns/cloudstack/resource/GloboDnsResource.java @@ -21,7 +21,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -68,7 +67,6 @@ public class GloboDnsResource extends ManagerBase implements ServerResource { private static final String REVERSE_DOMAIN_SUFFIX = "in-addr.arpa"; private static final String DEFAULT_AUTHORITY_TYPE = "M"; - private static final Logger s_logger = Logger.getLogger(GloboDnsResource.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -125,7 +123,7 @@ public Type getType() { @Override public StartupCommand[] initialize() { - s_logger.trace("initialize called"); + logger.trace("initialize called"); StartupCommand cmd = new StartupCommand(getType()); cmd.setName(_name); cmd.setGuid(_guid); @@ -197,7 +195,7 @@ public Answer execute(RemoveDomainCommand cmd) { if (!cmd.isOverride()) { for (Record record : _globoDns.getRecordAPI().listAll(domain.getId())) { if (record.getTypeNSRecordAttributes().getId() == null) { - s_logger.warn("There are records in domain " + cmd.getNetworkDomain() + " and override is not enable. I will not delete this domain."); + logger.warn("There are records in domain " + cmd.getNetworkDomain() + " and override is not enable. 
I will not delete this domain."); return new Answer(cmd, true, "Domain keeped"); } } @@ -205,7 +203,7 @@ public Answer execute(RemoveDomainCommand cmd) { _globoDns.getDomainAPI().removeDomain(domain.getId()); scheduleExportChangesToBind(); } else { - s_logger.warn("Domain " + cmd.getNetworkDomain() + " already been deleted."); + logger.warn("Domain " + cmd.getNetworkDomain() + " already been deleted."); } return new Answer(cmd, true, "Domain removed"); @@ -246,7 +244,7 @@ public Answer execute(CreateOrUpdateRecordAndReverseCommand cmd) { Domain domain = searchDomain(cmd.getNetworkDomain(), false); if (domain == null) { domain = _globoDns.getDomainAPI().createDomain(cmd.getNetworkDomain(), cmd.getReverseTemplateId(), DEFAULT_AUTHORITY_TYPE); - s_logger.warn("Domain " + cmd.getNetworkDomain() + " doesn't exist, maybe someone removed it. It was automatically created with template " + logger.warn("Domain " + cmd.getNetworkDomain() + " doesn't exist, maybe someone removed it. It was automatically created with template " + cmd.getReverseTemplateId()); } @@ -287,7 +285,7 @@ protected boolean createOrUpdateReverse(String networkIp, String reverseRecordCo Domain reverseDomain = searchDomain(reverseDomainName, true); if (reverseDomain == null) { reverseDomain = _globoDns.getDomainAPI().createReverseDomain(reverseDomainName, templateId, DEFAULT_AUTHORITY_TYPE); - s_logger.info("Created reverse domain " + reverseDomainName + " with template " + templateId); + logger.info("Created reverse domain " + reverseDomainName + " with template " + templateId); } // create reverse @@ -303,14 +301,14 @@ public Answer execute(CreateOrUpdateDomainCommand cmd) { if (domain == null) { // create domain = _globoDns.getDomainAPI().createDomain(cmd.getDomainName(), cmd.getTemplateId(), DEFAULT_AUTHORITY_TYPE); - s_logger.info("Created domain " + cmd.getDomainName() + " with template " + cmd.getTemplateId()); + logger.info("Created domain " + cmd.getDomainName() + " with template " + 
cmd.getTemplateId()); if (domain == null) { return new Answer(cmd, false, "Unable to create domain " + cmd.getDomainName()); } else { needsExport = true; } } else { - s_logger.warn("Domain " + cmd.getDomainName() + " already exist."); + logger.warn("Domain " + cmd.getDomainName() + " already exist."); } return new Answer(cmd); } catch (GloboDnsException e) { @@ -331,16 +329,16 @@ public Answer execute(CreateOrUpdateDomainCommand cmd) { protected boolean removeRecord(String recordName, String recordValue, String bindZoneName, boolean reverse, boolean override) { Domain domain = searchDomain(bindZoneName, reverse); if (domain == null) { - s_logger.warn("Domain " + bindZoneName + " doesn't exists in GloboDNS. Record " + recordName + " has already been removed."); + logger.warn("Domain " + bindZoneName + " doesn't exists in GloboDNS. Record " + recordName + " has already been removed."); return false; } Record record = searchRecord(recordName, domain.getId()); if (record == null) { - s_logger.warn("Record " + recordName + " in domain " + bindZoneName + " has already been removed."); + logger.warn("Record " + recordName + " in domain " + bindZoneName + " has already been removed."); return false; } else { if (!override && !record.getContent().equals(recordValue)) { - s_logger.warn("Record " + recordName + " in domain " + bindZoneName + " have different value from " + recordValue + logger.warn("Record " + recordName + " in domain " + bindZoneName + " have different value from " + recordValue + " and override is not enable. 
I will not delete it."); return false; } @@ -363,7 +361,7 @@ private boolean createOrUpdateRecord(Long domainId, String name, String ip, Stri if (record == null) { // Create new record record = _globoDns.getRecordAPI().createRecord(domainId, name, ip, type); - s_logger.info("Created record " + record.getName() + " in domain " + domainId); + logger.info("Created record " + record.getName() + " in domain " + domainId); } else { if (!ip.equals(record.getContent())) { if (Boolean.TRUE.equals(override)) { @@ -384,10 +382,10 @@ public void scheduleExportChangesToBind() { try { Export export = _globoDns.getExportAPI().scheduleExport(); if (export != null) { - s_logger.info("GloboDns Export: " + export.getResult()); + logger.info("GloboDns Export: " + export.getResult()); } } catch (GloboDnsException e) { - s_logger.warn("Error on scheduling export. Although everything was persist, someone need to manually force export in GloboDns", e); + logger.warn("Error on scheduling export. Although everything was persist, someone need to manually force export in GloboDns", e); } } @@ -428,11 +426,11 @@ private Record searchRecord(String recordName, Long domainId) { // GloboDns search name in name and content. 
We need to iterate to check if recordName exists only in name for (Record candidate : candidates) { if (recordName.equalsIgnoreCase(candidate.getName())) { - s_logger.debug("Record " + recordName + " in domain id " + domainId + " found in GloboDNS"); + logger.debug("Record " + recordName + " in domain id " + domainId + " found in GloboDNS"); return candidate; } } - s_logger.debug("Record " + recordName + " in domain id " + domainId + " not found in GloboDNS"); + logger.debug("Record " + recordName + " in domain id " + domainId + " not found in GloboDNS"); return null; } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index 3e522f6be4f5..0bbc3e678d3e 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -28,7 +28,6 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd; import org.apache.cloudstack.api.command.admin.internallb.CreateInternalLoadBalancerElementCmd; @@ -86,7 +85,6 @@ import com.cloud.network.router.NetworkHelper; public class InternalLoadBalancerElement extends AdapterBase implements LoadBalancingServiceProvider, InternalLoadBalancerElementService, IpDeployer { - private static final Logger s_logger = Logger.getLogger(InternalLoadBalancerElement.class); protected static final Map> capabilities = setCapabilities(); private static InternalLoadBalancerElement internalLbElement = null; @@ -128,11 
+126,11 @@ private boolean canHandle(Network config, Scheme lbScheme) { //works in Advance zone only DataCenter dc = _entityMgr.findById(DataCenter.class, config.getDataCenterId()); if (dc.getNetworkType() != NetworkType.Advanced) { - s_logger.trace("Not hanling zone of network type " + dc.getNetworkType()); + logger.trace("Not hanling zone of network type " + dc.getNetworkType()); return false; } if (config.getGuestType() != Network.GuestType.Isolated || config.getTrafficType() != TrafficType.Guest) { - s_logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); + logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); return false; } @@ -141,14 +139,14 @@ private boolean canHandle(Network config, Scheme lbScheme) { String schemeCaps = lbCaps.get(Capability.LbSchemes); if (schemeCaps != null && lbScheme != null) { if (!schemeCaps.contains(lbScheme.toString())) { - s_logger.debug("Scheme " + lbScheme.toString() + " is not supported by the provider " + getName()); + logger.debug("Scheme " + lbScheme.toString() + " is not supported by the provider " + getName()); return false; } } } if (!_ntwkModel.isProviderSupportServiceInNetwork(config.getId(), Service.Lb, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + Service.Lb + " in the network " + config); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + Service.Lb + " in the network " + config); return false; } return true; @@ -169,7 +167,7 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin ResourceUnavailableException, InsufficientCapacityException { if (!canHandle(network, null)) { - s_logger.trace("No need to implement " + getName()); + logger.trace("No need to implement " + getName()); return true; } @@ -181,7 +179,7 @@ public boolean prepare(Network network, NicProfile 
nic, VirtualMachineProfile vm throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (!canHandle(network, null)) { - s_logger.trace("No need to prepare " + getName()); + logger.trace("No need to prepare " + getName()); return true; } @@ -200,16 +198,16 @@ protected boolean implementInternalLbVms(Network network, DeployDestination dest Ip sourceIp = new Ip(ip); long active = _appLbDao.countActiveBySourceIp(sourceIp, network.getId()); if (active > 0) { - s_logger.debug("Have to implement internal lb vm for source ip " + sourceIp + " as a part of network " + network + " implement as there are " + active + + logger.debug("Have to implement internal lb vm for source ip " + sourceIp + " as a part of network " + network + " implement as there are " + active + " internal lb rules exist for this ip"); List internalLbVms; try { internalLbVms = _internalLbMgr.deployInternalLbVm(network, sourceIp, dest, _accountMgr.getAccount(network.getAccountId()), null); } catch (InsufficientCapacityException e) { - s_logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); + logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); + logger.warn("Failed to deploy element " + getName() + " for ip " + sourceIp + " due to:", e); return false; } @@ -239,11 +237,11 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle result = result && _internalLbMgr.destroyInternalLbVm(internalLbVm.getId(), context.getAccount(), context.getCaller().getId()); if (cleanup) { if (!result) { - s_logger.warn("Failed to stop internal lb element " + internalLbVm + ", but would try to process clean up anyway."); + logger.warn("Failed to stop internal lb element " + internalLbVm + ", but would try to process clean up 
anyway."); } result = (_internalLbMgr.destroyInternalLbVm(internalLbVm.getId(), context.getAccount(), context.getCaller().getId())); if (!result) { - s_logger.warn("Failed to clean up internal lb element " + internalLbVm); + logger.warn("Failed to clean up internal lb element " + internalLbVm); } } } @@ -312,7 +310,7 @@ public boolean applyLBRules(Network network, List rules) thro //2) Get rules to apply Map> rulesToApply = getLbRulesToApply(rules); - s_logger.debug("Applying " + rulesToApply.size() + " on element " + getName()); + logger.debug("Applying " + rulesToApply.size() + " on element " + getName()); for (Ip sourceIp : vmsToDestroy) { //2.1 Destroy internal lb vm @@ -320,11 +318,11 @@ public boolean applyLBRules(Network network, List rules) thro if (vms.size() > 0) { //only one internal lb per IP exists try { - s_logger.debug(String.format("Destroying internal lb vm for ip %s as all the rules for this vm are in Revoke state", sourceIp.addr())); + logger.debug(String.format("Destroying internal lb vm for ip %s as all the rules for this vm are in Revoke state", sourceIp.addr())); return _internalLbMgr.destroyInternalLbVm(vms.get(0).getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), _accountMgr.getUserIncludingRemoved(User.UID_SYSTEM).getId()); } catch (ConcurrentOperationException e) { - s_logger.warn(String.format("Failed to apply lb rule(s) for ip %s on the element %s due to: ", sourceIp.addr(), getName()), e); + logger.warn(String.format("Failed to apply lb rule(s) for ip %s on the element %s due to: ", sourceIp.addr(), getName()), e); return false; } } @@ -340,10 +338,10 @@ public boolean applyLBRules(Network network, List rules) thro DeployDestination dest = new DeployDestination(_entityMgr.findById(DataCenter.class, network.getDataCenterId()), null, null, null); internalLbVms = _internalLbMgr.deployInternalLbVm(network, sourceIp, dest, _accountMgr.getAccount(network.getAccountId()), null); } catch (InsufficientCapacityException e) { - 
s_logger.warn(String.format("Failed to apply lb rule(s) for ip %s on the element %s due to: ", sourceIp.addr(), getName()), e); + logger.warn(String.format("Failed to apply lb rule(s) for ip %s on the element %s due to: ", sourceIp.addr(), getName()), e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn(String.format("Failed to apply lb rule(s) for ip %s on the element %s due to: ", sourceIp.addr(), getName()), e); + logger.warn(String.format("Failed to apply lb rule(s) for ip %s on the element %s due to: ", sourceIp.addr(), getName()), e); return false; } @@ -381,7 +379,7 @@ protected Set getVmsToDestroy(Network network, List rules for (Ip sourceIp : lbPublicIps) { //2) Check if there are non revoked rules for the source ip address if (_appLbDao.countBySourceIpAndNotRevoked(sourceIp, network.getId()) == 0) { - s_logger.debug("Have to destroy internal lb vm for source ip " + sourceIp + " as it has 0 rules in non-Revoke state"); + logger.debug("Have to destroy internal lb vm for source ip " + sourceIp + " as it has 0 rules in non-Revoke state"); vmsToDestroy.add(sourceIp); } } @@ -404,7 +402,7 @@ protected Map> groupBySourceIp(List params) th if (off != null) { _internalLbVmOfferingId = off.getId(); } else { - s_logger.warn("Invalid offering UUID is passed in " + Config.InternalLbVmServiceOfferingId.key() + "; the default offering will be used instead"); + logger.warn("Invalid offering UUID is passed in " + Config.InternalLbVmServiceOfferingId.key() + "; the default offering will be used instead"); } } @@ -392,15 +390,15 @@ public boolean configure(final String name, final Map params) th Storage.ProvisioningType.THIN, true, null, true, VirtualMachine.Type.InternalLoadBalancerVm, true); if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Internal LB VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } 
_itMgr.registerGuru(VirtualMachine.Type.InternalLoadBalancerVm, this); - if (s_logger.isInfoEnabled()) { - s_logger.info(getName() + " has been configured"); + if (logger.isInfoEnabled()) { + logger.info(getName() + " has been configured"); } return true; @@ -431,7 +429,7 @@ protected void finalizeSshAndVersionOnStart(final Commands cmds, final VirtualMa } protected void finalizeLbRulesForIp(final Commands cmds, final DomainRouterVO internalLbVm, final Provider provider, final Ip sourceIp, final long guestNtwkId) { - s_logger.debug("Resending load balancing rules as a part of start for " + internalLbVm); + logger.debug("Resending load balancing rules as a part of start for " + internalLbVm); final List lbs = _lbDao.listBySrcIpSrcNtwkId(sourceIp, guestNtwkId); final List lbRules = new ArrayList(); if (_ntwkModel.isProviderSupportServiceInNetwork(guestNtwkId, Service.Lb, provider)) { @@ -445,7 +443,7 @@ protected void finalizeLbRulesForIp(final Commands cmds, final DomainRouterVO in } } - s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of Intenrnal LB vm" + internalLbVm + " start."); + logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of Intenrnal LB vm" + internalLbVm + " start."); if (!lbRules.isEmpty()) { createApplyLoadBalancingRulesCommands(lbRules, internalLbVm, cmds, guestNtwkId); } @@ -513,7 +511,7 @@ protected String getInternalLbControlIp(final long internalLbVmId) { } if (controlIpAddress == null) { - s_logger.warn("Unable to find Internal LB control ip in its attached NICs!. Internal LB vm: " + internalLbVmId); + logger.warn("Unable to find Internal LB control ip in its attached NICs!. 
Internal LB vm: " + internalLbVmId); final DomainRouterVO internalLbVm = _internalLbVmDao.findById(internalLbVmId); return internalLbVm.getPrivateIpAddress(); } @@ -523,8 +521,8 @@ protected String getInternalLbControlIp(final long internalLbVmId) { @Override public boolean destroyInternalLbVm(final long vmId, final Account caller, final Long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Attempting to destroy Internal LB vm " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Attempting to destroy Internal LB vm " + vmId); } final DomainRouterVO internalLbVm = _internalLbVmDao.findById(vmId); @@ -554,7 +552,7 @@ public VirtualRouter stopInternalLbVm(final long vmId, final boolean forced, fin protected VirtualRouter stopInternalLbVm(final DomainRouterVO internalLbVm, final boolean forced, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - s_logger.debug("Stopping internal lb vm " + internalLbVm); + logger.debug("Stopping internal lb vm " + internalLbVm); try { _itMgr.advanceStop(internalLbVm.getUuid(), forced); return _internalLbVmDao.findById(internalLbVm.getId()); @@ -579,7 +577,7 @@ protected List startInternalLbVms(final Map param if (internalLbVms != null) { runningInternalLbVms = new ArrayList(); } else { - s_logger.debug("Have no internal lb vms to start"); + logger.debug("Have no internal lb vms to start"); return null; } @@ -605,8 +603,8 @@ protected List findOrDeployInternalLbVm(final Network guestNetwo throw new ConcurrentOperationException("Unable to lock network " + guestNetwork.getId()); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is acquired for network id " + lock.getId() + " as a part of internal lb startup in " + dest); + if (logger.isDebugEnabled()) { + logger.debug("Lock is acquired for network id " + lock.getId() + " as a part of internal lb startup in " + dest); } final 
long internalLbProviderId = getInternalLbProviderId(guestNetwork); @@ -622,7 +620,7 @@ protected List findOrDeployInternalLbVm(final Network guestNetwo final DeploymentPlan plan = planAndInternalLbVms.first(); if (internalLbVms.size() > 0) { - s_logger.debug("Found " + internalLbVms.size() + " internal lb vms for the requested IP " + requestedGuestIp.addr()); + logger.debug("Found " + internalLbVms.size() + " internal lb vms for the requested IP " + requestedGuestIp.addr()); return internalLbVms; } @@ -642,8 +640,8 @@ protected List findOrDeployInternalLbVm(final Network guestNetwo } finally { if (lock != null) { _networkDao.releaseFromLockTable(lock.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Lock is released for network id " + lock.getId() + " as a part of internal lb vm startup in " + dest); + if (logger.isDebugEnabled()) { + logger.debug("Lock is released for network id " + lock.getId() + " as a part of internal lb vm startup in " + dest); } } } @@ -675,7 +673,7 @@ protected LinkedHashMap> createInternalLbVmN //1) Guest network - default if (guestNetwork != null) { - s_logger.debug("Adding nic for Internal LB in Guest network " + guestNetwork); + logger.debug("Adding nic for Internal LB in Guest network " + guestNetwork); final NicProfile guestNic = new NicProfile(); if (guestIp != null) { guestNic.setIPv4Address(guestIp.addr()); @@ -694,7 +692,7 @@ protected LinkedHashMap> createInternalLbVmN } //2) Control network - s_logger.debug("Adding nic for Internal LB vm in Control network "); + logger.debug("Adding nic for Internal LB vm in Control network "); final List offerings = _ntwkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork); final NetworkOffering controlOffering = offerings.get(0); final Network controlConfig = _ntwkMgr.setupNetwork(_accountMgr.getSystemAccount(), controlOffering, plan, null, null, false).get(0); @@ -746,8 +744,8 @@ protected DomainRouterVO deployInternalLbVm(final Account owner, final 
DeployDes final HypervisorType hType = iter.next(); try { final long id = _internalLbVmDao.getNextInSequence(Long.class, "id"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating the internal lb vm " + id + " in datacenter " + dest.getDataCenter() + " with hypervisor type " + hType); + if (logger.isDebugEnabled()) { + logger.debug("Creating the internal lb vm " + id + " in datacenter " + dest.getDataCenter() + " with hypervisor type " + hType); } String templateName = null; switch (hType) { @@ -772,7 +770,7 @@ protected DomainRouterVO deployInternalLbVm(final Account owner, final DeployDes final VMTemplateVO template = _templateDao.findRoutingTemplate(hType, templateName); if (template == null) { - s_logger.debug(hType + " won't support system vm, skip it"); + logger.debug(hType + " won't support system vm, skip it"); continue; } @@ -793,7 +791,7 @@ protected DomainRouterVO deployInternalLbVm(final Account owner, final DeployDes internalLbVm = _internalLbVmDao.findById(internalLbVm.getId()); } catch (final InsufficientCapacityException ex) { if (allocateRetry < 2 && iter.hasNext()) { - s_logger.debug("Failed to allocate the Internal lb vm with hypervisor type " + hType + ", retrying one more time"); + logger.debug("Failed to allocate the Internal lb vm with hypervisor type " + hType + ", retrying one more time"); continue; } else { throw ex; @@ -808,7 +806,7 @@ protected DomainRouterVO deployInternalLbVm(final Account owner, final DeployDes break; } catch (final InsufficientCapacityException ex) { if (startRetry < 2 && iter.hasNext()) { - s_logger.debug("Failed to start the Internal lb vm " + internalLbVm + " with hypervisor type " + hType + ", " + + logger.debug("Failed to start the Internal lb vm " + internalLbVm + " with hypervisor type " + hType + ", " + "destroying it and recreating one more time"); // destroy the internal lb vm destroyInternalLbVm(internalLbVm.getId(), _accountMgr.getSystemAccount(), User.UID_SYSTEM); @@ -829,10 +827,10 @@ 
protected DomainRouterVO deployInternalLbVm(final Account owner, final DeployDes protected DomainRouterVO startInternalLbVm(DomainRouterVO internalLbVm, final Account caller, final long callerUserId, final Map params) throws StorageUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Starting Internal LB VM " + internalLbVm); + logger.debug("Starting Internal LB VM " + internalLbVm); _itMgr.start(internalLbVm.getUuid(), params, null, null); if (internalLbVm.isStopPending()) { - s_logger.info("Clear the stop pending flag of Internal LB VM " + internalLbVm.getHostName() + " after start router successfully!"); + logger.info("Clear the stop pending flag of Internal LB VM " + internalLbVm.getHostName() + " after start router successfully!"); internalLbVm.setStopPending(false); internalLbVm = _internalLbVmDao.persist(internalLbVm); } @@ -871,10 +869,10 @@ protected List getHypervisors(final DeployDestination dest, fina public boolean applyLoadBalancingRules(final Network network, final List rules, final List internalLbVms) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - s_logger.debug("No lb rules to be applied for network " + network); + logger.debug("No lb rules to be applied for network " + network); return true; } - s_logger.info("lb rules to be applied for network "); + logger.info("lb rules to be applied for network "); //only one internal lb vm is supported per ip address at this time if (internalLbVms == null || internalLbVms.isEmpty()) { throw new CloudRuntimeException("Can't apply the lb rules on network " + network + " as the list of internal lb vms is empty"); @@ -884,10 +882,10 @@ public boolean applyLoadBalancingRules(final Network network, final List + + ch.qos.reload4j + reload4j + mysql mysql-connector-java diff --git 
a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java index 4771441e9e7c..44cbc6c305f0 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailElementImpl.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.network.contrail.model.VMInterfaceModel; import org.apache.cloudstack.network.contrail.model.VirtualMachineModel; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeployDestination; @@ -65,10 +64,11 @@ import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; + @Component public class ContrailElementImpl extends AdapterBase - implements ContrailElement, StaticNatServiceProvider, IpDeployer, SourceNatServiceProvider, DhcpServiceProvider { + implements ContrailElement, StaticNatServiceProvider, IpDeployer, SourceNatServiceProvider, DhcpServiceProvider { private final Map> _capabilities = InitCapabilities(); @Inject @@ -83,7 +83,6 @@ public class ContrailElementImpl extends AdapterBase NicDao _nicDao; @Inject ServerDBSync _dbSync; - private static final Logger s_logger = Logger.getLogger(ContrailElement.class); // PluggableService @Override @@ -119,10 +118,10 @@ public Map> getCapabilities() { */ @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, - ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("NetworkElement implement: " + network.getName() + ", traffic type: " 
+ network.getTrafficType()); + ResourceUnavailableException, InsufficientCapacityException { + logger.debug("NetworkElement implement: " + network.getName() + ", traffic type: " + network.getTrafficType()); if (network.getTrafficType() == TrafficType.Guest) { - s_logger.debug("ignore network " + network.getName()); + logger.debug("ignore network " + network.getName()); return true; } VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); @@ -137,23 +136,23 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin } _manager.getDatabase().getVirtualNetworks().add(vnModel); } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); } return true; } @Override public boolean prepare(Network network, NicProfile nicProfile, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { + throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("NetworkElement prepare: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("NetworkElement prepare: " + network.getName() + ", traffic type: " + network.getTrafficType()); if (network.getTrafficType() == TrafficType.Guest) { - s_logger.debug("ignore network " + network.getName()); + logger.debug("ignore network " + network.getName()); return true; } - s_logger.debug("network: " + network.getId()); + logger.debug("network: " + network.getId()); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); @@ -183,7 +182,7 @@ public boolean prepare(Network network, NicProfile nicProfile, VirtualMachinePro try { 
vmiModel.build(_manager.getModelController(), (VMInstanceVO)vm.getVirtualMachine(), nic); } catch (IOException ex) { - s_logger.warn("vm interface set", ex); + logger.warn("vm interface set", ex); return false; } @@ -197,7 +196,7 @@ public boolean prepare(Network network, NicProfile nicProfile, VirtualMachinePro try { vmModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("virtual-machine-update", ex); + logger.warn("virtual-machine-update", ex); return false; } _manager.getDatabase().getVirtualMachines().add(vmModel); @@ -207,11 +206,11 @@ public boolean prepare(Network network, NicProfile nicProfile, VirtualMachinePro @Override public boolean release(Network network, NicProfile nicProfile, VirtualMachineProfile vm, ReservationContext context) throws ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { if (network.getTrafficType() == TrafficType.Guest) { return true; } else if (!_manager.isManagedPhysicalNetwork(network)) { - s_logger.debug("release ignore network " + network.getId()); + logger.debug("release ignore network " + network.getId()); return true; } @@ -220,7 +219,7 @@ public boolean release(Network network, NicProfile nicProfile, VirtualMachinePro VirtualMachineModel vmModel = _manager.getDatabase().lookupVirtualMachine(vm.getUuid()); if (vmModel == null) { - s_logger.debug("vm " + vm.getInstanceName() + " not in local database"); + logger.debug("vm " + vm.getInstanceName() + " not in local database"); return true; } VMInterfaceModel vmiModel = vmModel.getVMInterface(nic.getUuid()); @@ -228,7 +227,7 @@ public boolean release(Network network, NicProfile nicProfile, VirtualMachinePro try { vmiModel.destroy(_manager.getModelController()); } catch (IOException ex) { - s_logger.warn("virtual-machine-interface delete", ex); + logger.warn("virtual-machine-interface delete", ex); } vmModel.removeSuccessor(vmiModel); } @@ -250,7 +249,7 @@ public boolean release(Network network, 
NicProfile nicProfile, VirtualMachinePro */ @Override public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("NetworkElement shutdown"); + logger.debug("NetworkElement shutdown"); return true; } @@ -259,45 +258,45 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle */ @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("NetworkElement destroy"); + logger.debug("NetworkElement destroy"); return true; } @Override public boolean isReady(PhysicalNetworkServiceProvider provider) { - Map serviceMap = ((ConfigurationServerImpl)_configServer).getServicesAndProvidersForNetwork( _manager.getRouterOffering().getId()); - List types = new ArrayList(); - types.add(TrafficType.Control); - types.add(TrafficType.Management); - types.add(TrafficType.Storage); - List systemNets = _manager.findSystemNetworks(types); - if (systemNets != null && !systemNets.isEmpty()) { - for (NetworkVO net: systemNets) { - s_logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); - _networksDao.update(net.getId(), net, serviceMap); - } - } else { - s_logger.debug("no system networks created yet"); - } - serviceMap = ((ConfigurationServerImpl)_configServer).getServicesAndProvidersForNetwork( _manager.getPublicRouterOffering().getId()); - types = new ArrayList(); - types.add(TrafficType.Public); - systemNets = _manager.findSystemNetworks(types); - if (systemNets != null && !systemNets.isEmpty()) { - for (NetworkVO net: systemNets) { - s_logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); - _networksDao.update(net.getId(), net, serviceMap); - } - } else { - s_logger.debug("no system networks created yet"); - } - return true; - } + Map serviceMap = 
((ConfigurationServerImpl)_configServer).getServicesAndProvidersForNetwork( _manager.getRouterOffering().getId()); + List types = new ArrayList(); + types.add(TrafficType.Control); + types.add(TrafficType.Management); + types.add(TrafficType.Storage); + List systemNets = _manager.findSystemNetworks(types); + if (systemNets != null && !systemNets.isEmpty()) { + for (NetworkVO net: systemNets) { + logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + _networksDao.update(net.getId(), net, serviceMap); + } + } else { + logger.debug("no system networks created yet"); + } + serviceMap = ((ConfigurationServerImpl)_configServer).getServicesAndProvidersForNetwork( _manager.getPublicRouterOffering().getId()); + types = new ArrayList(); + types.add(TrafficType.Public); + systemNets = _manager.findSystemNetworks(types); + if (systemNets != null && !systemNets.isEmpty()) { + for (NetworkVO net: systemNets) { + logger.debug("update system network service: " + net.getName() + "; service provider: " + serviceMap); + _networksDao.update(net.getId(), net, serviceMap); + } + } else { + logger.debug("no system networks created yet"); + } + return true; + } @Override public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, - ResourceUnavailableException { - s_logger.debug("NetworkElement shutdown ProviderInstances"); + ResourceUnavailableException { + logger.debug("NetworkElement shutdown ProviderInstances"); return true; } @@ -309,8 +308,8 @@ public boolean canEnableIndividualServices() { @Override public boolean verifyServicesCombination(Set services) { // TODO Auto-generated method stub - s_logger.debug("NetworkElement verifyServices"); - s_logger.debug("Services: " + services); + logger.debug("NetworkElement verifyServices"); + logger.debug("Services: " + services); return true; } @@ -328,11 +327,11 @@ public boolean applyIps(Network network, 
List ipAddre } if (isFloatingIpCreate(ip)) { if (_manager.createFloatingIp(ip)) { - s_logger.debug("Successfully created floating ip: " + ip.getAddress().addr()); + logger.debug("Successfully created floating ip: " + ip.getAddress().addr()); } } else { if (_manager.deleteFloatingIp(ip)) { - s_logger.debug("Successfully deleted floating ip: " + ip.getAddress().addr()); + logger.debug("Successfully deleted floating ip: " + ip.getAddress().addr()); } } } @@ -353,26 +352,26 @@ private boolean isFloatingIpCreate(PublicIpAddress ip) { @Override public boolean addDhcpEntry(Network network, NicProfile nic, - VirtualMachineProfile vm, - DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, InsufficientCapacityException, - ResourceUnavailableException { - return false; + VirtualMachineProfile vm, + DeployDestination dest, ReservationContext context) + throws ConcurrentOperationException, InsufficientCapacityException, + ResourceUnavailableException { + return false; } @Override public boolean configDhcpSupportForSubnet(Network network, NicProfile nic, - VirtualMachineProfile vm, - DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, InsufficientCapacityException, - ResourceUnavailableException { - return false; + VirtualMachineProfile vm, + DeployDestination dest, ReservationContext context) + throws ConcurrentOperationException, InsufficientCapacityException, + ResourceUnavailableException { + return false; } @Override public boolean removeDhcpSupportForSubnet(Network network) - throws ResourceUnavailableException { - return false; + throws ResourceUnavailableException { + return false; } @Override diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java index 775ca7e09066..6fb4c3eff972 100644 --- 
a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailGuru.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.network.contrail.model.VMInterfaceModel; import org.apache.cloudstack.network.contrail.model.VirtualMachineModel; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -89,7 +88,6 @@ public class ContrailGuru extends AdapterBase implements NetworkGuru { @Inject DataCenterDao _dcDao; - private static final Logger s_logger = Logger.getLogger(ContrailGuru.class); private static final TrafficType[] TrafficTypes = {TrafficType.Guest}; private boolean canHandle(NetworkOffering offering, NetworkType networkType, PhysicalNetwork physicalNetwork) { @@ -124,7 +122,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(),physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } NetworkVO network = @@ -134,14 +132,14 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use network.setCidr(userSpecified.getCidr()); network.setGateway(userSpecified.getGateway()); } - s_logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? 
"" : " subnet: " + network.getCidr())); return network; } @Override public Network implement(Network network, NetworkOffering offering, DeployDestination destination, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - s_logger.debug("Implement network: " + network.getName() + ", traffic type: " + network.getTrafficType()); + logger.debug("Implement network: " + network.getName() + ", traffic type: " + network.getTrafficType()); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { @@ -154,7 +152,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin vnModel.update(_manager.getModelController()); } } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); return network; } _manager.getDatabase().getVirtualNetworks().add(vnModel); @@ -162,7 +160,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin if (network.getVpcId() != null) { List ips = _ipAddressDao.listByAssociatedVpc(network.getVpcId(), true); if (ips.isEmpty()) { - s_logger.debug("Creating a source nat ip for network " + network); + logger.debug("Creating a source nat ip for network " + network); Account owner = _accountMgr.getAccount(network.getAccountId()); try { PublicIp publicIp = _ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); @@ -172,7 +170,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin _ipAddressDao.update(ip.getId(), ip); _ipAddressDao.releaseFromLockTable(ip.getId()); } catch (Exception e) { - s_logger.error("Unable to allocate source nat ip: " + e); + logger.error("Unable to allocate source nat ip: " + e); } } } @@ -188,7 +186,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin @Override public NicProfile allocate(Network 
network, NicProfile profile, VirtualMachineProfile vm) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - s_logger.debug("allocate NicProfile on " + network.getName()); + logger.debug("allocate NicProfile on " + network.getName()); if (profile != null && profile.getRequestedIPv4() != null) { throw new CloudRuntimeException("Does not support custom ip allocation at this time: " + profile); @@ -202,7 +200,7 @@ public NicProfile allocate(Network network, NicProfile profile, VirtualMachinePr try { broadcastUri = new URI("vlan://untagged"); } catch (Exception e) { - s_logger.warn("unable to instantiate broadcast URI: " + e); + logger.warn("unable to instantiate broadcast URI: " + e); } profile.setBroadcastUri(broadcastUri); @@ -215,8 +213,8 @@ public NicProfile allocate(Network network, NicProfile profile, VirtualMachinePr @Override public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException, InsufficientAddressCapacityException, ConcurrentOperationException { - s_logger.debug("reserve NicProfile on network id: " + network.getId() + " " + network.getName()); - s_logger.debug("deviceId: " + nic.getDeviceId()); + logger.debug("reserve NicProfile on network id: " + network.getId() + " " + network.getName()); + logger.debug("deviceId: " + nic.getDeviceId()); NicVO nicVO = _nicDao.findById(nic.getId()); assert nicVO != null; @@ -242,7 +240,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D vmiModel.build(_manager.getModelController(), (VMInstanceVO)vm.getVirtualMachine(), nicVO); vmiModel.setActive(); } catch (IOException ex) { - s_logger.error("virtual-machine-interface set", ex); + logger.error("virtual-machine-interface set", ex); return; } @@ -251,17 +249,17 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D 
ipModel = new InstanceIpModel(vm.getInstanceName(), nic.getDeviceId()); ipModel.addToVMInterface(vmiModel); } else { - s_logger.debug("Reuse existing instance-ip object on " + ipModel.getName()); + logger.debug("Reuse existing instance-ip object on " + ipModel.getName()); } if (nic.getIPv4Address() != null) { - s_logger.debug("Nic using existing IP address " + nic.getIPv4Address()); + logger.debug("Nic using existing IP address " + nic.getIPv4Address()); ipModel.setAddress(nic.getIPv4Address()); } try { vmModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("virtual-machine update", ex); + logger.warn("virtual-machine update", ex); return; } @@ -272,15 +270,15 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D if (nic.getMacAddress() == null) { MacAddressesType macs = vmi.getMacAddresses(); if (macs == null) { - s_logger.debug("no mac address is allocated for Nic " + nicVO.getUuid()); + logger.debug("no mac address is allocated for Nic " + nicVO.getUuid()); } else { - s_logger.info("VMI " + _manager.getVifNameByVmUuid(vm.getUuid(), nicVO.getDeviceId()) + " got mac address: " + macs.getMacAddress().get(0)); + logger.info("VMI " + _manager.getVifNameByVmUuid(vm.getUuid(), nicVO.getDeviceId()) + " got mac address: " + macs.getMacAddress().get(0)); nic.setMacAddress(macs.getMacAddress().get(0)); } } if (nic.getIPv4Address() == null) { - s_logger.debug("Allocated IP address " + ipModel.getAddress()); + logger.debug("Allocated IP address " + ipModel.getAddress()); nic.setIPv4Address(ipModel.getAddress()); if (network.getCidr() != null) { nic.setIPv4Netmask(NetUtils.cidr2Netmask(network.getCidr())); @@ -296,7 +294,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D @Override public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservationId) { - s_logger.debug("release NicProfile " + nic.getId()); + logger.debug("release NicProfile " + nic.getId()); 
return true; } @@ -306,7 +304,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat */ @Override public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - s_logger.debug("deallocate NicProfile " + nic.getId() + " on " + network.getName()); + logger.debug("deallocate NicProfile " + nic.getId() + " on " + network.getName()); NicVO nicVO = _nicDao.findById(nic.getId()); assert nicVO != null; @@ -330,7 +328,7 @@ public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm try { vmModel.delete(_manager.getModelController()); } catch (IOException ex) { - s_logger.warn("virtual-machine delete", ex); + logger.warn("virtual-machine delete", ex); return; } } @@ -340,12 +338,12 @@ public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm @Override public void updateNicProfile(NicProfile profile, Network network) { // TODO Auto-generated method stub - s_logger.debug("update NicProfile " + profile.getId() + " on " + network.getName()); + logger.debug("update NicProfile " + profile.getId() + " on " + network.getName()); } @Override public void shutdown(NetworkProfile network, NetworkOffering offering) { - s_logger.debug("NetworkGuru shutdown"); + logger.debug("NetworkGuru shutdown"); VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { return; @@ -354,21 +352,21 @@ public void shutdown(NetworkProfile network, NetworkOffering offering) { _manager.getDatabase().getVirtualNetworks().remove(vnModel); vnModel.delete(_manager.getModelController()); } catch (IOException e) { - s_logger.warn("virtual-network delete", e); + logger.warn("virtual-network delete", e); } } @Override public boolean trash(Network network, NetworkOffering offering) { // TODO Auto-generated method stub - s_logger.debug("NetworkGuru trash"); + logger.debug("NetworkGuru trash"); return 
true; } @Override public void updateNetworkProfile(NetworkProfile networkProfile) { // TODO Auto-generated method stub - s_logger.debug("NetworkGuru updateNetworkProfile"); + logger.debug("NetworkGuru updateNetworkProfile"); } @Override diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java index 7021b9ac235e..1261a23b5692 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailManagerImpl.java @@ -40,7 +40,6 @@ import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ConfigurationService; @@ -143,7 +142,6 @@ public class ContrailManagerImpl extends ManagerBase implements ContrailManager @Inject NetworkACLDao _networkAclDao; - private static final Logger s_logger = Logger.getLogger(ContrailManager.class); private ApiConnector _api; @@ -173,8 +171,8 @@ public boolean start() { try { _dbSyncTimer.schedule(new DBSyncTask(), 0, _dbSyncInterval); } catch (Exception ex) { - s_logger.debug("Unable to start DB Sync timer " + ex.getMessage()); - s_logger.debug("timer start", ex); + logger.debug("Unable to start DB Sync timer " + ex.getMessage()); + logger.debug("timer start", ex); } return true; } @@ -335,10 +333,10 @@ public boolean configure(String name, Map params) throws Configu } _api = ApiConnectorFactory.build(hostname, port); } catch (IOException ex) { - s_logger.warn("Unable to read " + configuration, ex); + logger.warn("Unable to 
read " + configuration, ex); throw new ConfigurationException(); } catch (Exception ex) { - s_logger.debug("Exception in configure: " + ex); + logger.debug("Exception in configure: " + ex); ex.printStackTrace(); throw new ConfigurationException(); } finally { @@ -355,7 +353,7 @@ public boolean configure(String name, Map params) throws Configu Provider.JuniperContrailVpcRouter); _vpcOffering = locateVpcOffering(); }catch (Exception ex) { - s_logger.debug("Exception in locating network offerings: " + ex); + logger.debug("Exception in locating network offerings: " + ex); ex.printStackTrace(); throw new ConfigurationException(); } @@ -519,12 +517,12 @@ public void findInfrastructureNetworks(PhysicalNetworkVO phys, List d public void syncNetworkDB(short syncMode) throws IOException { if (_dbSync.syncAll(syncMode) == ServerDBSync.SYNC_STATE_OUT_OF_SYNC) { if (syncMode == DBSyncGeneric.SYNC_MODE_CHECK) { - s_logger.info("# Cloudstack DB & VNC are out of sync #"); + logger.info("# Cloudstack DB & VNC are out of sync #"); } else { - s_logger.info("# Cloudstack DB & VNC were out of sync, performed re-sync operation #"); + logger.info("# Cloudstack DB & VNC were out of sync, performed re-sync operation #"); } } else { - s_logger.info("# Cloudstack DB & VNC are in sync #"); + logger.info("# Cloudstack DB & VNC are in sync #"); } } @@ -534,13 +532,13 @@ public class DBSyncTask extends TimerTask { @Override public void run() { try { - s_logger.debug("DB Sync task is running"); + logger.debug("DB Sync task is running"); syncNetworkDB(_syncMode); // Change to check mode _syncMode = DBSyncGeneric.SYNC_MODE_CHECK; } catch (Exception ex) { - s_logger.debug(ex); - s_logger.info("Unable to sync network db"); + logger.debug(ex); + logger.info("Unable to sync network db"); } } } @@ -591,7 +589,7 @@ public List findSystemNetworks(List types) { sc.setParameters("trafficType", types.toArray()); List dbNets = _networksDao.search(sc, null); if (dbNets == null) { - s_logger.debug("no system 
networks for the given traffic types: " + types.toString()); + logger.debug("no system networks for the given traffic types: " + types.toString()); dbNets = new ArrayList(); } @@ -666,7 +664,7 @@ public List findManagedNetworks(List types) { List dbNets = _networksDao.search(sc, null); if (dbNets == null) { - s_logger.debug("no juniper managed networks for the given traffic types: " + types.toString()); + logger.debug("no juniper managed networks for the given traffic types: " + types.toString()); dbNets = new ArrayList(); } @@ -708,7 +706,7 @@ public List findManagedVpcs() { sc.setParameters("vpcOffering", getVpcOffering().getId()); List vpcs = _vpcDao.search(sc, null); if (vpcs == null || vpcs.size() == 0) { - s_logger.debug("no vpcs found"); + logger.debug("no vpcs found"); return null; } return vpcs; @@ -732,7 +730,7 @@ public List findManagedACLs() { sc.setParameters("vpcId", vpcIds.toArray()); List acls = _networkAclDao.search(sc, null); if (acls == null || acls.size() == 0) { - s_logger.debug("no acls found"); + logger.debug("no acls found"); return null; } /* only return if acl is associated to any network */ @@ -756,7 +754,7 @@ public List findManagedPublicIps() { List dbNets = findManagedNetworks(null); if (dbNets == null || dbNets.isEmpty()) { - s_logger.debug("Juniper managed networks is empty"); + logger.debug("Juniper managed networks is empty"); return null; } @@ -778,7 +776,7 @@ public List findManagedPublicIps() { List publicIps = _ipAddressDao.search(sc, null); if (publicIps == null) { - s_logger.debug("no public ips"); + logger.debug("no public ips"); return null; } @@ -803,7 +801,7 @@ private void initializeDefaultVirtualNetworkModels() { vnModel.update(getModelController()); } } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); } getDatabase().getVirtualNetworks().add(vnModel); } @@ -918,7 +916,7 @@ public VirtualNetworkModel lookupPublicNetworkModel() { } 
getDatabase().getVirtualNetworks().add(vnModel); } catch (Exception ex) { - s_logger.warn("virtual-network update: ", ex); + logger.warn("virtual-network update: ", ex); } return vnModel; } @@ -938,7 +936,7 @@ public boolean createFloatingIp(PublicIpAddress ip) { fipPoolModel.update(getModelController()); vnModel.setFipPoolModel(fipPoolModel); } catch (Exception ex) { - s_logger.warn("floating-ip-pool create: ", ex); + logger.warn("floating-ip-pool create: ", ex); return false; } } @@ -952,7 +950,7 @@ public boolean createFloatingIp(PublicIpAddress ip) { try { fipModel.update(getModelController()); } catch (Exception ex) { - s_logger.warn("floating-ip create: ", ex); + logger.warn("floating-ip create: ", ex); return false; } } @@ -969,7 +967,7 @@ public boolean deleteFloatingIp(PublicIpAddress ip) { try { fipModel.destroy(getModelController()); } catch (IOException ex) { - s_logger.warn("floating ip delete", ex); + logger.warn("floating ip delete", ex); return false; } fipPoolModel.removeSuccessor(fipModel); @@ -993,7 +991,7 @@ public List getFloatingIps() { try { fipPool = (FloatingIpPool)_api.findByFQN(FloatingIpPool.class, fipPoolName); } catch (Exception ex) { - s_logger.debug(ex); + logger.debug(ex); } if (fipPool == null) { return null; @@ -1003,7 +1001,7 @@ public List getFloatingIps() { try { return (List)_api.getObjects(FloatingIp.class, ips); } catch (IOException ex) { - s_logger.debug(ex); + logger.debug(ex); return null; } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java index 689b252b2a73..b73ed7feb608 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java +++ 
b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ContrailVpcElementImpl.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; import org.apache.cloudstack.network.contrail.model.NetworkPolicyModel; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeployDestination; @@ -46,8 +45,6 @@ @Component public class ContrailVpcElementImpl extends ContrailElementImpl implements NetworkACLServiceProvider, VpcProvider { - private static final Logger s_logger = - Logger.getLogger(ContrailElement.class); @Inject NetworkACLDao _networkACLDao; @@ -63,7 +60,7 @@ public boolean implementVpc(Vpc vpc, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement implementVpc"); + logger.debug("NetworkElement implementVpc"); return true; } @@ -71,7 +68,7 @@ public boolean implementVpc(Vpc vpc, DeployDestination dest, public boolean shutdownVpc(Vpc vpc, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement shutdownVpc"); + logger.debug("NetworkElement shutdownVpc"); return true; } @@ -79,7 +76,7 @@ public boolean shutdownVpc(Vpc vpc, ReservationContext context) public boolean createPrivateGateway(PrivateGateway gateway) throws ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement createPrivateGateway"); + logger.debug("NetworkElement createPrivateGateway"); return false; } @@ -87,7 +84,7 @@ public boolean createPrivateGateway(PrivateGateway gateway) public boolean deletePrivateGateway(PrivateGateway privateGateway) throws ConcurrentOperationException, ResourceUnavailableException { // TODO 
Auto-generated method stub - s_logger.debug("NetworkElement deletePrivateGateway"); + logger.debug("NetworkElement deletePrivateGateway"); return false; } @@ -95,7 +92,7 @@ public boolean deletePrivateGateway(PrivateGateway privateGateway) public boolean applyStaticRoutes(Vpc vpc, List routes) throws ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement applyStaticRoutes"); + logger.debug("NetworkElement applyStaticRoutes"); return true; } @@ -103,9 +100,9 @@ public boolean applyStaticRoutes(Vpc vpc, List routes) public boolean applyNetworkACLs(Network net, List rules) throws ResourceUnavailableException { - s_logger.debug("NetworkElement applyNetworkACLs"); + logger.debug("NetworkElement applyNetworkACLs"); if (rules == null || rules.isEmpty()) { - s_logger.debug("no rules to apply"); + logger.debug("no rules to apply"); return true; } @@ -125,7 +122,7 @@ public boolean applyNetworkACLs(Network net, project = _manager.getDefaultVncProject(); } } catch (IOException ex) { - s_logger.warn("read project", ex); + logger.warn("read project", ex); return false; } policyModel.setProject(project); @@ -143,7 +140,7 @@ public boolean applyNetworkACLs(Network net, try { policyModel.build(_manager.getModelController(), rules); } catch (Exception e) { - s_logger.error(e); + logger.error(e); e.printStackTrace(); return false; } @@ -154,7 +151,7 @@ public boolean applyNetworkACLs(Network net, } _manager.getDatabase().getNetworkPolicys().add(policyModel); } catch (Exception ex) { - s_logger.error("network-policy update: ", ex); + logger.error("network-policy update: ", ex); ex.printStackTrace(); return false; } @@ -190,7 +187,7 @@ public boolean applyACLItemsToPrivateGw(PrivateGateway privateGateway, List rules) throws ResourceUnavailableException { // TODO Auto-generated method stub - s_logger.debug("NetworkElement applyACLItemsToPrivateGw"); + logger.debug("NetworkElement applyACLItemsToPrivateGw"); return true; } diff --git 
a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java index fdfd9df2a435..7cb47224ed72 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/DBSyncGeneric.java @@ -26,13 +26,14 @@ import net.juniper.contrail.api.ApiObjectBase; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.api.Identity; public class DBSyncGeneric { - private static final Logger s_logger = Logger.getLogger(DBSyncGeneric.class); + protected Logger logger = LogManager.getLogger(getClass()); /* for each synchronization VNC class, following methods * needs to be defined. @@ -141,7 +142,7 @@ private Boolean filter(Class cls, Object... parameters) throws InvocationTarg String filterMethod = filterMethodPrefix + getClassName(cls); Method method = _methodMap.get(filterMethod); if (method == null) { - s_logger.debug("Method not implemented: " + getClassName(_scope.getClass()) + ":" + filterMethod); + logger.debug("Method not implemented: " + getClassName(_scope.getClass()) + ":" + filterMethod); return false; } return (Boolean)method.invoke(_scope, parameters); @@ -151,7 +152,7 @@ private Boolean equal(Class cls, Object... 
parameters) throws InvocationTarge String equalMethod = equalMethodPrefix + getClassName(cls); Method method = _methodMap.get(equalMethod); if (method == null) { - s_logger.debug("Method not implemented: " + getClassName(_scope.getClass()) + ":" + equalMethod); + logger.debug("Method not implemented: " + getClassName(_scope.getClass()) + ":" + equalMethod); return true; } return (Boolean)method.invoke(_scope, parameters); @@ -300,7 +301,7 @@ public boolean syncGeneric(Class cls, List dbList, List vncList) throws SyncStats stats = new SyncStats(); stats.log("Sync log for <" + getClassName(cls) + ">"); - s_logger.debug("Generic db sync : " + getClassName(cls)); + logger.debug("Generic db sync : " + getClassName(cls)); java.util.Collections.sort(dbList, this.dbComparator(cls)); java.util.Collections.sort(vncList, this.vncComparator(cls)); @@ -308,16 +309,16 @@ public boolean syncGeneric(Class cls, List dbList, List vncList) throws syncCollections(cls, dbList, vncList, _syncMode != SYNC_MODE_CHECK, stats); if (_syncMode != SYNC_MODE_CHECK) { - s_logger.debug("Sync stats<" + getClassName(cls) + ">: " + stats.toString()); - s_logger.debug(stats.logMsg); - s_logger.debug("Generic db sync : " + getClassName(cls) + " done"); + logger.debug("Sync stats<" + getClassName(cls) + ">: " + stats.toString()); + logger.debug(stats.logMsg); + logger.debug("Generic db sync : " + getClassName(cls) + " done"); } else { - s_logger.debug("Sync state checking stats<" + getClassName(cls) + ">: " + stats.toString()); + logger.debug("Sync state checking stats<" + getClassName(cls) + ">: " + stats.toString()); if (!stats.isSynchronized()) { - s_logger.debug("DB and VNC objects out of sync is detected : " + getClassName(cls)); - s_logger.debug("Log message: \n" + stats.logMsg); + logger.debug("DB and VNC objects out of sync is detected : " + getClassName(cls)); + logger.debug("Log message: \n" + stats.logMsg); } else { - s_logger.debug("DB and VNC objects are in sync : " + getClassName(cls)); + 
logger.debug("DB and VNC objects are in sync : " + getClassName(cls)); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java index 78ec01344ca7..6f1a98846c5f 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/EventUtils.java @@ -23,7 +23,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; @@ -44,7 +45,7 @@ @Component public class EventUtils { - private static final Logger s_logger = Logger.getLogger(EventUtils.class); + protected static Logger LOGGER = LogManager.getLogger(EventUtils.class); protected static EventBus s_eventBus = null; @@ -75,14 +76,14 @@ private static void publishOnMessageBus(String eventCategory, String eventType, s_eventBus.publish(event); } catch (EventBusException evx) { String errMsg = "Failed to publish contrail event."; - s_logger.warn(errMsg, evx); + LOGGER.warn(errMsg, evx); } } public static class EventInterceptor implements ComponentMethodInterceptor, MethodInterceptor { - private static final Logger s_logger = Logger.getLogger(EventInterceptor.class); + protected Logger LOGGER = LogManager.getLogger(getClass()); public EventInterceptor() { @@ -155,7 +156,7 @@ public void interceptComplete(Method method, Object target, Object event) { @Override public void interceptException(Method method, Object target, Object event) { - s_logger.debug("interceptException"); + LOGGER.debug("interceptException"); } 
@Override diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java index 6ad0746ba6b1..0959eabd832d 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ManagementNetworkGuru.java @@ -28,7 +28,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DeploymentPlan; @@ -48,7 +47,6 @@ */ @Component public class ManagementNetworkGuru extends ContrailGuru { - private static final Logger s_logger = Logger.getLogger(ManagementNetworkGuru.class); private static final TrafficType[] TrafficTypes = {TrafficType.Management}; private final String configuration = "contrail.properties"; @@ -71,7 +69,7 @@ public boolean configure(String name, Map params) throws Configu } inputFile = new FileInputStream(configFile); } catch (FileNotFoundException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); throw new ConfigurationException(e.getMessage()); } @@ -79,14 +77,14 @@ public boolean configure(String name, Map params) throws Configu try { configProps.load(inputFile); } catch (IOException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); throw new ConfigurationException(e.getMessage()); } finally { closeAutoCloseable(inputFile, "error closing config file"); } _mgmtCidr = configProps.getProperty("management.cidr"); _mgmtGateway = configProps.getProperty("management.gateway"); - s_logger.info("Management network " + _mgmtCidr + " gateway: " + _mgmtGateway); + logger.info("Management network " + _mgmtCidr + " gateway: " + 
_mgmtGateway); return true; } @@ -123,7 +121,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use network.setCidr(_mgmtCidr); network.setGateway(_mgmtGateway); } - s_logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); + logger.debug("Allocated network " + userSpecified.getName() + (network.getCidr() == null ? "" : " subnet: " + network.getCidr())); return network; } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerDBSyncImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerDBSyncImpl.java index 320ac489ba9d..70d8c6deef92 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerDBSyncImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerDBSyncImpl.java @@ -41,7 +41,8 @@ import net.juniper.contrail.api.types.VirtualNetwork; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.network.contrail.model.FloatingIpModel; @@ -118,7 +119,7 @@ public class ServerDBSyncImpl implements ServerDBSync { _dbSync = new DBSyncGeneric(this); } - private static final Logger s_logger = Logger.getLogger(ServerDBSync.class); + protected Logger logger = LogManager.getLogger(getClass()); /* * API for syncing all classes of vnc objects with cloudstack @@ -131,7 +132,7 @@ public short syncAll(short syncMode) { short syncState = SYNC_STATE_IN_SYNC; /* vnc classes need to be synchronized with cloudstack */ - s_logger.debug("syncing cloudstack db with vnc"); + logger.debug("syncing cloudstack db with vnc"); try { for 
(Class cls : _vncClasses) { @@ -141,29 +142,29 @@ public short syncAll(short syncMode) { _dbSync.setSyncMode(syncMode); if (_dbSync.getSyncMode() == DBSyncGeneric.SYNC_MODE_CHECK) { - s_logger.debug("sync check start: " + DBSyncGeneric.getClassName(cls)); + logger.debug("sync check start: " + DBSyncGeneric.getClassName(cls)); } else { - s_logger.debug("sync start: " + DBSyncGeneric.getClassName(cls)); + logger.debug("sync start: " + DBSyncGeneric.getClassName(cls)); } if (_dbSync.sync(cls) == false) { if (_dbSync.getSyncMode() == DBSyncGeneric.SYNC_MODE_CHECK) { - s_logger.info("out of sync detected: " + DBSyncGeneric.getClassName(cls)); + logger.info("out of sync detected: " + DBSyncGeneric.getClassName(cls)); } else { - s_logger.info("out of sync detected and re-synced: " + DBSyncGeneric.getClassName(cls)); + logger.info("out of sync detected and re-synced: " + DBSyncGeneric.getClassName(cls)); } syncState = SYNC_STATE_OUT_OF_SYNC; } if (_dbSync.getSyncMode() == DBSyncGeneric.SYNC_MODE_CHECK) { - s_logger.debug("sync check finish: " + DBSyncGeneric.getClassName(cls)); + logger.debug("sync check finish: " + DBSyncGeneric.getClassName(cls)); } else { - s_logger.debug("sync finish: " + DBSyncGeneric.getClassName(cls)); + logger.debug("sync finish: " + DBSyncGeneric.getClassName(cls)); } /* unlock the sync mode */ _lockSyncMode.unlock(); } } catch (Exception ex) { - s_logger.warn("DB Synchronization", ex); + logger.warn("DB Synchronization", ex); syncState = SYNC_STATE_UNKNOWN; if (_lockSyncMode.isLocked()) { _lockSyncMode.unlock(); @@ -176,16 +177,16 @@ public short syncAll(short syncMode) { @Override public void syncClass(Class cls) { - s_logger.debug("syncClass: " + cls.getName()); + logger.debug("syncClass: " + cls.getName()); try { - s_logger.debug("sync start: " + DBSyncGeneric.getClassName(cls)); + logger.debug("sync start: " + DBSyncGeneric.getClassName(cls)); _lockSyncMode.lock(); _dbSync.setSyncMode(DBSyncGeneric.SYNC_MODE_UPDATE); _dbSync.sync(cls); 
_lockSyncMode.unlock(); - s_logger.debug("sync finish: " + DBSyncGeneric.getClassName(cls)); + logger.debug("sync finish: " + DBSyncGeneric.getClassName(cls)); } catch (Exception ex) { - s_logger.warn("Sync error: " + cls.getName(), ex); + logger.warn("Sync error: " + cls.getName(), ex); if (_lockSyncMode.isLocked()) { _lockSyncMode.unlock(); } @@ -240,7 +241,7 @@ public boolean syncDomain() throws Exception { List vncList = api.list(net.juniper.contrail.api.types.Domain.class, null); return _dbSync.syncGeneric(net.juniper.contrail.api.types.Domain.class, dbList, vncList); } catch (Exception ex) { - s_logger.warn("syncDomain", ex); + logger.warn("syncDomain", ex); throw ex; } } @@ -252,7 +253,7 @@ public void createDomain(DomainVO db, StringBuffer syncLogMesg) throws IOExcepti vnc.setName(db.getName()); vnc.setUuid(db.getUuid()); if (!api.create(vnc)) { - s_logger.error("Unable to create domain " + vnc.getName()); + logger.error("Unable to create domain " + vnc.getName()); syncLogMesg.append("Error: Virtual domain# VNC : Unable to create domain: " + vnc.getName() + "\n"); return; } @@ -268,7 +269,7 @@ public void deleteDomain(net.juniper.contrail.api.types.Domain vnc, StringBuffer try { deleteChildren(vnc.getProjects(), net.juniper.contrail.api.types.Project.class, syncLogMesg); } catch (Exception ex) { - s_logger.warn("deleteDomain", ex); + logger.warn("deleteDomain", ex); } api.delete(vnc); @@ -341,7 +342,7 @@ public boolean syncProject() throws Exception { List vncList = api.list(net.juniper.contrail.api.types.Project.class, null); return _dbSync.syncGeneric(net.juniper.contrail.api.types.Project.class, dbList, vncList); } catch (Exception ex) { - s_logger.warn("syncProject", ex); + logger.warn("syncProject", ex); throw ex; } } @@ -353,7 +354,7 @@ public void createProject(ProjectVO db, StringBuffer syncLogMesg) throws IOExcep vnc.setName(db.getName()); vnc.setUuid(db.getUuid()); if (!api.create(vnc)) { - s_logger.error("Unable to create project: " + 
vnc.getName()); + logger.error("Unable to create project: " + vnc.getName()); syncLogMesg.append("Error: Virtual project# VNC : Unable to create project: " + vnc.getName() + "\n"); return; } @@ -371,7 +372,7 @@ public void deleteProject(net.juniper.contrail.api.types.Project vnc, StringBuff deleteChildren(vnc.getNetworkIpams(), net.juniper.contrail.api.types.NetworkIpam.class, syncLogMesg); deleteChildren(vnc.getNetworkPolicys(), net.juniper.contrail.api.types.NetworkPolicy.class, syncLogMesg); } catch (Exception ex) { - s_logger.warn("deleteProject", ex); + logger.warn("deleteProject", ex); } api.delete(vnc); @@ -464,10 +465,10 @@ public boolean syncVirtualNetwork() throws Exception { vncList.add(vn); } } - s_logger.debug("sync VN - DB size: " + dbNets.size() + " VNC Size: " + vncList.size()); + logger.debug("sync VN - DB size: " + dbNets.size() + " VNC Size: " + vncList.size()); return _dbSync.syncGeneric(VirtualNetwork.class, dbNets, vncList); } catch (Exception ex) { - s_logger.warn("sync virtual-networks", ex); + logger.warn("sync virtual-networks", ex); throw ex; } } @@ -510,7 +511,7 @@ public void createVirtualNetwork(NetworkVO dbNet, StringBuffer syncLogMesg) thro syncLogMesg.append("VN# DB: " + _manager.getCanonicalName(dbNet) + "(" + dbNet.getUuid() + "); VNC: none; action: create\n"); if (_manager.getDatabase().lookupVirtualNetwork(dbNet.getUuid(), _manager.getCanonicalName(dbNet), dbNet.getTrafficType()) != null) { - s_logger.warn("VN model object is already present in DB: " + dbNet.getUuid() + ", name: " + dbNet.getName()); + logger.warn("VN model object is already present in DB: " + dbNet.getUuid() + ", name: " + dbNet.getName()); } VirtualNetworkModel vnModel = new VirtualNetworkModel(dbNet, dbNet.getUuid(), _manager.getCanonicalName(dbNet), dbNet.getTrafficType()); @@ -518,7 +519,7 @@ public void createVirtualNetwork(NetworkVO dbNet, StringBuffer syncLogMesg) thro NetworkACLVO acl = _networkACLDao.findById(dbNet.getNetworkACLId()); 
NetworkPolicyModel policyModel = _manager.getDatabase().lookupNetworkPolicy(acl.getUuid()); if (policyModel == null) { - s_logger.error("Network(" + dbNet.getName() + ") has ACL but policy model not created: " + + logger.error("Network(" + dbNet.getName() + ") has ACL but policy model not created: " + acl.getUuid() + ", name: " + acl.getName()); } else { vnModel.addToNetworkPolicy(policyModel); @@ -532,11 +533,11 @@ public void createVirtualNetwork(NetworkVO dbNet, StringBuffer syncLogMesg) thro vnModel.update(_manager.getModelController()); } } catch (InternalErrorException ex) { - s_logger.warn("create virtual-network", ex); + logger.warn("create virtual-network", ex); syncLogMesg.append("Error: VN# VNC : Unable to create network " + dbNet.getName() + "\n"); return; } - s_logger.debug("add model " + vnModel.getName()); + logger.debug("add model " + vnModel.getName()); _manager.getDatabase().getVirtualNetworks().add(vnModel); syncLogMesg.append("VN# VNC: " + dbNet.getUuid() + ", " + vnModel.getName() + " created\n"); } else { @@ -598,7 +599,7 @@ public Boolean equalVirtualNetwork(NetworkVO dbn, VirtualNetwork vnet, StringBuf NetworkACLVO acl = _networkACLDao.findById(dbn.getNetworkACLId()); NetworkPolicyModel policyModel = _manager.getDatabase().lookupNetworkPolicy(acl.getUuid()); if (policyModel == null) { - s_logger.error("Network(" + dbn.getName() + ") has ACL but policy model not created: " + + logger.error("Network(" + dbn.getName() + ") has ACL but policy model not created: " + acl.getUuid() + ", name: " + acl.getName()); } else { vnModel.addToNetworkPolicy(policyModel); @@ -615,14 +616,14 @@ public Boolean equalVirtualNetwork(NetworkVO dbn, VirtualNetwork vnet, StringBuf } _manager.getDatabase().getVirtualNetworks().remove(current); } - s_logger.debug("add model " + vnModel.getName()); + logger.debug("add model " + vnModel.getName()); _manager.getDatabase().getVirtualNetworks().add(vnModel); try { if (!vnModel.verify(_manager.getModelController())) { 
vnModel.update(_manager.getModelController()); } } catch (Exception ex) { - s_logger.warn("update virtual-network", ex); + logger.warn("update virtual-network", ex); } if (current != null) { NetworkPolicyModel oldPolicyModel = current.getNetworkPolicyModel(); @@ -661,10 +662,10 @@ public boolean syncVirtualMachine() { List vmDbList = _vmInstanceDao.listAll(); @SuppressWarnings("unchecked") List vncVmList = (List)api.list(VirtualMachine.class, null); - s_logger.debug("sync VM: CS size: " + vmDbList.size() + " VNC size: " + vncVmList.size()); + logger.debug("sync VM: CS size: " + vmDbList.size() + " VNC size: " + vncVmList.size()); return _dbSync.syncGeneric(VirtualMachine.class, vmDbList, vncVmList); } catch (Exception ex) { - s_logger.warn("sync virtual-machines", ex); + logger.warn("sync virtual-machines", ex); } return false; } @@ -699,7 +700,7 @@ public void createVirtualMachine(VMInstanceVO dbVm, StringBuffer syncLogMesg) th try { vmModel.update(_manager.getModelController()); } catch (InternalErrorException ex) { - s_logger.warn("create virtual-machine", ex); + logger.warn("create virtual-machine", ex); return; } _manager.getDatabase().getVirtualMachines().add(vmModel); @@ -757,7 +758,7 @@ public void deleteVirtualMachine(VirtualMachine vncVm, StringBuffer syncLogMesg) deleteVirtualMachineInterfaces(vncVm.getVirtualMachineInterfaces(), syncLogMesg); api.delete(VirtualMachine.class, vncVm.getUuid()); } catch (IOException ex) { - s_logger.warn("delete virtual-machine", ex); + logger.warn("delete virtual-machine", ex); return; } syncLogMesg.append("VM# VNC: " + vncVm.getName() + " deleted\n"); @@ -783,7 +784,7 @@ private void buildNicResources(VirtualMachineModel vmModel, VMInstanceVO dbVm, S VirtualNetworkModel vnModel = _manager.getDatabase().lookupVirtualNetwork(network.getUuid(), _manager.getCanonicalName(network), network.getTrafficType()); if (vnModel == null) { - s_logger.warn("Unable to locate virtual-network for network id " + network.getId()); + 
logger.warn("Unable to locate virtual-network for network id " + network.getId()); continue; } vmiModel.addToVirtualMachine(vmModel); @@ -805,7 +806,7 @@ public Boolean equalVirtualMachine(VMInstanceVO dbVm, VirtualMachine vncVm, Stri try { buildNicResources(vmModel, dbVm, syncLogMsg); } catch (IOException ex) { - s_logger.warn("build nic information for " + dbVm.getInstanceName(), ex); + logger.warn("build nic information for " + dbVm.getInstanceName(), ex); } } @@ -818,7 +819,7 @@ public Boolean equalVirtualMachine(VMInstanceVO dbVm, VirtualMachine vncVm, Stri try { vmModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("update virtual-machine", ex); + logger.warn("update virtual-machine", ex); } } else { //compare @@ -845,7 +846,7 @@ public boolean syncFloatingIp() throws Exception { try { status = _dbSync.syncGeneric(FloatingIp.class, ipList, vncList); } catch (Exception ex) { - s_logger.warn("sync floating-ips", ex); + logger.warn("sync floating-ips", ex); throw ex; } return status; @@ -915,21 +916,21 @@ public Boolean equalFloatingIp(IPAddressVO db, FloatingIp vnc, StringBuffer sync fipPoolModel.update(_manager.getModelController()); vnModel.setFipPoolModel(fipPoolModel); } catch (Exception ex) { - s_logger.warn("floating-ip-pool create: ", ex); + logger.warn("floating-ip-pool create: ", ex); return false; } } FloatingIpModel current = fipPoolModel.getFloatingIpModel(db.getUuid()); if (current == null) { - s_logger.debug("add model " + db.getAddress().addr()); + logger.debug("add model " + db.getAddress().addr()); FloatingIpModel fipModel = new FloatingIpModel(db.getUuid()); fipModel.addToFloatingIpPool(fipPoolModel); fipModel.build(_manager.getModelController(), PublicIp.createFromAddrAndVlan(db, _vlanDao.findById(db.getVlanId()))); try { fipModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("floating-ip create: ", ex); + logger.warn("floating-ip create: ", ex); return false; } } @@ -957,10 
+958,10 @@ public boolean syncNetworkPolicy() throws Exception { vncList.add(policy); } } - s_logger.debug("sync Network Policy - DB size: " + dbAcls.size() + " VNC Size: " + vncList.size()); + logger.debug("sync Network Policy - DB size: " + dbAcls.size() + " VNC Size: " + vncList.size()); return _dbSync.syncGeneric(NetworkPolicy.class, dbAcls, vncList); } catch (Exception ex) { - s_logger.warn("sync network-policys", ex); + logger.warn("sync network-policys", ex); throw ex; } } @@ -988,7 +989,7 @@ public void createNetworkPolicy(NetworkACLVO db, StringBuffer syncLogMesg) throw "(" + db.getUuid() + "); VNC: none; action: create\n"); if (_manager.getDatabase().lookupNetworkPolicy(db.getUuid()) != null) { - s_logger.warn("Policy model object is already present in DB: " + + logger.warn("Policy model object is already present in DB: " + db.getUuid() + ", name: " + db.getName()); } NetworkPolicyModel policyModel = new NetworkPolicyModel(db.getUuid(), db.getName()); @@ -1001,7 +1002,7 @@ public void createNetworkPolicy(NetworkACLVO db, StringBuffer syncLogMesg) throw project = _manager.getDefaultVncProject(); } } catch (IOException ex) { - s_logger.warn("read project", ex); + logger.warn("read project", ex); throw ex; } policyModel.setProject(project); @@ -1018,12 +1019,12 @@ public void createNetworkPolicy(NetworkACLVO db, StringBuffer syncLogMesg) throw policyModel.update(_manager.getModelController()); } } catch (Exception ex) { - s_logger.warn("create network-policy", ex); + logger.warn("create network-policy", ex); syncLogMesg.append("Error: Policy# VNC : Unable to create network policy " + db.getName() + "\n"); return; } - s_logger.debug("add model " + policyModel.getName()); + logger.debug("add model " + policyModel.getName()); _manager.getDatabase().getNetworkPolicys().add(policyModel); syncLogMesg.append("Policy# VNC: " + db.getUuid() + ", " + policyModel.getName() + " created\n"); } else { @@ -1071,7 +1072,7 @@ public Boolean equalNetworkPolicy(NetworkACLVO 
db, NetworkPolicy policy, StringB project = _manager.getDefaultVncProject(); } } catch (IOException ex) { - s_logger.warn("read project", ex); + logger.warn("read project", ex); } policyModel.setProject(project); List rules = _networkACLItemDao.listByACL(db.getId()); @@ -1084,14 +1085,14 @@ public Boolean equalNetworkPolicy(NetworkACLVO db, NetworkPolicy policy, StringB if (current != null) { _manager.getDatabase().getNetworkPolicys().remove(current); } - s_logger.debug("add policy model " + policyModel.getName()); + logger.debug("add policy model " + policyModel.getName()); _manager.getDatabase().getNetworkPolicys().add(policyModel); try { if (!policyModel.verify(_manager.getModelController())) { policyModel.update(_manager.getModelController()); } } catch (Exception ex) { - s_logger.warn("update network-policy", ex); + logger.warn("update network-policy", ex); } } else { //compare @@ -1122,14 +1123,14 @@ public void createServiceInstance(ServiceInstanceModel siModel, StringBuffer log public void deleteServiceInstance(ServiceInstance siObj, StringBuffer logMsg) { final ApiConnector api = _manager.getApiConnector(); - s_logger.debug("delete " + siObj.getQualifiedName()); + logger.debug("delete " + siObj.getQualifiedName()); if (!_rwMode) { return; } try { api.delete(siObj); } catch (IOException ex) { - s_logger.warn("service-instance delete", ex); + logger.warn("service-instance delete", ex); } } @@ -1141,7 +1142,7 @@ public void deleteServiceInstance(ServiceInstance siObj, StringBuffer logMsg) { * @param logMsg */ public void equalServiceInstance(ServiceInstanceModel siModel, ServiceInstance siObj, StringBuffer logMsg) { - s_logger.debug("equal " + siModel.getQualifiedName()); + logger.debug("equal " + siModel.getQualifiedName()); } static class ServiceInstanceComparator implements Comparator, Serializable { @@ -1169,7 +1170,7 @@ public boolean syncServiceInstance() { _dbSync.syncCollections(ServiceInstance.class, _manager.getDatabase().getServiceInstances(), 
siList, _rwMode, stats); inSync = stats.create == 0 && stats.delete == 0; } catch (Exception ex) { - s_logger.warn("synchronize service-instances", ex); + logger.warn("synchronize service-instances", ex); return false; } return inSync; diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerEventHandlerImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerEventHandlerImpl.java index 05dcdce1a92f..2ddb28ea9f22 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerEventHandlerImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServerEventHandlerImpl.java @@ -22,7 +22,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.messagebus.MessageBus; @@ -62,7 +63,7 @@ public class ServerEventHandlerImpl implements ServerEventHandler { private HashMap _methodMap; private HashMap> _classMap; - private static final Logger s_logger = Logger.getLogger(MessageHandler.class); + protected Logger logger = LogManager.getLogger(getClass()); ServerEventHandlerImpl() { setMethodMap(); @@ -85,7 +86,7 @@ private void setClassMap() { @MessageHandler(topic = ".*") public void defaultMessageHandler(String subject, String topic, Object args) { - s_logger.info("DB Event Received - topic: " + topic + "; subject: " + subject); + logger.info("DB Event Received - topic: " + topic + "; subject: " + subject); org.apache.cloudstack.framework.events.Event event = (org.apache.cloudstack.framework.events.Event)args; @@ -108,18 +109,18 @@ public void defaultMessageHandler(String subject, String topic, Object args) { defaultHandler(subject, topic, 
event); } } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); } } /* Default create handler */ void defaultCreateHandler(String subject, String topic, org.apache.cloudstack.framework.events.Event event) { - s_logger.debug("Default handler is invoked for subject: " + subject + "; topic: " + topic); - s_logger.debug("description: " + event.getDescription()); - s_logger.debug("category: " + event.getEventCategory()); - s_logger.debug("type: " + event.getResourceType()); - s_logger.debug("event-type: " + event.getEventType()); + logger.debug("Default handler is invoked for subject: " + subject + "; topic: " + topic); + logger.debug("description: " + event.getDescription()); + logger.debug("category: " + event.getEventCategory()); + logger.debug("type: " + event.getResourceType()); + logger.debug("event-type: " + event.getEventType()); Class cls = _classMap.get(event.getResourceType()); @@ -133,12 +134,12 @@ void defaultCreateHandler(String subject, String topic, org.apache.cloudstack.fr /* Default handler */ void defaultDeleteHandler(String subject, String topic, org.apache.cloudstack.framework.events.Event event) { - s_logger.debug("Default handler is invoked for subject: " + subject + "; topic: " + topic); + logger.debug("Default handler is invoked for subject: " + subject + "; topic: " + topic); - s_logger.debug("description: " + event.getDescription()); - s_logger.debug("category: " + event.getEventCategory()); - s_logger.debug("type: " + event.getResourceType()); - s_logger.debug("event-type: " + event.getEventType()); + logger.debug("description: " + event.getDescription()); + logger.debug("category: " + event.getEventCategory()); + logger.debug("type: " + event.getResourceType()); + logger.debug("event-type: " + event.getEventType()); Class cls = _classMap.get(event.getResourceType()); if (cls != null) { _dbSync.syncClass(cls); @@ -149,12 +150,12 @@ void defaultDeleteHandler(String subject, String topic, org.apache.cloudstack.fr /* Default handler */ 
void defaultHandler(String subject, String topic, org.apache.cloudstack.framework.events.Event event) { - s_logger.debug("Default handler is invoked for subject: " + subject + "; topic: " + topic); + logger.debug("Default handler is invoked for subject: " + subject + "; topic: " + topic); - s_logger.debug("description: " + event.getDescription()); - s_logger.debug("category: " + event.getEventCategory()); - s_logger.debug("type: " + event.getResourceType()); - s_logger.debug("event-type: " + event.getEventType()); + logger.debug("description: " + event.getDescription()); + logger.debug("category: " + event.getEventCategory()); + logger.debug("type: " + event.getResourceType()); + logger.debug("event-type: " + event.getEventType()); Class cls = _classMap.get(event.getResourceType()); if (cls != null) { _dbSync.syncClass(cls); @@ -177,19 +178,19 @@ private long parseForId(String resourceType, String description) { try { id = Long.parseLong(idStr.trim()); } catch (Exception e) { - s_logger.debug("Unable to parse id string<" + idStr.trim() + "> for long value, ignored"); + logger.debug("Unable to parse id string<" + idStr.trim() + "> for long value, ignored"); } return id; } public void onDomainCreate(String subject, String topic, org.apache.cloudstack.framework.events.Event event) { - s_logger.info("onDomainCreate; topic: " + topic + "; subject: " + subject); + logger.info("onDomainCreate; topic: " + topic + "; subject: " + subject); try { long id = parseForId(event.getResourceType(), event.getDescription()); if (id != 0) { DomainVO domain = _domainDao.findById(id); if (domain != null) { - s_logger.info("createDomain for name: " + domain.getName() + "; uuid: " + domain.getUuid()); + logger.info("createDomain for name: " + domain.getName() + "; uuid: " + domain.getUuid()); StringBuffer logMesg = new StringBuffer(); _dbSync.createDomain(domain, logMesg); } else { @@ -201,18 +202,18 @@ public void onDomainCreate(String subject, String topic, org.apache.cloudstack.f 
_dbSync.syncClass(net.juniper.contrail.api.types.Domain.class); } } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); } } public void onProjectCreate(String subject, String topic, org.apache.cloudstack.framework.events.Event event) { - s_logger.info("onProjectCreate; topic: " + topic + "; subject: " + subject); + logger.info("onProjectCreate; topic: " + topic + "; subject: " + subject); try { long id = parseForId(event.getResourceType(), event.getDescription()); if (id != 0) { ProjectVO project = _projectDao.findById(id); if (project != null) { - s_logger.info("createProject for name: " + project.getName() + "; uuid: " + project.getUuid()); + logger.info("createProject for name: " + project.getName() + "; uuid: " + project.getUuid()); StringBuffer logMesg = new StringBuffer(); _dbSync.createProject(project, logMesg); } else { @@ -224,7 +225,7 @@ public void onProjectCreate(String subject, String topic, org.apache.cloudstack. _dbSync.syncClass(net.juniper.contrail.api.types.Project.class); } } catch (Exception e) { - s_logger.info(e); + logger.info(e); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java index d754e143411e..08941c56e3c5 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java @@ -31,7 +31,8 @@ import org.apache.cloudstack.network.contrail.model.ServiceInstanceModel; import org.apache.cloudstack.network.contrail.model.VirtualMachineModel; import org.apache.cloudstack.network.contrail.model.VirtualNetworkModel; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; import com.cloud.api.ApiDBUtils; import com.cloud.dc.DataCenter; @@ -63,7 +64,7 @@ import net.juniper.contrail.api.types.ServiceInstance; public class ServiceManagerImpl implements ServiceManager { - private static final Logger s_logger = Logger.getLogger(ServiceManager.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject UserDao _userDao; @@ -140,7 +141,7 @@ private ServiceVirtualMachine createServiceVM(DataCenter zone, Account owner, Vi @Override public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owner, VirtualMachineTemplate template, ServiceOffering serviceOffering, String name, Network left, Network right) { - s_logger.debug("createServiceInstance by " + owner.getAccountName()); + logger.debug("createServiceInstance by " + owner.getAccountName()); // TODO: permission model. // service instances need to be able to access the public network. if (left.getTrafficType() == TrafficType.Guest) { @@ -166,7 +167,7 @@ public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owne try { project = _manager.getVncProject(owner.getDomainId(), owner.getAccountId()); } catch (IOException ex) { - s_logger.warn("read project", ex); + logger.warn("read project", ex); throw new CloudRuntimeException(ex); } @@ -176,7 +177,7 @@ public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owne throw new InvalidParameterValueException("service-instance " + name + " already exists uuid=" + srvid); } } catch (IOException ex) { - s_logger.warn("service-instance lookup", ex); + logger.warn("service-instance lookup", ex); throw new CloudRuntimeException(ex); } @@ -187,18 +188,18 @@ public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owne try { serviceModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("service-instance update", ex); + logger.warn("service-instance update", ex); throw new CloudRuntimeException(ex); 
} - s_logger.debug("service-instance object created"); + logger.debug("service-instance object created"); ServiceInstance siObj; try { _manager.getDatabase().getServiceInstances().add(serviceModel); siObj = serviceModel.getServiceInstance(); } catch (Exception ex) { - s_logger.warn("DB add", ex); + logger.warn("DB add", ex); throw new CloudRuntimeException(ex); } @@ -206,7 +207,7 @@ public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owne String svmName = name.replace(" ", "_") + "-1"; ServiceVirtualMachine svm = createServiceVM(zone, owner, template, serviceOffering, svmName, siObj, left, right); - s_logger.debug("created VMInstance " + svm.getUuid()); + logger.debug("created VMInstance " + svm.getUuid()); // 3. Create the virtual-machine model and push the update. VirtualMachineModel instanceModel = new VirtualMachineModel(svm, svm.getUuid()); @@ -215,7 +216,7 @@ public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owne instanceModel.setServiceInstance(_manager.getModelController(), svm, serviceModel); instanceModel.update(_manager.getModelController()); } catch (Exception ex) { - s_logger.warn("service virtual-machine update", ex); + logger.warn("service virtual-machine update", ex); throw new CloudRuntimeException(ex); } @@ -224,7 +225,7 @@ public ServiceVirtualMachine createServiceInstance(DataCenter zone, Account owne @Override public void startServiceInstance(long instanceId) { - s_logger.debug("start service instance " + instanceId); + logger.debug("start service instance " + instanceId); UserVmVO vm = _vmDao.findById(instanceId); _vmManager.start(vm.getUuid(), null); @@ -232,7 +233,7 @@ public void startServiceInstance(long instanceId) { @Override public ServiceInstanceResponse createServiceInstanceResponse(long instanceId) { - s_logger.debug("ServiceInstance response for id: " + instanceId); + logger.debug("ServiceInstance response for id: " + instanceId); UserVmVO vm = _vmDao.findById(instanceId); 
ServiceInstanceResponse response = new ServiceInstanceResponse(); diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpModel.java index 23bd911d8fef..4a411fa809d1 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpModel.java @@ -22,7 +22,6 @@ import net.juniper.contrail.api.ApiConnector; import net.juniper.contrail.api.types.FloatingIp; -import org.apache.log4j.Logger; import org.apache.cloudstack.network.contrail.management.ContrailManager; @@ -34,7 +33,6 @@ import com.cloud.vm.VMInstanceVO; public class FloatingIpModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(FloatingIpModel.class); private String _uuid; private long _id; @@ -95,7 +93,7 @@ public void delete(ModelController controller) throws IOException { try { api.delete(FloatingIp.class, _uuid); } catch (IOException ex) { - s_logger.warn("floating ip delete", ex); + logger.warn("floating ip delete", ex); } } @@ -159,7 +157,7 @@ public void update(ModelController controller) throws InternalErrorException, IO Long vmId = ipAddrVO.getAssociatedWithVmId(); Long networkId = ipAddrVO.getAssociatedWithNetworkId(); if (vmId == null || networkId == null) { - s_logger.debug("Floating ip is not yet associated to either vm or network"); + logger.debug("Floating ip is not yet associated to either vm or network"); return; } NicVO nic = controller.getNicDao().findByNtwkIdAndInstanceId(networkId, vmId); @@ -180,7 +178,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.create(fip); } catch (Exception ex) { - s_logger.debug("floating ip create", ex); + 
logger.debug("floating ip create", ex); throw new CloudRuntimeException("Failed to create floating ip", ex); } _fip = fip; @@ -188,7 +186,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.update(fip); } catch (IOException ex) { - s_logger.warn("floating ip update", ex); + logger.warn("floating ip update", ex); throw new CloudRuntimeException("Unable to update floating ip object", ex); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpPoolModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpPoolModel.java index 31a29b79abbe..1ae7dc924d32 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpPoolModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/FloatingIpPoolModel.java @@ -23,7 +23,6 @@ import net.juniper.contrail.api.ApiConnector; import net.juniper.contrail.api.types.FloatingIpPool; -import org.apache.log4j.Logger; import org.apache.cloudstack.network.contrail.management.ContrailManager; @@ -31,7 +30,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class FloatingIpPoolModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(FloatingIpPoolModel.class); private String _name; @@ -87,7 +85,7 @@ public void delete(ModelController controller) throws IOException { } _fipPool = null; } catch (IOException ex) { - s_logger.warn("floating ip pool delete", ex); + logger.warn("floating ip pool delete", ex); } } @@ -140,7 +138,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.create(fipPool); } catch (Exception ex) { - s_logger.debug("floating ip pool create", ex); + logger.debug("floating ip pool create", ex); throw new CloudRuntimeException("Failed to 
create floating ip pool", ex); } _fipPool = fipPool; @@ -148,7 +146,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.update(fipPool); } catch (IOException ex) { - s_logger.warn("floating ip pool update", ex); + logger.warn("floating ip pool update", ex); throw new CloudRuntimeException("Unable to update floating ip ppol object", ex); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/InstanceIpModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/InstanceIpModel.java index 8693e61a72b0..2acc0fb14314 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/InstanceIpModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/InstanceIpModel.java @@ -25,12 +25,10 @@ import net.juniper.contrail.api.types.VirtualMachineInterface; import net.juniper.contrail.api.types.VirtualNetwork; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; public class InstanceIpModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(InstanceIpModel.class); private String _name; private String _uuid; @@ -47,7 +45,7 @@ public void addToVMInterface(VMInterfaceModel vmiModel) { _vmiModel = vmiModel; if (vmiModel != null) { vmiModel.addSuccessor(this); - s_logger.debug("vmiModel has " + vmiModel.successors().size() + " IP addresses"); + logger.debug("vmiModel has " + vmiModel.successors().size() + " IP addresses"); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java index f829d3c45297..fce3a46e2e96 100644 --- 
a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java @@ -22,7 +22,8 @@ import java.lang.ref.WeakReference; import java.util.TreeSet; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.exception.InternalErrorException; @@ -45,7 +46,7 @@ public interface ModelObject { public static class ModelReference implements Comparable, Serializable { private static final long serialVersionUID = -2019113974956703526L; - private static final Logger s_logger = Logger.getLogger(ModelReference.class); + protected Logger logger = LogManager.getLogger(getClass()); /* * WeakReference class is not serializable by definition. So, we cannot enforce its serialization unless we write the implementation of diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObjectBase.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObjectBase.java index 52bcd9392268..0c13951a8aaf 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObjectBase.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObjectBase.java @@ -17,11 +17,15 @@ package org.apache.cloudstack.network.contrail.model; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + import java.io.Serializable; import java.util.Comparator; import java.util.TreeSet; public abstract class ModelObjectBase implements ModelObject { + protected Logger logger = LogManager.getLogger(getClass()); public static class UuidComparator implements Comparator, Serializable { @Override public int 
compare(ModelObject lhs, ModelObject rhs) { diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/NetworkPolicyModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/NetworkPolicyModel.java index 1b509dc26550..d53d045ed5b1 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/NetworkPolicyModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/NetworkPolicyModel.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.network.Networks; @@ -43,7 +42,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class NetworkPolicyModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(NetworkPolicyModel.class); private String _uuid; private String _fqName; @@ -82,7 +80,7 @@ public NetworkVO cidrToNetwork(ModelController controller, String cidr) { return null; } if (dbNets.size() > 1) { - s_logger.warn("more than one network found with cidr: " + cidr); + logger.warn("more than one network found with cidr: " + cidr); } return dbNets.get(0); } @@ -235,7 +233,7 @@ public String getUuid() { public void update(ModelController controller) throws InternalErrorException, IOException { ApiConnector api = controller.getApiAccessor(); if (_project == null) { - s_logger.debug("Project is null for the policy: " + _name); + logger.debug("Project is null for the policy: " + _name); throw new IOException("Project is null for the policy: " + _name); } @@ -254,7 +252,7 @@ public void update(ModelController controller) throws InternalErrorException, IO policy.setParent(_project); } } catch (IOException ex) { - s_logger.warn("network-policy read", ex); + logger.warn("network-policy read", ex); return; } } @@ -264,7 +262,7 @@ 
public void update(ModelController controller) throws InternalErrorException, IO try { api.create(policy); } catch (Exception ex) { - s_logger.debug("network policy create", ex); + logger.debug("network policy create", ex); throw new CloudRuntimeException("Failed to create network policy", ex); } _policy = policy; @@ -272,7 +270,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.update(policy); } catch (IOException ex) { - s_logger.warn("network policy update", ex); + logger.warn("network policy update", ex); throw new CloudRuntimeException("Unable to update network policy", ex); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java index d0db7b80b521..7f2bfe73e891 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java @@ -32,14 +32,12 @@ import org.apache.cloudstack.network.contrail.management.ContrailManager; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.offering.ServiceOffering; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.exception.CloudRuntimeException; public class ServiceInstanceModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(ServiceInstanceModel.class); private String _uuid; private String _fqName; @@ -125,7 +123,7 @@ public void build(ModelController controller, ServiceInstance siObj) { ServiceTemplate tmpl = (ServiceTemplate)api.findById(ServiceTemplate.class, ref.getUuid()); _templateId = tmpl.getUuid(); } catch (IOException ex) { - s_logger.warn("service-template 
read", ex); + logger.warn("service-template read", ex); } } } @@ -149,7 +147,7 @@ private ServiceInstance createServiceInstance(ModelController controller) { ApiConnector api = controller.getApiAccessor(); project = (Project)api.findById(Project.class, _projectId); } catch (IOException ex) { - s_logger.warn("project read", ex); + logger.warn("project read", ex); throw new CloudRuntimeException("Unable to create service-instance object", ex); } } @@ -165,7 +163,7 @@ private ServiceInstance createServiceInstance(ModelController controller) { ApiConnector api = controller.getApiAccessor(); api.create(si_obj); } catch (IOException ex) { - s_logger.warn("service-instance create", ex); + logger.warn("service-instance create", ex); throw new CloudRuntimeException("Unable to create service-instance object", ex); } @@ -180,13 +178,13 @@ private void clearServicePolicy(ModelController controller) { _policy.delete(controller.getManager().getModelController()); _policy = null; } catch (Exception e) { - s_logger.error(e); + logger.error(e); } try { _left.update(controller.getManager().getModelController()); _right.update(controller.getManager().getModelController()); } catch (Exception ex) { - s_logger.error("virtual-network update for policy delete: ", ex); + logger.error("virtual-network update for policy delete: ", ex); } } @@ -200,7 +198,7 @@ private NetworkPolicyModel setServicePolicy(ModelController controller) { try { policyModel.build(controller.getManager().getModelController(), _leftName, _rightName, "in-network", siList, "pass"); } catch (Exception e) { - s_logger.error(e); + logger.error(e); return null; } try { @@ -209,7 +207,7 @@ private NetworkPolicyModel setServicePolicy(ModelController controller) { } controller.getManager().getDatabase().getNetworkPolicys().add(policyModel); } catch (Exception ex) { - s_logger.error("network-policy update: ", ex); + logger.error("network-policy update: ", ex); } return policyModel; } @@ -241,7 +239,7 @@ private ServiceTemplate 
locateServiceTemplate(ModelController controller) { ApiConnector api = controller.getApiAccessor(); tmpl = (ServiceTemplate)api.findById(ServiceTemplate.class, _templateId); } catch (IOException ex) { - s_logger.warn("service-template read", ex); + logger.warn("service-template read", ex); throw new CloudRuntimeException("Unable to create service-template object", ex); } if (tmpl == null) { diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModel.java index dbfb969f9aeb..87d57b29afff 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModel.java @@ -20,7 +20,6 @@ import java.io.IOException; import org.apache.cloudstack.network.contrail.management.ContrailManager; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; import com.cloud.network.Network; @@ -33,7 +32,6 @@ import net.juniper.contrail.api.types.VirtualMachineInterfacePropertiesType; public class VMInterfaceModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(VMInterfaceModel.class); private String _uuid; @@ -187,7 +185,7 @@ void setServiceTag(String tag) { @Override public void update(ModelController controller) throws InternalErrorException, IOException { if (!_netActive || !_nicActive) { - s_logger.debug("vm interface update, _netActive: " + _netActive + ", _nicActive: " + _nicActive); + logger.debug("vm interface update, _netActive: " + _netActive + ", _nicActive: " + _nicActive); delete(controller); return; } @@ -246,7 +244,7 @@ public void update(ModelController controller) throws InternalErrorException, IO // TODO: if there are no 
instance-ip successors present and we have an instance-ip object reference // delete the object. if (ipCount == 0) { - s_logger.warn("virtual-machine-interface " + _uuid + " has no instance-ip"); + logger.warn("virtual-machine-interface " + _uuid + " has no instance-ip"); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java index 550bdde1c216..479ef2a0e5db 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModel.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.network.contrail.management.ContrailManager; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; import com.cloud.network.dao.NetworkDao; @@ -45,7 +44,6 @@ import com.google.gson.reflect.TypeToken; public class VirtualMachineModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(VirtualMachineModel.class); private final String _uuid; private long _instanceId; @@ -81,7 +79,7 @@ public void build(ModelController controller, VMInstanceVO instance) { setProperties(controller, instance); UserVm userVm = controller.getVmDao().findById(instance.getId()); if (userVm != null && userVm.getUserData() != null) { - s_logger.debug("vm " + instance.getInstanceName() + " user data: " + userVm.getUserData()); + logger.debug("vm " + instance.getInstanceName() + " user data: " + userVm.getUserData()); final Gson json = new Gson(); Map kvmap = json.fromJson(userVm.getUserData(), new TypeToken>() { }.getType()); @@ -102,7 +100,7 @@ public void build(ModelController controller, VMInstanceVO 
instance) { // Throw a CloudRuntimeException in case the UUID is not valid. String message = "Invalid UUID ({0}) given for the service-instance for VM {1}."; message = MessageFormat.format(message, instance.getId(), serviceUuid); - s_logger.warn(message); + logger.warn(message); throw new CloudRuntimeException(message); } } @@ -124,7 +122,7 @@ private void buildServiceInstance(ModelController controller, String serviceUuid try { siObj = (ServiceInstance) api.findById(ServiceInstance.class, serviceUuid); } catch (IOException ex) { - s_logger.warn("service-instance read", ex); + logger.warn("service-instance read", ex); throw new CloudRuntimeException("Unable to read service-instance object", ex); } @@ -166,7 +164,7 @@ public void delete(ModelController controller) throws IOException { try { api.delete(VirtualMachine.class, _uuid); } catch (IOException ex) { - s_logger.warn("virtual-machine delete", ex); + logger.warn("virtual-machine delete", ex); } if (_serviceModel != null) { @@ -235,7 +233,7 @@ boolean isActiveInstance(VMInstanceVO instance) { return false; default: - s_logger.warn("Unknown VMInstance state " + instance.getState().getDescription()); + logger.warn("Unknown VMInstance state " + instance.getState().getDescription()); } return true; } @@ -252,7 +250,7 @@ public void setProperties(ModelController controller, VMInstanceVO instance) { try { _projectId = manager.getProjectId(instance.getDomainId(), instance.getAccountId()); } catch (IOException ex) { - s_logger.warn("project read", ex); + logger.warn("project read", ex); throw new CloudRuntimeException(ex); } _initialized = true; @@ -321,7 +319,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { project = (Project)api.findById(Project.class, _projectId); } catch (IOException ex) { - s_logger.debug("project read", ex); + logger.debug("project read", ex); throw new CloudRuntimeException("Failed to read project", ex); } vm.setParent(project); @@ -339,7 +337,7 @@ 
public void update(ModelController controller) throws InternalErrorException, IO try { api.create(vm); } catch (Exception ex) { - s_logger.debug("virtual-machine create", ex); + logger.debug("virtual-machine create", ex); throw new CloudRuntimeException("Failed to create virtual-machine", ex); } _vm = vm; @@ -347,7 +345,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.update(vm); } catch (IOException ex) { - s_logger.warn("virtual-machine update", ex); + logger.warn("virtual-machine update", ex); throw new CloudRuntimeException("Unable to update virtual-machine object", ex); } } @@ -367,7 +365,7 @@ public boolean verify(ModelController controller) { try { _vm = (VirtualMachine) api.findById(VirtualMachine.class, _uuid); } catch (IOException e) { - s_logger.error("virtual-machine verify", e); + logger.error("virtual-machine verify", e); } if (_vm == null) { diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java index 7563714528bb..08a4609c43ef 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModel.java @@ -33,7 +33,6 @@ import net.juniper.contrail.api.types.VnSubnetsType; import org.apache.cloudstack.network.contrail.management.ContrailManager; -import org.apache.log4j.Logger; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.VlanDao; @@ -44,7 +43,6 @@ import com.cloud.utils.net.NetUtils; public class VirtualNetworkModel extends ModelObjectBase { - private static final Logger s_logger = Logger.getLogger(VirtualNetworkModel.class); private String _uuid; private long _id; @@ -141,7 +139,7 @@ public void 
delete(ModelController controller) throws IOException { try { api.delete(VirtualNetwork.class, _uuid); } catch (IOException ex) { - s_logger.warn("virtual-network delete", ex); + logger.warn("virtual-network delete", ex); } } @@ -182,7 +180,7 @@ public void setProperties(ModelController controller, Network network) { try { _uuid = manager.findVirtualNetworkId(network); } catch (IOException ex) { - s_logger.warn("Unable to read virtual-network", ex); + logger.warn("Unable to read virtual-network", ex); } } @@ -191,7 +189,7 @@ public void setProperties(ModelController controller, Network network) { try { _projectId = manager.getProjectId(network.getDomainId(), network.getAccountId()); } catch (IOException ex) { - s_logger.warn("project read", ex); + logger.warn("project read", ex); throw new CloudRuntimeException(ex); } @@ -223,7 +221,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { project = (Project)api.findById(Project.class, _projectId); } catch (IOException ex) { - s_logger.debug("project read", ex); + logger.debug("project read", ex); throw new CloudRuntimeException("Failed to read project", ex); } vn.setParent(project); @@ -248,16 +246,16 @@ public void update(ModelController controller) throws InternalErrorException, IO try { String ipam_id = api.findByName(NetworkIpam.class, null, "default-network-ipam"); if (ipam_id == null) { - s_logger.debug("could not find default-network-ipam"); + logger.debug("could not find default-network-ipam"); return; } ipam = (NetworkIpam)api.findById(NetworkIpam.class, ipam_id); if (ipam == null) { - s_logger.debug("could not find NetworkIpam with ipam_id: " + ipam_id); + logger.debug("could not find NetworkIpam with ipam_id: " + ipam_id); return; } } catch (IOException ex) { - s_logger.error(ex); + logger.error(ex); return; } _ipam = ipam; @@ -287,7 +285,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.create(vn); } catch (Exception ex) { 
- s_logger.debug("virtual-network create", ex); + logger.debug("virtual-network create", ex); throw new CloudRuntimeException("Failed to create virtual-network", ex); } _vn = vn; @@ -295,7 +293,7 @@ public void update(ModelController controller) throws InternalErrorException, IO try { api.update(vn); } catch (IOException ex) { - s_logger.warn("virtual-network update", ex); + logger.warn("virtual-network update", ex); throw new CloudRuntimeException("Unable to update virtual-network object", ex); } } @@ -321,16 +319,16 @@ public void read(ModelController controller) { try { String ipam_id = api.findByName(NetworkIpam.class, null, "default-network-ipam"); if (ipam_id == null) { - s_logger.debug("could not find default-network-ipam"); + logger.debug("could not find default-network-ipam"); return; } ipam = (NetworkIpam)api.findById(NetworkIpam.class, ipam_id); if (ipam == null) { - s_logger.debug("could not find NetworkIpam with ipam_id: " + ipam_id); + logger.debug("could not find NetworkIpam with ipam_id: " + ipam_id); return; } } catch (IOException ex) { - s_logger.error(ex); + logger.error(ex); return; } _ipam = ipam; @@ -415,7 +413,7 @@ public boolean verify(ModelController controller) { diff.removeAll(vncSubnets); if (!diff.isEmpty()) { - s_logger.debug("Subnets changed, network: " + _name + "; db: " + dbSubnets + ", vnc: " + vncSubnets + ", diff: " + diff); + logger.debug("Subnets changed, network: " + _name + "; db: " + dbSubnets + ", vnc: " + vncSubnets + ", diff: " + diff); return false; } @@ -451,7 +449,7 @@ public boolean compare(ModelController controller, ModelObject o) { try { latest = (VirtualNetworkModel)o; } catch (ClassCastException ex) { - s_logger.warn("Invalid model object is passed to cast to VirtualNetworkModel"); + logger.warn("Invalid model object is passed to cast to VirtualNetworkModel"); return false; } @@ -469,7 +467,7 @@ public boolean compare(ModelController controller, ModelObject o) { List newSubnets = new ArrayList(); if 
((currentIpamRefs == null && newIpamRefs != null) || (currentIpamRefs != null && newIpamRefs == null)) { //Check for existence only - s_logger.debug("ipams differ: current=" + currentIpamRefs + ", new=" + newIpamRefs); + logger.debug("ipams differ: current=" + currentIpamRefs + ", new=" + newIpamRefs); return false; } if (currentIpamRefs == null) { @@ -502,7 +500,7 @@ public boolean compare(ModelController controller, ModelObject o) { diff.removeAll(newSubnets); if (!diff.isEmpty()) { - s_logger.debug("Subnets differ, network: " + _name + "; db: " + currentSubnets + ", vnc: " + newSubnets + ", diff: " + diff); + logger.debug("Subnets differ, network: " + _name + "; db: " + currentSubnets + ", vnc: " + newSubnets + ", diff: " + diff); return false; } diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ApiConnectorMockito.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ApiConnectorMockito.java index 0c5df0695571..2cd929479995 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ApiConnectorMockito.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ApiConnectorMockito.java @@ -30,12 +30,10 @@ import net.juniper.contrail.api.ObjectReference; import net.juniper.contrail.api.types.NetworkIpam; -import org.apache.log4j.Logger; import com.google.common.collect.ImmutableMap; public class ApiConnectorMockito implements ApiConnector { - private static final Logger s_logger = Logger.getLogger(ApiConnectorMockito.class); static final Map object_map = new ImmutableMap.Builder().put("network-ipam:default-network-ipam", new NetworkIpam()) .build(); @@ -53,19 +51,16 @@ public ApiConnector getSpy() { @Override public boolean create(ApiObjectBase arg0) throws IOException { - s_logger.debug("create " + 
arg0.getClass().getName() + " id: " + arg0.getUuid()); return _spy.create(arg0); } @Override public void delete(ApiObjectBase arg0) throws IOException { - s_logger.debug("delete " + arg0.getClass().getName() + " id: " + arg0.getUuid()); _spy.delete(arg0); } @Override public void delete(Class arg0, String arg1) throws IOException { - s_logger.debug("create " + arg0.getName() + " id: " + arg1); _spy.delete(arg0, arg1); } @@ -83,19 +78,16 @@ public ApiObjectBase find(Class arg0, ApiObjectBase arg @Override public ApiObjectBase findByFQN(Class arg0, String arg1) throws IOException { - s_logger.debug("find " + arg0.getName() + " name: " + arg1); return _mock.findByFQN(arg0, arg1); } @Override public ApiObjectBase findById(Class arg0, String arg1) throws IOException { - s_logger.debug("find " + arg0.getName() + " id: " + arg1); return _mock.findById(arg0, arg1); } @Override public String findByName(Class arg0, List arg1) throws IOException { - s_logger.debug("find " + arg0.getName() + " name: " + arg1); return _mock.findByName(arg0, arg1); } @@ -107,31 +99,26 @@ public String findByName(Class arg0, ApiObjectBase arg1 msg.append(" parent: " + arg1.getName()); } msg.append(" name: " + arg2); - s_logger.debug(msg.toString()); return _mock.findByName(arg0, arg1, arg2); } @Override public List getObjects(Class arg0, List> arg1) throws IOException { - s_logger.debug("getObjects" + arg0.getName()); return _mock.getObjects(arg0, arg1); } @Override public List list(Class arg0, List arg1) throws IOException { - s_logger.debug("list" + arg0.getName()); return _mock.list(arg0, arg1); } @Override public boolean read(ApiObjectBase arg0) throws IOException { - s_logger.debug("read " + arg0.getClass().getName() + " id: " + arg0.getUuid()); return _mock.read(arg0); } @Override public boolean update(ApiObjectBase arg0) throws IOException { - s_logger.debug("update " + arg0.getClass().getName() + " id: " + arg0.getUuid()); return _spy.update(arg0); } diff --git 
a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java index 740dfc95560d..c630f0bf6b97 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/ManagementServerMock.java @@ -27,7 +27,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.mockito.ArgumentMatchers; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -87,7 +86,6 @@ import com.cloud.vm.dao.UserVmDao; public class ManagementServerMock { - private static final Logger s_logger = Logger.getLogger(ManagementServerMock.class); @Inject private AccountManager _accountMgr; @@ -126,7 +124,6 @@ static void setParameter(BaseCmd cmd, String name, BaseCmd.CommandType fieldType try { field = cls.getDeclaredField(name); } catch (Exception ex) { - s_logger.warn("class: " + cls.getName() + "\t" + ex); return; } field.setAccessible(true); @@ -135,7 +132,6 @@ static void setParameter(BaseCmd cmd, String name, BaseCmd.CommandType fieldType try { field.set(cmd, value); } catch (Exception ex) { - s_logger.warn(ex); return; } break; @@ -144,7 +140,6 @@ static void setParameter(BaseCmd cmd, String name, BaseCmd.CommandType fieldType try { field.setLong(cmd, -1L); } catch (Exception ex) { - s_logger.warn(ex); return; } } @@ -153,7 +148,6 @@ static void setParameter(BaseCmd cmd, String name, BaseCmd.CommandType fieldType try { field.set(cmd, value); } catch (Exception ex) { - s_logger.warn(ex); return; } break; @@ -161,7 +155,6 @@ static void setParameter(BaseCmd cmd, String name, BaseCmd.CommandType fieldType try { field.set(cmd, value); } catch (Exception ex) { - 
s_logger.warn(ex); return; } break; @@ -186,8 +179,6 @@ private void createPublicVlanIpRange() { if (nets != null && !nets.isEmpty()) { NetworkVO public_net = nets.get(0); public_net_id = public_net.getId(); - } else { - s_logger.debug("no public network found in the zone: " + _zone.getId()); } Account system = _accountMgr.getSystemAccount(); @@ -200,11 +191,9 @@ private void createPublicVlanIpRange() { setParameter(cmd, "networkID", BaseCmd.CommandType.LONG, public_net_id); setParameter(cmd, "zoneId", BaseCmd.CommandType.LONG, _zone.getId()); setParameter(cmd, "vlan", BaseCmd.CommandType.STRING, "untagged"); - s_logger.debug("createPublicVlanIpRange execute : zone id: " + _zone.getId() + ", public net id: " + public_net_id); try { _configService.createVlanAndPublicIpRange(cmd); } catch (Exception e) { - s_logger.debug("createPublicVlanIpRange: " + e); } } @@ -360,7 +349,6 @@ private void locatePhysicalNetwork() { Pair, Integer> providers = _networkService.listNetworkServiceProviders(_znet.getId(), Provider.JuniperContrailRouter.getName(), null, null, null); if (providers.second() == 0) { - s_logger.debug("Add " + Provider.JuniperContrailRouter.getName() + " to network " + _znet.getName()); PhysicalNetworkServiceProvider provider = _networkService.addProviderToPhysicalNetwork(_znet.getId(), Provider.JuniperContrailRouter.getName(), null, null); _networkService.updateNetworkServiceProvider(provider.getId(), PhysicalNetworkServiceProvider.State.Enabled.toString(), null); } else { @@ -371,12 +359,10 @@ private void locatePhysicalNetwork() { } providers = _networkService.listNetworkServiceProviders(_znet.getId(), null, PhysicalNetworkServiceProvider.State.Enabled.toString(), null, null); - s_logger.debug(_znet.getName() + " has " + providers.second().toString() + " Enabled providers"); for (PhysicalNetworkServiceProvider provider : providers.first()) { if (provider.getProviderName().equals(Provider.JuniperContrailRouter.getName())) { continue; } - 
s_logger.debug("Disabling " + provider.getProviderName()); _networkService.updateNetworkServiceProvider(provider.getId(), PhysicalNetworkServiceProvider.State.Disabled.toString(), null); } } diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index 67cfe1df3e1b..836bb7213296 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; import org.apache.cloudstack.auth.UserTwoFactorAuthenticator; import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.RoleType; @@ -73,7 +72,6 @@ import com.cloud.utils.db.TransactionStatus; public class MockAccountManager extends ManagerBase implements AccountManager { - private static final Logger s_logger = Logger.getLogger(MockAccountManager.class); @Inject AccountDao _accountDao; @@ -98,7 +96,7 @@ public boolean configure(final String name, final Map params) th throw new ConfigurationException("Unable to find the system user using " + User.UID_SYSTEM); } CallContext.register(_systemUser, _systemAccount); - s_logger.info("MockAccountManager initialization successful"); + logger.info("MockAccountManager initialization successful"); return true; } diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java 
b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java index 3ad36acc1601..cbd93669c689 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.command.user.project.DeleteProjectCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -89,7 +88,6 @@ * Exercise the public API. */ public class NetworkProviderTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(NetworkProviderTest.class); @Inject public ContrailManager _contrailMgr; @@ -122,9 +120,7 @@ public class NetworkProviderTest extends TestCase { @BeforeClass public static void globalSetUp() throws Exception { ApiConnectorFactory.setImplementation(ApiConnectorMock.class); - s_logger.info("mysql server is getting launched "); s_mysqlSrverPort = TestDbSetup.init(null); - s_logger.info("mysql server launched on port " + s_mysqlSrverPort); s_msId = ManagementServerNode.getManagementServerId(); s_lockController = Merovingian2.createLockController(s_msId); @@ -143,7 +139,6 @@ public static void globalTearDown() throws Exception { } ctx.close(); - s_logger.info("destroying mysql server instance running at port <" + s_mysqlSrverPort + ">"); TestDbSetup.destroy(s_mysqlSrverPort, null); } @@ -154,7 +149,6 @@ public void setUp() throws Exception { ComponentContext.initComponentsLifeCycle(); } catch (Exception ex) { ex.printStackTrace(); - s_logger.error(ex.getMessage()); } Account system = _accountMgr.getSystemAccount(); User user = _accountMgr.getSystemUser(); @@ -177,7 
+171,6 @@ private void purgeTestNetwork() { DataCenter zone = _server.getZone(); List list = _networkService.getIsolatedNetworksOwnedByAccountInZone(zone.getId(), system); for (Network net : list) { - s_logger.debug("Delete network " + net.getName()); _networkService.deleteNetwork(net.getId(), false); } } @@ -264,7 +257,6 @@ public void deleteFloatingIp(IPAddressVO ip) throws Exception { try { proxy.execute(); } catch (Exception e) { - s_logger.debug("DisableStaticNatCmd exception: " + e); e.printStackTrace(); throw e; } @@ -284,7 +276,6 @@ public IPAddressVO createFloatingIp(Network network, UserVm vm) throws Exception ((AssociateIPAddrCmd)cmd).create(); ((AssociateIPAddrCmd)cmd).execute(); } catch (Exception e) { - s_logger.debug("AssociateIPAddrCmd exception: " + e); e.printStackTrace(); throw e; } @@ -310,7 +301,6 @@ public IPAddressVO createFloatingIp(Network network, UserVm vm) throws Exception try { proxy.execute(); } catch (Exception e) { - s_logger.debug("EnableStaticNatCmd exception: " + e); e.printStackTrace(); throw e; } @@ -330,7 +320,6 @@ public void createProject(String name) { ((CreateProjectCmd)proxy).create(); ((CreateProjectCmd)proxy).execute(); } catch (Exception e) { - s_logger.debug("CreateProjectCmd exception: " + e); e.printStackTrace(); fail("create project cmd failed"); } @@ -465,11 +454,11 @@ public void dbSyncTest() { //now db sync if (_dbSync.syncAll(DBSyncGeneric.SYNC_MODE_UPDATE) == ServerDBSync.SYNC_STATE_OUT_OF_SYNC) { - s_logger.info("# Cloudstack DB & VNC are out of sync - resync done"); + //# Cloudstack DB & VNC are out of sync - resync done } if (_dbSync.syncAll(DBSyncGeneric.SYNC_MODE_CHECK) == ServerDBSync.SYNC_STATE_OUT_OF_SYNC) { - s_logger.info("# Cloudstack DB & VNC are still out of sync"); + //# Cloudstack DB & VNC are still out of sync fail("DB Sync failed"); } } diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java 
b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java index 9564ec0a24ab..914545e26830 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java @@ -33,7 +33,6 @@ import net.juniper.contrail.api.types.VirtualMachineInterface; import net.juniper.contrail.api.types.VirtualNetwork; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -60,7 +59,6 @@ @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = "classpath:/publicNetworkContext.xml") public class PublicNetworkTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(PublicNetworkTest.class); @Inject public ContrailManager _contrailMgr; @@ -77,9 +75,7 @@ public class PublicNetworkTest extends TestCase { @BeforeClass public static void globalSetUp() throws Exception { ApiConnectorFactory.setImplementation(ApiConnectorMockito.class); - s_logger.info("mysql server is getting launched "); s_mysqlServerPort = TestDbSetup.init(null); - s_logger.info("mysql server launched on port " + s_mysqlServerPort); s_msId = ManagementServerNode.getManagementServerId(); s_lockController = Merovingian2.createLockController(s_msId); } @@ -97,7 +93,6 @@ public static void globalTearDown() throws Exception { } ctx.close(); - s_logger.info("destroying mysql server instance running at port <" + s_mysqlServerPort + ">"); TestDbSetup.destroy(s_mysqlServerPort, null); } @@ -108,7 +103,6 @@ public void setUp() throws Exception { ComponentContext.initComponentsLifeCycle(); } catch (Exception ex) { ex.printStackTrace(); - s_logger.error(ex.getMessage()); } _server = ComponentContext.inject(new ManagementServerMock()); diff --git 
a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/InstanceIpModelTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/InstanceIpModelTest.java index 9a4d0b6ebe11..fa0f2afcc6d5 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/InstanceIpModelTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/InstanceIpModelTest.java @@ -27,7 +27,6 @@ import java.util.UUID; import org.apache.cloudstack.network.contrail.management.ContrailManagerImpl; -import org.apache.log4j.Logger; import org.junit.Test; import com.cloud.network.Network; @@ -44,8 +43,6 @@ import net.juniper.contrail.api.ApiConnectorMock; public class InstanceIpModelTest extends TestCase { - private static final Logger s_logger = - Logger.getLogger(InstanceIpModelTest.class); @Test public void testCreateInstanceIp() throws IOException { diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModelTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModelTest.java index 423ac616cd0b..5339066dd007 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModelTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VMInterfaceModelTest.java @@ -27,7 +27,6 @@ import java.util.UUID; import org.apache.cloudstack.network.contrail.management.ContrailManagerImpl; -import org.apache.log4j.Logger; import org.junit.Test; import com.cloud.network.Network; @@ -45,8 +44,6 @@ import net.juniper.contrail.api.types.VirtualMachineInterface; public class VMInterfaceModelTest extends TestCase { - private static final Logger s_logger = - 
Logger.getLogger(VMInterfaceModelTest.class); @Test public void testCreateVMInterface() throws IOException { diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModelTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModelTest.java index 757a7ab93395..0219c3200deb 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModelTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualMachineModelTest.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.network.contrail.management.ContrailManagerImpl; import org.apache.cloudstack.network.contrail.management.ModelDatabase; -import org.apache.log4j.Logger; import org.junit.Test; import com.cloud.network.Network; @@ -40,8 +39,6 @@ import com.cloud.vm.dao.UserVmDao; public class VirtualMachineModelTest extends TestCase { - private static final Logger s_logger = - Logger.getLogger(VirtualMachineModelTest.class); @Test public void testVirtualMachineDBLookup() { @@ -60,7 +57,6 @@ public void testVirtualMachineDBLookup() { VirtualMachineModel vm2 = new VirtualMachineModel(vm, "fbc1f8fa-4b78-45ee-bba0-b551dbf94575"); db.getVirtualMachines().add(vm2); - s_logger.debug("No of Vitual Machines added to database : " + db.getVirtualMachines().size()); assertEquals(3, db.getVirtualMachines().size()); diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModelTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModelTest.java index e4abfc97711e..2b2cd9af9f06 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModelTest.java +++ 
b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/model/VirtualNetworkModelTest.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.network.contrail.management.ContrailManager; import org.apache.cloudstack.network.contrail.management.ContrailManagerImpl; import org.apache.cloudstack.network.contrail.management.ModelDatabase; -import org.apache.log4j.Logger; import org.junit.Before; import org.junit.Test; @@ -47,7 +46,6 @@ public class VirtualNetworkModelTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(VirtualNetworkModelTest.class); private ModelController controller; @@ -144,8 +142,6 @@ public void testDBLookup() { db.getVirtualNetworks().add(guestModel1); VirtualNetworkModel guestModel2 = new VirtualNetworkModel(network, UUID.randomUUID().toString(), "test", TrafficType.Guest); db.getVirtualNetworks().add(guestModel2); - s_logger.debug("networks: " + db.getVirtualNetworks().size()); - s_logger.debug("No of Vitual Networks added to database : " + db.getVirtualNetworks().size()); assertEquals(4, db.getVirtualNetworks().size()); assertSame(storageModel, db.lookupVirtualNetwork(null, storageModel.getName(), TrafficType.Storage)); assertSame(mgmtModel, db.lookupVirtualNetwork(null, mgmtModel.getName(), TrafficType.Management)); diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/AddNetscalerLoadBalancerCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/AddNetscalerLoadBalancerCmd.java index a67256b89b72..c4b16e676772 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/AddNetscalerLoadBalancerCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/AddNetscalerLoadBalancerCmd.java @@ -17,7 +17,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 
@@ requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class AddNetscalerLoadBalancerCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddNetscalerLoadBalancerCmd.class.getName()); @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ConfigureNetscalerLoadBalancerCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ConfigureNetscalerLoadBalancerCmd.java index 59f6597061df..3f2620656acb 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ConfigureNetscalerLoadBalancerCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ConfigureNetscalerLoadBalancerCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -44,7 +43,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigureNetscalerLoadBalancerCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ConfigureNetscalerLoadBalancerCmd.class.getName()); @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerControlCenterCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerControlCenterCmd.java index cf988624b543..0ec1184a862b 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerControlCenterCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerControlCenterCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; import javax.persistence.EntityExistsException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import 
org.apache.cloudstack.api.ApiConstants; @@ -39,7 +38,6 @@ @APICommand(name = "deleteNetscalerControlCenter", responseObject = SuccessResponse.class, description = "Delete Netscaler Control Center") public class DeleteNetscalerControlCenterCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetscalerControlCenterCmd.class.getName()); private static final String s_name = "deleteNetscalerControlCenter"; @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerLoadBalancerCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerLoadBalancerCmd.java index 74a939c10d9c..01c478b48e59 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerLoadBalancerCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteNetscalerLoadBalancerCmd.java @@ -17,7 +17,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteNetscalerLoadBalancerCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteNetscalerLoadBalancerCmd.class.getName()); @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteServicePackageOfferingCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteServicePackageOfferingCmd.java index c6fbec18ee30..7776aeae5fb0 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteServicePackageOfferingCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeleteServicePackageOfferingCmd.java @@ 
-20,7 +20,6 @@ import javax.inject.Inject; import javax.persistence.EntityExistsException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -40,7 +39,6 @@ @APICommand(name = "deleteServicePackageOffering", responseObject = SuccessResponse.class, description = "Delete Service Package") public class DeleteServicePackageOfferingCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteServicePackageOfferingCmd.class.getName()); private static final String s_name = "deleteServicePackage"; @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeployNetscalerVpxCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeployNetscalerVpxCmd.java index 8089599d50bb..58129d0e6d99 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeployNetscalerVpxCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/DeployNetscalerVpxCmd.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; @@ -52,7 +51,6 @@ requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class DeployNetscalerVpxCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeployNetscalerVpxCmd.class.getName()); private static final String s_name = "deployNetscalerVpx"; @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerControlCenterCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerControlCenterCmd.java index a15c8afa623e..7e72e470a0cc 100644 --- 
a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerControlCenterCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerControlCenterCmd.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -42,7 +41,6 @@ @APICommand(name = "listNetscalerControlCenter", responseObject = NetscalerControlCenterResponse.class, description = "list control center", requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class ListNetscalerControlCenterCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNetscalerControlCenterCmd.class.getName()); private static final String s_name = "listNetscalerControlCenter"; @Inject diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java index 73e0d6904320..917c0adc8878 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancerNetworksCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -47,7 +46,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetscalerLoadBalancerNetworksCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNetscalerLoadBalancerNetworksCmd.class.getName()); @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git 
a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancersCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancersCmd.java index 2d9ca24fb188..aa9c1ee50516 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancersCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListNetscalerLoadBalancersCmd.java @@ -19,7 +19,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -44,7 +43,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNetscalerLoadBalancersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListNetscalerLoadBalancersCmd.class.getName()); private static final String s_name = "listnetscalerloadbalancerresponse"; @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListRegisteredServicePackageCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListRegisteredServicePackageCmd.java index 6838833ef947..fcc929b8385c 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListRegisteredServicePackageCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/ListRegisteredServicePackageCmd.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -43,7 +42,6 @@ @APICommand(name = "listRegisteredServicePackages", responseObject = NetScalerServicePackageResponse.class, description = "lists registered service packages", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class 
ListRegisteredServicePackageCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListRegisteredServicePackageCmd.class.getName()); private static final String s_name = "listregisteredservicepackage"; @Inject NetscalerLoadBalancerElementService _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterNetscalerControlCenterCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterNetscalerControlCenterCmd.java index 852fa472e26d..4cc9644e1180 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterNetscalerControlCenterCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterNetscalerControlCenterCmd.java @@ -17,7 +17,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 @@ requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class RegisterNetscalerControlCenterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RegisterNetscalerControlCenterCmd.class.getName()); @Inject NetscalerLoadBalancerElementService _netsclarLbService; @@ -78,9 +76,6 @@ public String getPassword() { } - public static Logger getsLogger() { - return s_logger; - } public NetscalerLoadBalancerElementService get_netsclarLbService() { return _netsclarLbService; diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterServicePackageCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterServicePackageCmd.java index 9b18b45bd2f8..7b5dc29d11c7 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterServicePackageCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/RegisterServicePackageCmd.java @@ -17,7 
+17,6 @@ import javax.inject.Inject; import javax.persistence.EntityExistsException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -36,7 +35,6 @@ @APICommand(name = "registerNetscalerServicePackage", responseObject = NetScalerServicePackageResponse.class, description = "Registers NCC Service Package") public class RegisterServicePackageCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(RegisterServicePackageCmd.class.getName()); private static final String s_name = "registerNetscalerServicePackage"; @Inject diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/StopNetScalerVMCmd.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/StopNetScalerVMCmd.java index 288e867277a4..b4771b57b508 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/StopNetScalerVMCmd.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/api/commands/StopNetScalerVMCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -44,7 +43,6 @@ @APICommand(name = "stopNetScalerVpx", description = "Stops a NetScalervm.", responseObject = DomainRouterResponse.class, entityType = {VirtualMachine.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class StopNetScalerVMCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(StopNetScalerVMCmd.class.getName()); private static final String s_name = "stopNetScalerVmresponse"; @Inject diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java index 1339113711e4..48b9006f34c1 100644 --- 
a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/element/NetscalerElement.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.cloudstack.region.gslb.GslbServiceProvider; -import org.apache.log4j.Logger; import org.json.JSONException; import org.json.JSONObject; @@ -159,7 +158,6 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl implements LoadBalancingServiceProvider, NetscalerLoadBalancerElementService, ExternalLoadBalancerDeviceManager, IpDeployer, StaticNatServiceProvider, GslbServiceProvider { - private static final Logger s_logger = Logger.getLogger(NetscalerElement.class); @Inject NetworkModel _networkManager; @@ -224,7 +222,7 @@ private boolean canHandle(Network config, Service service) { && config.getGuestType() == Network.GuestType.Shared && config.getTrafficType() == TrafficType.Guest); if (!(handleInAdvanceZone || handleInBasicZone)) { - s_logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + logger.trace("Not handling network with Type " + config.getGuestType() + " and traffic type " + config.getTrafficType() + " in zone of type " + zone.getNetworkType()); return false; } @@ -250,7 +248,7 @@ public boolean implement(Network guestConfig, NetworkOffering offering, DeployDe if (_ntwkSrvcDao.canProviderSupportServiceInNetwork(guestConfig.getId(), Service.StaticNat, Network.Provider.Netscaler) && !isBasicZoneNetwok(guestConfig)) { - s_logger.error("NetScaler provider can not be Static Nat service provider for the network " + logger.error("NetScaler provider can not be Static Nat service provider for the network " + guestConfig.getGuestType() + " and traffic type " + guestConfig.getTrafficType()); return false; } @@ -312,7 +310,7 @@ 
public boolean manageGuestNetworkWithNetscalerControlCenter(boolean add, Network throws ResourceUnavailableException, InsufficientCapacityException, ConfigurationException { if (guestConfig.getTrafficType() != TrafficType.Guest) { - s_logger.trace("External load balancer can only be used for guest networks."); + logger.trace("External load balancer can only be used for guest networks."); return false; } @@ -331,13 +329,13 @@ public boolean manageGuestNetworkWithNetscalerControlCenter(boolean add, Network if (lbDeviceVO == null) { String msg = "failed to allocate Netscaler ControlCenter Resource for the zone in the network " + guestConfig.getId(); - s_logger.error(msg); + logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } netscalerControlCenter = _hostDao.findById(lbDeviceVO.getId()); - s_logger.debug("Allocated Netscaler Control Center device:" + lbDeviceVO.getId() + " for the network: " + logger.debug("Allocated Netscaler Control Center device:" + lbDeviceVO.getId() + " for the network: " + guestConfig.getId()); } else { // find the load balancer device allocated for the network @@ -346,7 +344,7 @@ public boolean manageGuestNetworkWithNetscalerControlCenter(boolean add, Network // on restart network, device could have been allocated already, skip allocation if a device is assigned lbDeviceVO = getNetScalerControlCenterForNetwork(guestConfig); if (lbDeviceVO == null) { - s_logger.warn( + logger.warn( "Network shutdwon requested on external load balancer element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed. 
So just returning."); return true; @@ -371,7 +369,7 @@ public boolean manageGuestNetworkWithNetscalerControlCenter(boolean add, Network selfIp = _ipAddrMgr.acquireGuestIpAddress(guestConfig, null); if (selfIp == null) { String msg = "failed to acquire guest IP address so not implementing the network on the NetscalerControlCenter"; - s_logger.error(msg); + logger.error(msg); throw new InsufficientNetworkCapacityException(msg, Network.class, guestConfig.getId()); } networkDetails.put("snip", selfIp); @@ -585,7 +583,7 @@ public ExternalLoadBalancerDeviceVO addNetscalerLoadBalancer(AddNetscalerLoadBal } catch (Exception e) { String msg = "Error parsing the url parameter specified in addNetscalerLoadBalancer command due to " + e.getMessage(); - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } Map configParams = new HashMap(); @@ -595,7 +593,7 @@ public ExternalLoadBalancerDeviceVO addNetscalerLoadBalancer(AddNetscalerLoadBal if (dedicatedUse && !deviceName.equals(NetworkDevice.NetscalerVPXLoadBalancer.getName())) { String msg = "Only Netscaler VPX load balancers can be specified for dedicated use"; - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } @@ -604,13 +602,13 @@ public ExternalLoadBalancerDeviceVO addNetscalerLoadBalancer(AddNetscalerLoadBal if (!deviceName.equals(NetworkDevice.NetscalerVPXLoadBalancer.getName()) && !deviceName.equals(NetworkDevice.NetscalerMPXLoadBalancer.getName())) { String msg = "Only Netscaler VPX or MPX load balancers can be specified as GSLB service provider"; - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } if (cmd.getSitePublicIp() == null || cmd.getSitePrivateIp() == null) { String msg = "Public and Privae IP needs to provided for NetScaler that will be GSLB provider"; - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } @@ -762,7 +760,7 @@ public void 
doInTransactionWithoutResult(TransactionStatus status) { try { _agentMgr.reconnect(host.getId()); } catch (AgentUnavailableException e) { - s_logger.warn("failed to reconnect host " + host, e); + logger.warn("failed to reconnect host " + host, e); } return lbDeviceVo; } @@ -927,7 +925,7 @@ public boolean deleteNetscalerControlCenter(DeleteNetscalerControlCenterCmd cmd) _hostDao.update(ncc.getId(), ncc); _resourceMgr.deleteHost(ncc.getId(), false, false); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); return false; } } @@ -1049,7 +1047,7 @@ public boolean verifyServicesCombination(Set services) { // NetScaler can only act as Lb and Static Nat service provider if (services != null && !services.isEmpty() && !netscalerServices.containsAll(services)) { - s_logger.warn( + logger.warn( "NetScaler network element can only support LB and Static NAT services and service combination " + services + " is not supported."); @@ -1058,10 +1056,10 @@ public boolean verifyServicesCombination(Set services) { buff.append(service.getName()); buff.append(" "); } - s_logger.warn( + logger.warn( "NetScaler network element can only support LB and Static NAT services and service combination " + buff.toString() + " is not supported."); - s_logger.warn( + logger.warn( "NetScaler network element can only support LB and Static NAT services and service combination " + services + " is not supported."); return false; @@ -1103,14 +1101,14 @@ public boolean applyElasticLoadBalancerRules(Network network, List rules) } catch (Exception e) { errMsg = "Could not allocate a NetSclaer load balancer for configuring static NAT rules due to" + e.getMessage(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } } if (!isNetscalerDevice(lbDevice.getDeviceName())) { errMsg = "There are no NetScaler load balancer assigned for this network. 
So NetScaler element will not be handling the static nat rules."; - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } SetStaticNatRulesAnswer answer = null; @@ -1214,7 +1212,7 @@ public boolean applyStaticNats(Network config, List rules) if (lbDevice == null) { String errMsg = "There is no NetScaler device configured to perform EIP to guest IP address: " + rule.getDestIpAddress(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } @@ -1231,7 +1229,7 @@ public boolean applyStaticNats(Network config, List rules) cmd); if (answer == null) { String errMsg = "Failed to configure INAT rule on NetScaler device " + lbDevice.getHostId(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } } @@ -1240,7 +1238,7 @@ public boolean applyStaticNats(Network config, List rules) } return true; } catch (Exception e) { - s_logger.error("Failed to configure StaticNat rule due to " + e.getMessage()); + logger.error("Failed to configure StaticNat rule due to " + e.getMessage()); return false; } } @@ -1278,14 +1276,14 @@ public List getElasticLBRulesHealthCheck(Network network, ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - s_logger.warn( + logger.warn( "There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); return null; } if (!isNetscalerDevice(lbDeviceVO.getDeviceName())) { errMsg = "There are no NetScaler load balancer assigned for this network. 
So NetScaler element can not be handle elastic load balancer rules."; - s_logger.error(errMsg); + logger.error(errMsg); throw new ResourceUnavailableException(errMsg, this.getClass(), 0); } @@ -1332,10 +1330,10 @@ public List updateHealthChecks(Network network, List rules) { if (schemeCaps != null) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); return false; } diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java index c447d6005610..72186a677c69 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetScalerControlCenterResource.java @@ -50,7 +50,8 @@ import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.impl.conn.BasicClientConnectionManager; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; @@ -117,7 +118,7 @@ public class NetScalerControlCenterResource implements ServerResource { private String _sessionid; public static final int DEFAULT_PORT = 443; private static final Gson s_gson = GsonHelper.getGson(); - private static final Logger s_logger = Logger.getLogger(NetScalerControlCenterResource.class); + protected Logger logger = LogManager.getLogger(NetScalerControlCenterResource.class); protected Gson _gson; private final String _objectNamePathSep = "-"; final String 
protocol="https"; @@ -188,7 +189,7 @@ public boolean configure(String name, Map params) throws Configu } catch (ConfigurationException e) { throw new ConfigurationException(e.getMessage()); } catch (ExecutionException e) { - s_logger.debug("Execution Exception :" + e.getMessage()); + logger.debug("Execution Exception :" + e.getMessage()); throw new ConfigurationException("Failed to add the device. Please check the device is NCC and It is reachable from Management Server."); } } @@ -204,10 +205,10 @@ public void getServicePackages() throws ExecutionException { org.json.JSONObject jsonBody = new JSONObject(); org.json.JSONObject jsonCredentials = new JSONObject(); result = getHttpRequest(jsonBody.toString(), agentUri, _sessionid); - s_logger.debug("List of Service Packages in NCC:: " + result); + logger.debug("List of Service Packages in NCC:: " + result); } catch (URISyntaxException e) { String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } catch (Exception e) { throw new ExecutionException("Failed to log in to NCC device at " + _ip + " due to " + e.getMessage()); @@ -235,18 +236,18 @@ private synchronized String login() throws ExecutionException{ jsonResponse = new JSONObject(result); org.json.JSONArray loginResponse = jsonResponse.getJSONArray("login"); _sessionid = jsonResponse.getJSONArray("login").getJSONObject(0).getString("sessionid"); - s_logger.debug("New Session id from NCC :" + _sessionid); + logger.debug("New Session id from NCC :" + _sessionid); set_nccsession(_sessionid); - s_logger.debug("session on Static Session variable" + get_nccsession()); + logger.debug("session on Static Session variable" + get_nccsession()); } - s_logger.debug("Login to NCC Device response :: " + result); + logger.debug("Login to NCC Device response :: " + result); return result; } catch (URISyntaxException e) { String errMsg = "Could not generate URI for Hyper-V agent"; - s_logger.error(errMsg, e); + 
logger.error(errMsg, e); } catch (JSONException e) { - s_logger.debug("JSON Exception :" + e.getMessage()); + logger.debug("JSON Exception :" + e.getMessage()); throw new ExecutionException("Failed to log in to NCC device at " + _ip + " due to " + e.getMessage()); } catch (Exception e) { throw new ExecutionException("Failed to log in to NCC device at " + _ip + " due to " + e.getMessage()); @@ -315,7 +316,7 @@ private void keepSessionAlive() throws ExecutionException { "/cs/cca/v1/cloudstacks", null, null); org.json.JSONObject jsonBody = new JSONObject(); getHttpRequest(jsonBody.toString(), agentUri, _sessionid); - s_logger.debug("Keeping Session Alive"); + logger.debug("Keeping Session Alive"); } catch (URISyntaxException e) { e.printStackTrace(); } @@ -336,10 +337,10 @@ private String queryAsyncJob(String jobId) throws ExecutionException { result = getHttpRequest(jsonBody.toString(), agentUri, _sessionid); JSONObject response = new JSONObject(result); if(response != null ) { - s_logger.debug("Job Status result for ["+jobId + "]:: " + result + " Tick and currentTime :" + System.currentTimeMillis() +" -" + startTick + "job cmd timeout :" +_nccCmdTimeout); + logger.debug("Job Status result for ["+jobId + "]:: " + result + " Tick and currentTime :" + System.currentTimeMillis() +" -" + startTick + "job cmd timeout :" +_nccCmdTimeout); String status = response.getJSONObject("journalcontext").getString("status").toUpperCase(); String message = response.getJSONObject("journalcontext").getString("message"); - s_logger.debug("Job Status Progress Status ["+ jobId + "]:: " + status); + logger.debug("Job Status Progress Status ["+ jobId + "]:: " + status); switch(status) { case "FINISHED": return status; @@ -357,7 +358,7 @@ private String queryAsyncJob(String jobId) throws ExecutionException { } catch (URISyntaxException e) { String errMsg = "Could not generate URI for NetScaler ControlCenter"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } catch (JSONException e) { 
e.printStackTrace(); } @@ -371,25 +372,25 @@ private synchronized Answer execute(NetScalerImplementNetworkCommand cmd, int nu new URI("https", null, _ip, DEFAULT_PORT, "/cs/adcaas/v1/networks", null, null); org.json.JSONObject jsonBody = new JSONObject(cmd.getDetails()); - s_logger.debug("Sending Network Implement to NCC:: " + jsonBody); + logger.debug("Sending Network Implement to NCC:: " + jsonBody); result = postHttpRequest(jsonBody.toString(), agentUri, _sessionid); - s_logger.debug("Result of Network Implement to NCC:: " + result); + logger.debug("Result of Network Implement to NCC:: " + result); result = queryAsyncJob(result); - s_logger.debug("Done query async of network implement request :: " + result); + logger.debug("Done query async of network implement request :: " + result); return new Answer(cmd, true, "Successfully allocated device"); } catch (URISyntaxException e) { String errMsg = "Could not generate URI for NetScaler ControlCenter "; - s_logger.error(errMsg, e); + logger.error(errMsg, e); } catch (ExecutionException e) { if(e.getMessage().equalsIgnoreCase(NccHttpCode.NOT_FOUND)) { return new Answer(cmd, true, "Successfully unallocated the device"); }else if(e.getMessage().startsWith("ERROR, ROLLBACK") ) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); return new Answer(cmd, false, e.getMessage()); } else { if (shouldRetry(numRetries)) { - s_logger.debug("Retrying the command NetScalerImplementNetworkCommand retry count: " + numRetries, e); + logger.debug("Retrying the command NetScalerImplementNetworkCommand retry count: " + numRetries, e); return retry(cmd, numRetries); } else { return new Answer(cmd, false, e.getMessage()); @@ -397,7 +398,7 @@ private synchronized Answer execute(NetScalerImplementNetworkCommand cmd, int nu } } catch (Exception e) { if (shouldRetry(numRetries)) { - s_logger.debug("Retrying the command NetScalerImplementNetworkCommand retry count: " + numRetries, e); + logger.debug("Retrying the command 
NetScalerImplementNetworkCommand retry count: " + numRetries, e); return retry(cmd, numRetries); } else { return new Answer(cmd, false, e.getMessage()); @@ -448,14 +449,14 @@ private Answer execute(HealthCheckLBConfigCommand cmd, int numRetries) { hcLB.add(loadBalancer); } } catch (ExecutionException e) { - s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { return new HealthCheckLBConfigAnswer(hcLB); } } catch (Exception e) { - s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { @@ -474,7 +475,7 @@ private String getLBHealthChecks(long networkid) throws ExecutionException { "/cs/adcaas/v1/networks/"+ networkid +"/lbhealthstatus", null, null); org.json.JSONObject jsonBody = new JSONObject(); response = getHttpRequest(jsonBody.toString(), agentUri, _sessionid); - s_logger.debug("LBHealthcheck Response :" + response); + logger.debug("LBHealthcheck Response :" + response); } catch (URISyntaxException e) { e.printStackTrace(); } @@ -494,24 +495,24 @@ private synchronized Answer execute(LoadBalancerConfigCommand cmd, int numRetrie "/cs/adcaas/v1/loadbalancerCmds", null, null); JSONObject lbConfigCmd = new JSONObject(); JSONObject lbcmd = new JSONObject(gsonLBConfig); - s_logger.debug("LB config from gsonstring to JSONObject : " + lbcmd.toString() + "\n" + "gson cmd is :: \t" + gsonLBConfig); + logger.debug("LB config from gsonstring to JSONObject : " + lbcmd.toString() + "\n" + "gson cmd is :: \t" + gsonLBConfig); lbConfigCmd.put("LoadBalancerConfigCommand", lbcmd.getJSONArray("loadBalancers")); - s_logger.debug("LB config paylod : " + lbConfigCmd.toString()); + logger.debug("LB config paylod : " + lbConfigCmd.toString()); String result = 
postHttpRequest(lbConfigCmd.toString(), agentUri, _sessionid); - s_logger.debug("Result of lbconfigcmg is "+ result); + logger.debug("Result of lbconfigcmg is "+ result); result = queryAsyncJob(result); - s_logger.debug("Done query async of LB ConfigCmd implement request and result:: " + result); + logger.debug("Done query async of LB ConfigCmd implement request and result:: " + result); return new Answer(cmd); } catch (ExecutionException e) { - s_logger.error("Failed to execute LoadBalancerConfigCommand due to ", e); + logger.error("Failed to execute LoadBalancerConfigCommand due to ", e); if(e.getMessage().equalsIgnoreCase(NccHttpCode.NOT_FOUND)) { return new Answer(cmd, true, "LB Rule is not present in NS device. So returning as removed the LB Rule"); } else if(e.getMessage().startsWith("ERROR, ROLLBACK COMPLETED") || e.getMessage().startsWith("ERROR, ROLLBACK FAILED")) { - s_logger.error("Failed to execute LoadBalancerConfigCommand due to : " + e.getMessage()); + logger.error("Failed to execute LoadBalancerConfigCommand due to : " + e.getMessage()); return new Answer(cmd, false, e.getMessage()); } else if (e.getMessage().startsWith(NccHttpCode.INTERNAL_ERROR)) { - s_logger.error("Failed to execute LoadBalancerConfigCommand as Internal Error returning Internal error ::" + e.getMessage() ); + logger.error("Failed to execute LoadBalancerConfigCommand as Internal Error returning Internal error ::" + e.getMessage() ); return new Answer(cmd, false, e.getMessage()); } if (shouldRetry(numRetries)) { @@ -520,7 +521,7 @@ private synchronized Answer execute(LoadBalancerConfigCommand cmd, int numRetrie return new Answer(cmd, false, e.getMessage()); } } catch (Exception e) { - s_logger.error("Failed to execute LoadBalancerConfigCommand due to ", e); + logger.error("Failed to execute LoadBalancerConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { @@ -614,16 +615,16 @@ private ExternalNetworkResourceUsageAnswer 
getPublicIpBytesSentAndReceived(Exter } } } - s_logger.debug("IPStats Response :" + response); + logger.debug("IPStats Response :" + response); } catch (URISyntaxException e) { e.printStackTrace(); } catch (ExecutionException e) { - s_logger.debug("Seesion Alive" + e.getMessage()); + logger.debug("Seesion Alive" + e.getMessage()); e.printStackTrace(); } } catch (Exception e) { - s_logger.error("Failed to get bytes sent and received statistics due to " + e); + logger.error("Failed to get bytes sent and received statistics due to " + e); throw new ExecutionException(e.getMessage()); } @@ -632,7 +633,7 @@ private ExternalNetworkResourceUsageAnswer getPublicIpBytesSentAndReceived(Exter private Answer retry(Command cmd, int numRetries) { int numRetriesRemaining = numRetries - 1; - s_logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetriesRemaining); + logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetriesRemaining); return executeRequest(cmd, numRetriesRemaining); } @@ -643,7 +644,7 @@ private boolean shouldRetry(int numRetries) { return true; } } catch (Exception e) { - s_logger.error("Failed to log in to Netscaler ControlCenter device at " + _ip + " due to " + e.getMessage()); + logger.error("Failed to log in to Netscaler ControlCenter device at " + _ip + " due to " + e.getMessage()); return false; } return false; @@ -661,7 +662,7 @@ private boolean refreshNCCConnection() { keepSessionAlive(); return true; } catch (ExecutionException ex) { - s_logger.debug("Failed to keep up the session alive ", ex); + logger.debug("Failed to keep up the session alive ", ex); } return ret; } @@ -748,7 +749,7 @@ public static String cleanPassword(String logString) { } return cleanLogString; } - public static HttpClient getHttpClient() { + public HttpClient getHttpClient() { HttpClient httpClient = null; TrustStrategy easyStrategy = new TrustStrategy() { @@ -766,18 +767,18 @@ public 
boolean isTrusted(X509Certificate[] chain, String authType) ClientConnectionManager ccm = new BasicClientConnectionManager(registry); httpClient = new DefaultHttpClient(ccm); } catch (KeyManagementException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (UnrecoverableKeyException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (NoSuchAlgorithmException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (KeyStoreException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } return httpClient; } - public static String getHttpRequest(final String jsonCmd, final URI agentUri, String sessionID) throws ExecutionException { + public String getHttpRequest(final String jsonCmd, final URI agentUri, String sessionID) throws ExecutionException { // Using Apache's HttpClient for HTTP POST // Java-only approach discussed at on StackOverflow concludes with // comment to use Apache HttpClient @@ -785,7 +786,7 @@ public static String getHttpRequest(final String jsonCmd, final URI agentUri, St // use Apache. 
String logMessage = StringEscapeUtils.unescapeJava(jsonCmd); logMessage = cleanPassword(logMessage); - s_logger.debug("GET request to " + agentUri.toString() + logger.debug("GET request to " + agentUri.toString() + " with contents " + logMessage); // Create request @@ -802,40 +803,40 @@ public static String getHttpRequest(final String jsonCmd, final URI agentUri, St StringEntity cmdJson = new StringEntity(jsonCmd); request.addHeader("content-type", "application/json"); request.addHeader("Cookie", "SessId=" + sessionID); - s_logger.debug("Sending cmd to " + agentUri.toString() + logger.debug("Sending cmd to " + agentUri.toString() + " cmd data:" + logMessage); HttpResponse response = httpClient.execute(request); // Unsupported commands will not route. if (response.getStatusLine().getStatusCode() == HttpStatus.SC_NOT_FOUND) { String errMsg = "Failed to send : HTTP error code : " + response.getStatusLine().getStatusCode(); - s_logger.error(errMsg); + logger.error(errMsg); String unsupportMsg = "Unsupported command " + agentUri.getPath() + ". 
Are you sure you got the right f of" + " server?"; Answer ans = new UnsupportedAnswer(null, unsupportMsg); - s_logger.error(ans); + logger.error(ans); result = s_gson.toJson(new Answer[] {ans}); } else if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { String errMsg = "Failed send to " + agentUri.toString() + " : HTTP error code : " + response.getStatusLine().getStatusCode(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ExecutionException("UNAUTHORIZED"); } else { result = EntityUtils.toString(response.getEntity()); String logResult = cleanPassword(StringEscapeUtils.unescapeJava(result)); - s_logger.debug("Get response is " + logResult); + logger.debug("Get response is " + logResult); } } catch (ClientProtocolException protocolEx) { // Problem with HTTP message exchange - s_logger.error(protocolEx); + logger.error(protocolEx); } catch (IOException connEx) { // Problem with underlying communications - s_logger.error(connEx); + logger.error(connEx); } finally { httpClient.getConnectionManager().shutdown(); } return result; } - public static String postHttpRequest(final String jsonCmd, final URI agentUri, String sessionID) throws ExecutionException { + public String postHttpRequest(final String jsonCmd, final URI agentUri, String sessionID) throws ExecutionException { // Using Apache's HttpClient for HTTP POST // Java-only approach discussed at on StackOverflow concludes with // comment to use Apache HttpClient @@ -843,7 +844,7 @@ public static String postHttpRequest(final String jsonCmd, final URI agentUri, S // use Apache. 
String logMessage = StringEscapeUtils.unescapeJava(jsonCmd); logMessage = cleanPassword(logMessage); - s_logger.debug("POST request to " + agentUri.toString() + logger.debug("POST request to " + agentUri.toString() + " with contents " + logMessage); // Create request @@ -863,13 +864,13 @@ public boolean isTrusted(X509Certificate[] chain, String authType) ClientConnectionManager ccm = new BasicClientConnectionManager(registry); httpClient = new DefaultHttpClient(ccm); } catch (KeyManagementException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (UnrecoverableKeyException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (NoSuchAlgorithmException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } catch (KeyStoreException e) { - s_logger.error("failed to initialize http client " + e.getMessage()); + logger.error("failed to initialize http client " + e.getMessage()); } String result = null; @@ -885,7 +886,7 @@ public boolean isTrusted(X509Certificate[] chain, String authType) request.addHeader("content-type", "application/json"); request.addHeader("Cookie", "SessId=" + sessionID); request.setEntity(cmdJson); - s_logger.debug("Sending cmd to " + agentUri.toString() + logger.debug("Sending cmd to " + agentUri.toString() + " cmd data:" + logMessage + "SEssion id: " + sessionID); HttpResponse response = httpClient.execute(request); @@ -895,7 +896,7 @@ public boolean isTrusted(X509Certificate[] chain, String authType) throw new ExecutionException(NccHttpCode.NOT_FOUND); } else if ((response.getStatusLine().getStatusCode() != HttpStatus.SC_OK ) && (response.getStatusLine().getStatusCode() != HttpStatus.SC_CREATED )) { String errMsg = "Command Not Success " + 
agentUri.toString() + " : HTTP error code : " + response.getStatusLine().getStatusCode(); - s_logger.error(errMsg); + logger.error(errMsg); throw new ExecutionException(NccHttpCode.INTERNAL_ERROR + " " + errMsg); } else if (response.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { //Successfully created the resource in the NCC, Now get the Job ID and send to the response @@ -907,15 +908,15 @@ public boolean isTrusted(X509Certificate[] chain, String authType) } else { result = EntityUtils.toString(response.getEntity()); String logResult = cleanPassword(StringEscapeUtils.unescapeJava(result)); - s_logger.debug("POST response is " + logResult); + logger.debug("POST response is " + logResult); } } catch (ClientProtocolException protocolEx) { // Problem with HTTP message exchange - s_logger.error(protocolEx); + logger.error(protocolEx); } catch (IOException connEx) { // Problem with underlying communications - s_logger.error(connEx); + logger.error(connEx); } finally { httpClient.getConnectionManager().shutdown(); } diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java index 99f7102b13b9..548f5509a890 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/resource/NetscalerResource.java @@ -30,7 +30,8 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.commons.io.output.ByteArrayOutputStream; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.bouncycastle.util.io.pem.PemObject; import org.bouncycastle.util.io.pem.PemWriter; @@ -162,7 +163,7 @@ public class NetscalerResource implements ServerResource { private String _publicIPNetmask; private String _publicIPVlan; - private 
static final Logger s_logger = Logger.getLogger(NetscalerResource.class); + protected static Logger LOGGER = LogManager.getLogger(NetscalerResource.class); protected Gson _gson; private final String _objectNamePathSep = "-"; @@ -471,12 +472,12 @@ private synchronized Answer execute(final IpAssocCommand cmd, final int numRetri saveConfiguration(); results[i++] = ip.getPublicIp() + " - success"; final String action = ip.isAdd() ? "associate" : "remove"; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Netscaler load balancer " + _ip + " successfully executed IPAssocCommand to " + action + " IP " + ip); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Netscaler load balancer " + _ip + " successfully executed IPAssocCommand to " + action + " IP " + ip); } } } catch (final ExecutionException e) { - s_logger.error("Netscaler loadbalancer " + _ip + " failed to execute IPAssocCommand due to " + e.getMessage()); + LOGGER.error("Netscaler loadbalancer " + _ip + " failed to execute IPAssocCommand due to " + e.getMessage()); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { @@ -526,14 +527,14 @@ private Answer execute(final HealthCheckLBConfigCommand cmd, final int numRetrie } } catch (final ExecutionException e) { - s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + LOGGER.error("Failed to execute HealthCheckLBConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { return new HealthCheckLBConfigAnswer(hcLB); } } catch (final Exception e) { - s_logger.error("Failed to execute HealthCheckLBConfigCommand due to ", e); + LOGGER.error("Failed to execute HealthCheckLBConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { @@ -583,8 +584,8 @@ private synchronized Answer execute(final LoadBalancerConfigCommand cmd, final i // create a load balancing virtual server addLBVirtualServer(nsVirtualServerName, srcIp, srcPort, lbAlgorithm, lbProtocol, 
loadBalancer.getStickinessPolicies(), null); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); } // create a new monitor @@ -700,9 +701,9 @@ private synchronized Answer execute(final LoadBalancerConfigCommand cmd, final i pemWriter.writeObject(pemObject); pemWriter.flush(); } catch (final IOException e) { - if (s_logger.isDebugEnabled()) + if (LOGGER.isDebugEnabled()) { - s_logger.debug("couldn't write PEM to a string", e); + LOGGER.debug("couldn't write PEM to a string", e); } // else just close the certDataStream } @@ -732,9 +733,9 @@ private synchronized Answer execute(final LoadBalancerConfigCommand cmd, final i SSL.createSslCertKey(_netscalerService, certFilename, keyFilename, certKeyName, sslCert.getPassword()); } } catch (final IOException e) { - if (s_logger.isDebugEnabled()) + if (LOGGER.isDebugEnabled()) { - s_logger.debug("couldn't open buffer for certificate", e); + LOGGER.debug("couldn't open buffer for certificate", e); } // else just close the certDataStream } @@ -747,8 +748,8 @@ private synchronized Answer execute(final LoadBalancerConfigCommand cmd, final i } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully added LB destination: " + destination.getDestIp() + ":" + destination.getDestPort() + " to load balancer " + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully added LB destination: " + destination.getDestIp() + ":" + destination.getDestPort() + " to load balancer " + srcIp + ":" + srcPort); } @@ -885,21 +886,21 @@ private synchronized Answer execute(final LoadBalancerConfigCommand cmd, final i } - if (s_logger.isInfoEnabled()) { - s_logger.info("Successfully executed resource LoadBalancerConfigCommand: " + _gson.toJson(cmd)); + if (LOGGER.isInfoEnabled()) { + 
LOGGER.info("Successfully executed resource LoadBalancerConfigCommand: " + _gson.toJson(cmd)); } saveConfiguration(); return new Answer(cmd); } catch (final ExecutionException e) { - s_logger.error("Failed to execute LoadBalancerConfigCommand due to ", e); + LOGGER.error("Failed to execute LoadBalancerConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { return new Answer(cmd, e); } } catch (final Exception e) { - s_logger.error("Failed to execute LoadBalancerConfigCommand due to ", e); + LOGGER.error("Failed to execute LoadBalancerConfigCommand due to ", e); if (shouldRetry(numRetries)) { return retry(cmd, numRetries); } else { @@ -965,7 +966,7 @@ private synchronized Answer execute(final CreateLoadBalancerApplianceCommand cmd try { Thread.sleep(10000); } catch (final InterruptedException e) { - s_logger.debug("[ignored] interrupted while waiting for netscaler to be 'up'."); + LOGGER.debug("[ignored] interrupted while waiting for netscaler to be 'up'."); } final ns refreshNsObj = new ns(); refreshNsObj.set_id(newVpx.get_id()); @@ -1002,8 +1003,8 @@ private synchronized Answer execute(final CreateLoadBalancerApplianceCommand cmd return new Answer(cmd, new ExecutionException("Failed to create VPX instance " + vpxName + " on the netscaler SDX device " + _ip)); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Successfully provisioned VPX instance " + vpxName + " on the Netscaler SDX device " + _ip); + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Successfully provisioned VPX instance " + vpxName + " on the Netscaler SDX device " + _ip); } // physical interfaces on the SDX range from 10/1 to 10/8 & 1/1 to 1/8 of which two different port or same port can be used for public and private interfaces @@ -1218,13 +1219,13 @@ private static void createSite(final nitro_service client, final String siteName } else { gslbsite.add(client, site); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully created GSLB site: " 
+ siteName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully created GSLB site: " + siteName); } } catch (final Exception e) { final String errMsg = "Failed to create GSLB site: " + siteName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1237,23 +1238,23 @@ private static void deleteSite(final nitro_service client, final String siteName if (site != null) { final gslbsite_gslbservice_binding[] serviceBindings = gslbsite_gslbservice_binding.get(client, siteName); if (serviceBindings != null && serviceBindings.length > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("There are services associated with GSLB site: " + siteName + " so ignoring site deletion"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("There are services associated with GSLB site: " + siteName + " so ignoring site deletion"); } } gslbsite.delete(client, siteName); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully deleted GSLB site: " + siteName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully deleted GSLB site: " + siteName); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.warn("Ignoring delete request for non existing GSLB site: " + siteName); + if (LOGGER.isDebugEnabled()) { + LOGGER.warn("Ignoring delete request for non existing GSLB site: " + siteName); } } } catch (final Exception e) { final String errMsg = "Failed to delete GSLB site: " + siteName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1265,8 +1266,8 @@ private static void updateSite(final nitro_service client, final String siteType gslbsite site; site = getSiteObject(client, siteName); if (site == null) { - if (s_logger.isDebugEnabled()) { - s_logger.warn("Ignoring update request 
for non existing GSLB site: " + siteName); + if (LOGGER.isDebugEnabled()) { + LOGGER.warn("Ignoring update request for non existing GSLB site: " + siteName); } return; } @@ -1280,14 +1281,14 @@ private static void updateSite(final nitro_service client, final String siteType site.set_sessionexchange("ENABLED"); gslbsite.update(client, site); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully updated GSLB site: " + siteName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully updated GSLB site: " + siteName); } } catch (final Exception e) { final String errMsg = "Failed to update GSLB site: " + siteName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1335,14 +1336,14 @@ private static void updateSite(final nitro_service client, final String siteType gslbvserver.add(client, vserver); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully added GSLB virtual server: " + vserverName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully added GSLB virtual server: " + vserverName); } } catch (final Exception e) { final String errMsg = "Failed to add GSLB virtual server: " + vserverName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1354,18 +1355,18 @@ private static void deleteVirtualServer(final nitro_service client, final String final gslbvserver vserver = getVserverObject(client, vserverName); if (vserver != null) { gslbvserver.delete(client, vserver); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully deleted GSLB virtual server: " + vserverName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully deleted GSLB virtual server: " + vserverName); } } else { - if (s_logger.isDebugEnabled()) { - 
s_logger.warn("Ignoring delete request for non existing GSLB virtual server: " + vserverName); + if (LOGGER.isDebugEnabled()) { + LOGGER.warn("Ignoring delete request for non existing GSLB virtual server: " + vserverName); } } } catch (final Exception e) { final String errMsg = "Failed to delete GSLB virtual server: " + vserverName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1410,13 +1411,13 @@ private static void createService(final nitro_service client, final String servi } else { gslbservice.add(client, service); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully created service: " + serviceName + " at site: " + siteName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully created service: " + serviceName + " at site: " + siteName); } } catch (final Exception e) { final String errMsg = "Failed to created service: " + serviceName + " at site: " + siteName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1427,18 +1428,18 @@ private static void deleteService(final nitro_service client, final String servi final gslbservice service = getServiceObject(client, serviceName); if (service != null) { gslbservice.delete(client, serviceName); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully deleted service: " + serviceName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully deleted service: " + serviceName); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.warn("Ignoring delete request for non existing service: " + serviceName); + if (LOGGER.isDebugEnabled()) { + LOGGER.warn("Ignoring delete request for non existing service: " + serviceName); } } } catch (final Exception e) { final String errMsg = "Failed to delete 
service: " + serviceName + " due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1453,22 +1454,22 @@ private static void createVserverServiceBinding(final nitro_service client, fina binding.set_servicename(serviceName); binding.set_weight(weight); gslbvserver_gslbservice_binding.add(client, binding); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully created service: " + serviceName + " and virtual server: " + vserverName + " binding"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully created service: " + serviceName + " and virtual server: " + vserverName + " binding"); } } catch (final nitro_exception ne) { if (ne.getErrorCode() == 273) { return; } errMsg = "Failed to create service: " + serviceName + " and virtual server: " + vserverName + " binding due to " + ne.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } catch (final Exception e) { errMsg = "Failed to create service: " + serviceName + " and virtual server: " + vserverName + " binding due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1481,8 +1482,8 @@ private static void deleteVserverServiceBinding(final nitro_service client, fina for (final gslbvserver_gslbservice_binding binding : bindings) { if (binding.get_servicename().equalsIgnoreCase(serviceName) && binding.get_name().equals(vserverName)) { gslbvserver_gslbservice_binding.delete(client, binding); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully deleted service: " + serviceName + " and virtual server: " + vserverName + " binding"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully deleted service: 
" + serviceName + " and virtual server: " + vserverName + " binding"); } break; } @@ -1490,8 +1491,8 @@ private static void deleteVserverServiceBinding(final nitro_service client, fina } } catch (final Exception e) { final String errMsg = "Failed to create service: " + serviceName + " and virtual server: " + vserverName + " binding due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1505,8 +1506,8 @@ private static void createVserverDomainBinding(final nitro_service client, final binding.set_domainname(domainName); binding.set_name(vserverName); gslbvserver_domain_binding.add(client, binding); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully added virtual server: " + vserverName + " domain name: " + domainName + " binding"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully added virtual server: " + vserverName + " domain name: " + domainName + " binding"); } return; } catch (final nitro_exception e) { @@ -1518,8 +1519,8 @@ private static void createVserverDomainBinding(final nitro_service client, final errMsg = e.getMessage(); } errMsg = "Failed to create virtual server: " + vserverName + " domain name: " + domainName + " binding" + errMsg; - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1531,8 +1532,8 @@ private static void deleteVserverDomainBinding(final nitro_service client, final for (final gslbvserver_domain_binding binding : bindings) { if (binding.get_domainname().equalsIgnoreCase(domainName)) { gslbvserver_domain_binding.delete(client, binding); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully deleted virtual server: " + vserverName + " and " + " domain: " + domainName + " binding"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Successfully deleted virtual 
server: " + vserverName + " and " + " domain: " + domainName + " binding"); } break; } @@ -1540,8 +1541,8 @@ private static void deleteVserverDomainBinding(final nitro_service client, final } } catch (final Exception e) { final String errMsg = "Failed to delete virtual server: " + vserverName + " and domain " + domainName + " binding due to " + e.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1562,8 +1563,8 @@ private static void createGslbServiceMonitor(final nitro_service nsService, fina } } catch (final Exception e) { final String errMsg = "Failed to create GSLB monitor for service public ip" + servicePublicIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(errMsg); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(errMsg); } throw new ExecutionException(errMsg); } @@ -1578,12 +1579,12 @@ private static void deleteGslbServiceMonitor(final nitro_service nsService, fina } catch (final nitro_exception ne) { if (ne.getErrorCode() != NitroError.NS_RESOURCE_NOT_EXISTS) { final String errMsg = "Failed to delete monitor " + monitorName + " for GSLB service due to " + ne.getMessage(); - s_logger.debug(errMsg); + LOGGER.debug(errMsg); throw new com.cloud.utils.exception.ExecutionException(errMsg); } } catch (final Exception e) { final String errMsg = "Failed to delete monitor " + monitorName + " for GSLB service due to " + e.getMessage(); - s_logger.debug(errMsg); + LOGGER.debug(errMsg); throw new com.cloud.utils.exception.ExecutionException(errMsg); } } @@ -1597,7 +1598,7 @@ private static void createGslbServiceGslbMonitorBinding(final nitro_service nsSe } catch (final Exception e) { // TODO: Nitro API version 10.* is not compatible for NetScalers 9.*, so may fail // against NetScaler version lesser than 10 hence ignore the exception - s_logger.warn("Failed to bind monitor to GSLB service due to " + e.getMessage()); + LOGGER.warn("Failed 
to bind monitor to GSLB service due to " + e.getMessage()); } } @@ -1607,13 +1608,13 @@ private static void deleteGslbServiceGslbMonitorBinding(final nitro_service nsSe if (monitorBindings != null && monitorBindings.length > 0) { for (final gslbservice_lbmonitor_binding binding : monitorBindings) { if (binding.get_monitor_name().equalsIgnoreCase(monitorName)) { - s_logger.info("Found a binding between monitor " + binding.get_monitor_name() + " and " + binding.get_servicename()); + LOGGER.info("Found a binding between monitor " + binding.get_monitor_name() + " and " + binding.get_servicename()); gslbservice_lbmonitor_binding.delete(nsService, binding); } } } } catch (final Exception e) { - s_logger.debug("Failed to delete GSLB monitor " + monitorName + " and GSLB service " + serviceName + " binding due to " + e.getMessage() + + LOGGER.debug("Failed to delete GSLB monitor " + monitorName + " and GSLB service " + serviceName + " binding due to " + e.getMessage() + " but moving on ..., will be cleaned up as part of GSLB " + " service delete any way.."); } } @@ -1626,7 +1627,7 @@ private static gslbsite getSiteObject(final nitro_service client, final String s return site; } } catch (final Exception e) { - s_logger.info("[ignored]" + LOGGER.info("[ignored]" + "error getting site: " + e.getLocalizedMessage()); } return null; @@ -1747,7 +1748,7 @@ private static void deleteKeyFile(final String nsIp, final String username, fina } private static void createSslCertKey(final nitro_service ns, final String certFilename, final String keyFilename, final String certKeyName, final String password) throws ExecutionException { - s_logger.debug("Adding cert to netscaler"); + LOGGER.debug("Adding cert to netscaler"); try { final sslcertkey certkey = new sslcertkey(); certkey.set_certkey(certKeyName); @@ -1772,7 +1773,7 @@ private static void createSslCertKey(final nitro_service ns, final String certFi } private static void bindCertKeyToVserver(final nitro_service ns, final String 
certKeyName, final String vserver) throws ExecutionException { - s_logger.debug("Adding cert to netscaler"); + LOGGER.debug("Adding cert to netscaler"); try { final sslvserver_sslcertkey_binding cert_binding = new sslvserver_sslcertkey_binding(); @@ -1999,7 +2000,7 @@ private synchronized Answer execute(final DestroyLoadBalancerApplianceCommand cm if (vpxToDelete == null) { final String msg = "There is no VPX instance " + vpxName + " on the Netscaler SDX device " + _ip + " to delete"; - s_logger.warn(msg); + LOGGER.warn(msg); return new DestroyLoadBalancerApplianceAnswer(cmd, true, msg); } @@ -2008,7 +2009,7 @@ private synchronized Answer execute(final DestroyLoadBalancerApplianceCommand cm nsDelObj.set_id(vpxToDelete.get_id()); vpxToDelete = ns.delete(_netscalerSdxService, nsDelObj); final String msg = "Deleted VPX instance " + vpxName + " on Netscaler SDX " + _ip + " successfully."; - s_logger.info(msg); + LOGGER.info(msg); return new DestroyLoadBalancerApplianceAnswer(cmd, true, msg); } catch (final Exception e) { if (shouldRetry(numRetries)) { @@ -2060,7 +2061,7 @@ private synchronized Answer execute(final SetStaticNatRulesCommand cmd, final in throw e; } } - s_logger.debug("Created Inat rule on the Netscaler device " + _ip + " to enable static NAT from " + srcIp + " to " + dstIP); + LOGGER.debug("Created Inat rule on the Netscaler device " + _ip + " to enable static NAT from " + srcIp + " to " + dstIP); } try { final rnat[] rnatRules = rnat.get(_netscalerService); @@ -2088,7 +2089,7 @@ private synchronized Answer execute(final SetStaticNatRulesCommand cmd, final in throw e; } } - s_logger.debug("Created Rnat rule on the Netscaler device " + _ip + " to enable revese static NAT from " + dstIP + " to " + srcIp); + LOGGER.debug("Created Rnat rule on the Netscaler device " + _ip + " to enable revese static NAT from " + dstIP + " to " + srcIp); } } else { try { @@ -2108,7 +2109,7 @@ private synchronized Answer execute(final SetStaticNatRulesCommand cmd, final in 
throw e; } } - s_logger.debug("Deleted Inat rule on the Netscaler device " + _ip + " to remove static NAT from " + srcIp + " to " + dstIP); + LOGGER.debug("Deleted Inat rule on the Netscaler device " + _ip + " to remove static NAT from " + srcIp + " to " + dstIP); } saveConfiguration(); @@ -2692,8 +2693,8 @@ private void addLBVirtualServer(final String virtualServerName, final String pub throw new ExecutionException("Failed to create new load balancing virtual server:" + virtualServerName + " due to " + apiCallResult.message); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created load balancing virtual server " + virtualServerName + " on the Netscaler device"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Created load balancing virtual server " + virtualServerName + " on the Netscaler device"); } } catch (final nitro_exception e) { throw new ExecutionException("Failed to create new virtual server:" + virtualServerName + " due to " + e.getMessage()); @@ -2742,9 +2743,9 @@ private void addLBMonitor(final String nsMonitorName, final String lbProtocol, f csMon.set_type(lbProtocol); if (lbProtocol.equalsIgnoreCase("HTTP")) { csMon.set_httprequest(hcp.getpingPath()); - s_logger.trace("LB Protocol is HTTP, Applying ping path on HealthCheck Policy"); + LOGGER.trace("LB Protocol is HTTP, Applying ping path on HealthCheck Policy"); } else { - s_logger.debug("LB Protocol is not HTTP, Skipping to apply ping path on HealthCheck Policy"); + LOGGER.debug("LB Protocol is not HTTP, Skipping to apply ping path on HealthCheck Policy"); } csMon.set_interval(hcp.getHealthcheckInterval()); @@ -2752,11 +2753,11 @@ private void addLBMonitor(final String nsMonitorName, final String lbProtocol, f csMon.set_resptimeout(hcp.getResponseTime()); csMon.set_failureretries(hcp.getUnhealthThresshold()); csMon.set_successretries(hcp.getHealthcheckThresshold()); - s_logger.debug("Monitor properites going to get created :interval :: " + csMon.get_interval() + "respTimeOUt:: " + 
csMon.get_resptimeout() + + LOGGER.debug("Monitor properites going to get created :interval :: " + csMon.get_interval() + "respTimeOUt:: " + csMon.get_resptimeout() + "failure retires(unhealththresshold) :: " + csMon.get_failureretries() + "successtries(healththresshold) ::" + csMon.get_successretries()); lbmonitor.add(_netscalerService, csMon); } else { - s_logger.debug("Monitor :" + nsMonitorName + " is already existing. Skipping to delete and create it"); + LOGGER.debug("Monitor :" + nsMonitorName + " is already existing. Skipping to delete and create it"); } } catch (final nitro_exception e) { throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); @@ -2776,9 +2777,9 @@ private void bindServiceToMonitor(final String nsServiceName, final String nsMon serviceMonitor.set_monitor_name(nsMonitorName); serviceMonitor.set_name(nsServiceName); serviceMonitor.set_monstate("ENABLED"); - s_logger.debug("Trying to bind the monitor :" + nsMonitorName + " to the service :" + nsServiceName); + LOGGER.debug("Trying to bind the monitor :" + nsMonitorName + " to the service :" + nsServiceName); com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding.add(_netscalerService, serviceMonitor); - s_logger.debug("Successfully binded the monitor :" + nsMonitorName + " to the service :" + nsServiceName); + LOGGER.debug("Successfully binded the monitor :" + nsMonitorName + " to the service :" + nsServiceName); } } catch (final nitro_exception e) { throw new ExecutionException("Failed to create new monitor :" + nsMonitorName + " due to " + e.getMessage()); @@ -2798,9 +2799,9 @@ private void unBindServiceToMonitor(final String nsServiceName, final String nsM new com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding(); serviceMonitor.set_monitor_name(nsMonitorName); serviceMonitor.set_name(nsServiceName); - s_logger.debug("Trying to unbind the monitor :" + nsMonitorName + " from the service :" + 
nsServiceName); + LOGGER.debug("Trying to unbind the monitor :" + nsMonitorName + " from the service :" + nsServiceName); service_lbmonitor_binding.delete(_netscalerService, serviceMonitor); - s_logger.debug("Successfully unbinded the monitor :" + nsMonitorName + " from the service :" + nsServiceName); + LOGGER.debug("Successfully unbinded the monitor :" + nsMonitorName + " from the service :" + nsServiceName); } } catch (final nitro_exception e) { @@ -2822,7 +2823,7 @@ private void removeLBMonitor(final String nsMonitorName) throws ExecutionExcepti final lbmonitor monitorObj = lbmonitor.get(_netscalerService, nsMonitorName); monitorObj.set_respcode(null); lbmonitor.delete(_netscalerService, monitorObj); - s_logger.info("Successfully deleted monitor : " + nsMonitorName); + LOGGER.info("Successfully deleted monitor : " + nsMonitorName); } } catch (final nitro_exception e) { if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { @@ -2849,8 +2850,8 @@ public synchronized void applyAutoScaleConfig(final LoadBalancerTO loadBalancer) } // AutoScale APIs are successful executed, now save the configuration. 
saveConfiguration(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Successfully executed resource AutoScaleConfig"); + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Successfully executed resource AutoScaleConfig"); } } @@ -2863,8 +2864,8 @@ private synchronized boolean createAutoScaleConfig(final LoadBalancerTO loadBala generateAutoScaleVmGroupIdentifier(loadBalancerTO); final String nsVirtualServerName = generateNSVirtualServerName(srcIp, srcPort); final AutoScaleVmGroupTO vmGroupTO = loadBalancerTO.getAutoScaleVmGroupTO(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); } addLBVirtualServer(nsVirtualServerName, srcIp, srcPort, lbAlgorithm, lbProtocol, loadBalancerTO.getStickinessPolicies(), vmGroupTO); @@ -3507,7 +3508,7 @@ private boolean isAutoScaleSupportedInNetScaler() throws ExecutionException { // TODO: Config team has introduce a new command to check // the list of entities supported in a NetScaler. Can use that // once it is present in AutoScale branch. - s_logger.warn("AutoScale is not supported in NetScaler"); + LOGGER.warn("AutoScale is not supported in NetScaler"); return false; } return true; @@ -3563,7 +3564,7 @@ private ExternalNetworkResourceUsageAnswer getPublicIpBytesSentAndReceived(final } } } catch (final Exception e) { - s_logger.error("Failed to get bytes sent and received statistics due to " + e); + LOGGER.error("Failed to get bytes sent and received statistics due to " + e); throw new ExecutionException(e.getMessage()); } @@ -3572,7 +3573,7 @@ private ExternalNetworkResourceUsageAnswer getPublicIpBytesSentAndReceived(final private Answer retry(final Command cmd, final int numRetries) { final int numRetriesRemaining = numRetries - 1; - s_logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetriesRemaining); + LOGGER.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetriesRemaining); return executeRequest(cmd, numRetriesRemaining); } @@ -3583,7 +3584,7 @@ private boolean shouldRetry(final int numRetries) { return true; } } catch (final Exception e) { - s_logger.error("Failed to log in to Netscaler device at " + _ip + " due to " + e.getMessage()); + LOGGER.error("Failed to log in to Netscaler device at " + _ip + " due to " + e.getMessage()); } return false; } diff --git a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java index 2293ccbbaa55..7b2ef012bed6 100644 --- a/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java +++ b/plugins/network-elements/netscaler/src/main/java/com/cloud/network/vm/NetScalerVMManagerImpl.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.lb.dao.ApplicationLoadBalancerRuleDao; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -95,7 +94,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class NetScalerVMManagerImpl extends ManagerBase implements NetScalerVMManager, VirtualMachineGuru { - private static final Logger s_logger = Logger.getLogger(NetScalerVMManagerImpl.class); static final private String NetScalerLbVmNamePrefix = "NS"; @Inject @@ -196,8 +194,8 @@ public void finalizeUnmanage(VirtualMachine vm) { @Override public boolean configure(String name, Map params) throws ConfigurationException { _itMgr.registerGuru(VirtualMachine.Type.NetScalerVm, this); - if (s_logger.isInfoEnabled()) { - s_logger.info(getName() + " has been configured"); + if 
(logger.isInfoEnabled()) { + logger.info(getName() + " has been configured"); } return true; } @@ -208,7 +206,7 @@ public String getName() { } protected VirtualRouter stopInternalLbVm(DomainRouterVO internalLbVm, boolean forced, Account caller, long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - s_logger.debug("Stopping internal lb vm " + internalLbVm); + logger.debug("Stopping internal lb vm " + internalLbVm); try { _itMgr.advanceStop(internalLbVm.getUuid(), forced); return _internalLbVmDao.findById(internalLbVm.getId()); @@ -220,7 +218,7 @@ protected VirtualRouter stopInternalLbVm(DomainRouterVO internalLbVm, boolean fo public VirtualRouterProvider addNetScalerLoadBalancerElement(long ntwkSvcProviderId) { VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(ntwkSvcProviderId, com.cloud.network.VirtualRouterProvider.Type.NetScalerVm); if (element != null) { - s_logger.debug("There is already an " + getName() + " with service provider id " + ntwkSvcProviderId); + logger.debug("There is already an " + getName() + " with service provider id " + ntwkSvcProviderId); return element; } @@ -261,7 +259,7 @@ public Map deployNsVpx(Account owner, DeployDestination dest, De Account systemAcct = _accountMgr.getSystemAccount(); if (template == null) { - s_logger.error(" Unable to find the NS VPX template"); + logger.error(" Unable to find the NS VPX template"); throw new CloudRuntimeException("Unable to find the Template" + templateId); } long dataCenterId = dest.getDataCenter().getId(); @@ -384,7 +382,7 @@ public Map deployNsVpx(Account owner, DeployDestination dest, De protected void startNsVpx(VMInstanceVO nsVpx, Map params) throws StorageUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Starting NS Vpx " + nsVpx); + logger.debug("Starting NS Vpx " + nsVpx); _itMgr.start(nsVpx.getUuid(), params, null, null); } @@ -409,7 +407,7 @@ public Map 
deployNetscalerServiceVm(DeployNetscalerVpxCmd cmd) { protected VirtualRouter stopNetScalerVm(final long vmId, final boolean forced, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { final DomainRouterVO netscalerVm = _routerDao.findById(vmId); - s_logger.debug("Stopping NetScaler vm " + netscalerVm); + logger.debug("Stopping NetScaler vm " + netscalerVm); if (netscalerVm == null || netscalerVm.getRole() != Role.NETSCALER_VM) { throw new InvalidParameterValueException("Can't find NetScaler vm by id specified"); @@ -433,7 +431,7 @@ public VirtualRouter stopNetscalerServiceVm(Long id, boolean forced, Account cal @Override public VirtualRouter stopNetScalerVm(Long vmId, boolean forced, Account caller, long callingUserId) { final DomainRouterVO netscalerVm = _routerDao.findById(vmId); - s_logger.debug("Stopping NetScaler vm " + netscalerVm); + logger.debug("Stopping NetScaler vm " + netscalerVm); if (netscalerVm == null || netscalerVm.getRole() != Role.NETSCALER_VM) { throw new InvalidParameterValueException("Can't find NetScaler vm by id specified"); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/api/commands/ListNiciraNvpDeviceNetworksCmd.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/api/commands/ListNiciraNvpDeviceNetworksCmd.java index a3217cc7284e..864fb6c68991 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/api/commands/ListNiciraNvpDeviceNetworksCmd.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/api/commands/ListNiciraNvpDeviceNetworksCmd.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -50,7 +49,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListNiciraNvpDeviceNetworksCmd extends BaseListCmd { - public static final Logger s_logger = 
Logger.getLogger(ListNiciraNvpDeviceNetworksCmd.class.getName()); private static final String s_name = "listniciranvpdevicenetworks"; @Inject protected NiciraNvpElementService niciraNvpElementService; diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java index 1146a5435ca4..356b452a9e48 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/element/NiciraNvpElement.java @@ -31,7 +31,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -139,7 +138,6 @@ public class NiciraNvpElement extends AdapterBase implements ConnectivityProvide private static final int MAX_PORT = 65535; private static final int MIN_PORT = 0; - private static final Logger s_logger = Logger.getLogger(NiciraNvpElement.class); private static final Map> capabilities = setCapabilities(); @@ -189,18 +187,18 @@ public Provider getProvider() { } protected boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if NiciraNvpElement can handle service " + service.getName() + " on network " + network.getDisplayText()); + logger.debug("Checking if NiciraNvpElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Lswitch) { return false; } if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("NiciraNvpElement is not a provider for network " + network.getDisplayText()); + 
logger.debug("NiciraNvpElement is not a provider for network " + network.getDisplayText()); return false; } if (!ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.NiciraNvp)) { - s_logger.debug("NiciraNvpElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); + logger.debug("NiciraNvpElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -217,20 +215,20 @@ public boolean configure(String name, Map params) throws Configu @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("entering NiciraNvpElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); + logger.debug("entering NiciraNvpElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); if (!canHandle(network, Service.Connectivity)) { return false; } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -252,7 +250,7 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin } else if (network.getGuestType().equals(GuestType.Isolated) && networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.NiciraNvp)) { // 
Implement SourceNat immediately as we have al the info already - s_logger.debug("Apparently we are supposed to provide SourceNat on this network"); + logger.debug("Apparently we are supposed to provide SourceNat on this network"); PublicIp sourceNatIp = ipAddrMgr.assignSourceNatIpAddressToGuestNetwork(owner, network); String publicCidr = sourceNatIp.getAddress().addr() + "/" + NetUtils.getCidrSize(sourceNatIp.getVlanNetmask()); @@ -278,7 +276,7 @@ else if (network.getGuestType().equals(GuestType.Isolated) && networkModel.isPro context.getAccount().getAccountName()); CreateLogicalRouterAnswer answer = (CreateLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - s_logger.error("Failed to create Logical Router for network " + network.getDisplayText()); + logger.error("Failed to create Logical Router for network " + network.getDisplayText()); return false; } @@ -315,7 +313,7 @@ private boolean sharedNetworkSupportUUIDVlanId(Network network, String lSwitchUu new ConfigureSharedNetworkUuidCommand(lRouterUuid, lSwitchUuid, portIpAddress, ownerName, network.getId()); ConfigureSharedNetworkUuidAnswer answer = (ConfigureSharedNetworkUuidAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - s_logger.error("Failed to configure Logical Router for Shared network " + network.getDisplayText()); + logger.error("Failed to configure Logical Router for Shared network " + network.getDisplayText()); return false; } return true; @@ -334,7 +332,7 @@ private boolean sharedNetworkSupportNumericalVlanId(Network network, String lSwi new ConfigureSharedNetworkVlanIdCommand(lSwitchUuid, l2GatewayServiceUuid , vlanId, ownerName, network.getId()); ConfigureSharedNetworkVlanIdAnswer answer = (ConfigureSharedNetworkVlanIdAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - s_logger.error("Failed to configure Shared network " + network.getDisplayText()); + logger.error("Failed 
to configure Shared network " + network.getDisplayText()); return false; } } @@ -359,7 +357,7 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } @@ -367,7 +365,7 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -379,14 +377,14 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm FindLogicalSwitchPortAnswer answer = (FindLogicalSwitchPortAnswer)agentMgr.easySend(niciraNvpHost.getId(), findCmd); if (answer.getResult()) { - s_logger.warn("Existing Logical Switchport found for nic " + nic.getName() + " with uuid " + existingNicMap.getLogicalSwitchPortUuid()); + logger.warn("Existing Logical Switchport found for nic " + nic.getName() + " with uuid " + existingNicMap.getLogicalSwitchPortUuid()); UpdateLogicalSwitchPortCommand cmd = new UpdateLogicalSwitchPortCommand(existingNicMap.getLogicalSwitchPortUuid(), BroadcastDomainType.getValue(network.getBroadcastUri()), nicVO.getUuid(), context.getDomain().getName() + "-" + context.getAccount().getAccountName(), nic.getName()); agentMgr.easySend(niciraNvpHost.getId(), cmd); return true; } else { - s_logger.error("Stale entry found for nic " + nic.getName() + " with logical switchport uuid " + existingNicMap.getLogicalSwitchPortUuid()); + logger.error("Stale entry found for nic " + nic.getName() + " with logical switchport uuid " + 
existingNicMap.getLogicalSwitchPortUuid()); niciraNvpNicMappingDao.remove(existingNicMap.getId()); } } @@ -397,7 +395,7 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm CreateLogicalSwitchPortAnswer answer = (CreateLogicalSwitchPortAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("CreateLogicalSwitchPortCommand failed"); + logger.error("CreateLogicalSwitchPortCommand failed"); return false; } @@ -417,7 +415,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm } if (network.getBroadcastUri() == null) { - s_logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); + logger.error("Nic has no broadcast Uri with the LSwitch Uuid"); return false; } @@ -425,7 +423,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -433,7 +431,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm NiciraNvpNicMappingVO nicMap = niciraNvpNicMappingDao.findByNicUuid(nicVO.getUuid()); if (nicMap == null) { - s_logger.error("No mapping for nic " + nic.getName()); + logger.error("No mapping for nic " + nic.getName()); return false; } @@ -441,7 +439,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm DeleteLogicalSwitchPortAnswer answer = (DeleteLogicalSwitchPortAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeleteLogicalSwitchPortCommand failed"); + logger.error("DeleteLogicalSwitchPortCommand failed"); return false; } @@ 
-458,7 +456,7 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -466,13 +464,13 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle //Dont destroy logical router when removing Shared Networks if (! network.getGuestType().equals(GuestType.Shared) && networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, Provider.NiciraNvp)) { - s_logger.debug("Apparently we were providing SourceNat on this network"); + logger.debug("Apparently we were providing SourceNat on this network"); // Deleting the LogicalRouter will also take care of all provisioned // nat rules. NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.warn("No logical router uuid found for network " + network.getDisplayText()); + logger.warn("No logical router uuid found for network " + network.getDisplayText()); // This might be cause by a failed deployment, so don't make shutdown fail as well. 
return true; } @@ -480,7 +478,7 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle DeleteLogicalRouterCommand cmd = new DeleteLogicalRouterCommand(routermapping.getLogicalRouterUuid()); DeleteLogicalRouterAnswer answer = (DeleteLogicalRouterAnswer)agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer.getResult() == false) { - s_logger.error("Failed to delete LogicalRouter for network " + network.getDisplayText()); + logger.error("Failed to delete LogicalRouter for network " + network.getDisplayText()); return false; } @@ -521,11 +519,11 @@ public boolean verifyServicesCombination(Set services) { // This element can only function in a Nicra Nvp based // SDN network, so Connectivity needs to be present here if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } if ((services.contains(Service.PortForwarding) || services.contains(Service.StaticNat)) && !services.contains(Service.SourceNat)) { - s_logger.warn("Unable to provide StaticNat and/or PortForwarding without the SourceNat service"); + logger.warn("Unable to provide StaticNat and/or PortForwarding without the SourceNat service"); return false; } return true; @@ -807,7 +805,7 @@ public boolean applyIps(Network network, List ipAddre // SourceNat is required for StaticNat and PortForwarding List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -816,7 +814,7 @@ public boolean applyIps(Network network, List ipAddre NiciraNvpRouterMappingVO routermapping = 
niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error("No logical router uuid found for network " + network.getDisplayText()); return false; } @@ -835,7 +833,7 @@ public boolean applyIps(Network network, List ipAddre //FIXME answer can be null if the host is down return answer.getResult(); } else { - s_logger.debug("No need to provision ip addresses as we are not providing L3 services."); + logger.debug("No need to provision ip addresses as we are not providing L3 services."); } return true; @@ -852,7 +850,7 @@ public boolean applyStaticNats(Network network, List rules) List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -860,7 +858,7 @@ public boolean applyStaticNats(Network network, List rules) NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error("No logical router uuid found for network " + network.getDisplayText()); return false; } @@ -892,7 +890,7 @@ public boolean applyPFRules(Network network, List rules) thr List devices = niciraNvpDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + network.getPhysicalNetworkId()); return false; } NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -900,7 +898,7 @@ public boolean 
applyPFRules(Network network, List rules) thr NiciraNvpRouterMappingVO routermapping = niciraNvpRouterMappingDao.findByNetworkId(network.getId()); if (routermapping == null) { - s_logger.error("No logical router uuid found for network " + network.getDisplayText()); + logger.error("No logical router uuid found for network " + network.getDisplayText()); return false; } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java index 3ffc601990cd..61dcf914e499 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java @@ -27,7 +27,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.CreateLogicalSwitchAnswer; @@ -82,7 +81,6 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru implements NetworkGuruAdditionalFunctions{ private static final int MAX_NAME_LENGTH = 40; - private static final Logger s_logger = Logger.getLogger(NiciraNvpGuestNetworkGuru.class); @Inject protected NetworkModel networkModel; @@ -143,18 +141,18 @@ public Network design(final NetworkOffering offering, final DeploymentPlan plan, final PhysicalNetworkVO physnet = physicalNetworkDao.findById(plan.getPhysicalNetworkId()); final DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } final List devices = niciraNvpDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + physnet.getName()); + logger.error("No NiciraNvp Controller on 
physical network " + physnet.getName()); return null; } - s_logger.debug("Nicira Nvp " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Nicira Nvp " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); - s_logger.debug("Physical isolation type is supported, asking GuestNetworkGuru to design this network"); + logger.debug("Physical isolation type is supported, asking GuestNetworkGuru to design this network"); final NetworkVO networkObject = (NetworkVO) super.design(offering, plan, userSpecified, owner); if (networkObject == null) { return null; @@ -203,7 +201,7 @@ public Network implement(final Network network, final NetworkOffering offering, final List devices = niciraNvpDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + physicalNetworkId); + logger.error("No NiciraNvp Controller on physical network " + physicalNetworkId); return null; } final NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -217,7 +215,7 @@ public Network implement(final Network network, final NetworkOffering offering, checkL2GatewayServiceSharedNetwork(niciraNvpHost); } catch (Exception e){ - s_logger.error("L2 Gateway Service Issue: " + e.getMessage()); + logger.error("L2 Gateway Service Issue: " + e.getMessage()); return null; } } @@ -227,16 +225,16 @@ public Network implement(final Network network, final NetworkOffering offering, final CreateLogicalSwitchAnswer answer = (CreateLogicalSwitchAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("CreateLogicalSwitchCommand failed"); + logger.error("CreateLogicalSwitchCommand failed"); return null; } try { implemented.setBroadcastUri(new URI("lswitch", answer.getLogicalSwitchUuid(), null)); implemented.setBroadcastDomainType(BroadcastDomainType.Lswitch); - s_logger.info("Implemented OK, network linked to = " + 
implemented.getBroadcastUri().toString()); + logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); } catch (final URISyntaxException e) { - s_logger.error("Unable to store logical switch id in broadcast uri, uuid = " + implemented.getUuid(), e); + logger.error("Unable to store logical switch id in broadcast uri, uuid = " + implemented.getUuid(), e); return null; } @@ -278,13 +276,13 @@ public boolean release(final NicProfile nic, final VirtualMachineProfile vm, fin public void shutdown(final NetworkProfile profile, final NetworkOffering offering) { final NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Lswitch || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } final List devices = niciraNvpDao.listByPhysicalNetwork(networkObject.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No NiciraNvp Controller on physical network " + networkObject.getPhysicalNetworkId()); + logger.error("No NiciraNvp Controller on physical network " + networkObject.getPhysicalNetworkId()); return; } final NiciraNvpDeviceVO niciraNvpDevice = devices.get(0); @@ -300,7 +298,7 @@ public void shutdown(final NetworkProfile profile, final NetworkOffering offerin final DeleteLogicalSwitchAnswer answer = (DeleteLogicalSwitchAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DeleteLogicalSwitchCommand failed"); + logger.error("DeleteLogicalSwitchCommand failed"); } super.shutdown(profile, offering); @@ -310,30 +308,30 @@ private void sharedNetworksCleanup(NetworkVO networkObject, String logicalSwitch NiciraNvpRouterMappingVO routermapping = 
niciraNvpRouterMappingDao.findByNetworkId(networkObject.getId()); if (routermapping == null) { // Case 1: Numerical Vlan Provided -> No lrouter used. - s_logger.info("Shared Network " + networkObject.getDisplayText() + " didn't use Logical Router"); + logger.info("Shared Network " + networkObject.getDisplayText() + " didn't use Logical Router"); } else { //Case 2: Logical Router's UUID provided as Vlan id -> Remove lrouter port but not lrouter. String lRouterUuid = routermapping.getLogicalRouterUuid(); - s_logger.debug("Finding Logical Router Port on Logical Router " + lRouterUuid + " with attachment_lswitch_uuid=" + logicalSwitchUuid + " to delete it"); + logger.debug("Finding Logical Router Port on Logical Router " + lRouterUuid + " with attachment_lswitch_uuid=" + logicalSwitchUuid + " to delete it"); final FindLogicalRouterPortCommand cmd = new FindLogicalRouterPortCommand(lRouterUuid, logicalSwitchUuid); final FindLogicalRouterPortAnswer answer = (FindLogicalRouterPortAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmd); if (answer != null && answer.getResult()) { String logicalRouterPortUuid = answer.getLogicalRouterPortUuid(); - s_logger.debug("Found Logical Router Port " + logicalRouterPortUuid + ", deleting it"); + logger.debug("Found Logical Router Port " + logicalRouterPortUuid + ", deleting it"); final DeleteLogicalRouterPortCommand cmdDeletePort = new DeleteLogicalRouterPortCommand(lRouterUuid, logicalRouterPortUuid); final DeleteLogicalRouterPortAnswer answerDelete = (DeleteLogicalRouterPortAnswer) agentMgr.easySend(niciraNvpHost.getId(), cmdDeletePort); if (answerDelete != null && answerDelete.getResult()){ - s_logger.info("Successfully deleted Logical Router Port " + logicalRouterPortUuid); + logger.info("Successfully deleted Logical Router Port " + logicalRouterPortUuid); } else { - s_logger.error("Could not delete Logical Router Port " + logicalRouterPortUuid); + logger.error("Could not delete Logical Router Port " + logicalRouterPortUuid); } } 
else { - s_logger.error("Find Logical Router Port failed"); + logger.error("Find Logical Router Port failed"); } } } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraNvpTag.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraNvpTag.java index 625e49c21a53..1c1fd7f3ace6 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraNvpTag.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraNvpTag.java @@ -19,11 +19,12 @@ package com.cloud.network.nicira; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class NiciraNvpTag { private static final int TAG_MAX_LEN = 40; - private static final Logger s_logger = Logger.getLogger(NiciraNvpTag.class); + protected Logger logger = LogManager.getLogger(getClass()); private String scope; private String tag; @@ -33,7 +34,7 @@ public NiciraNvpTag() { public NiciraNvpTag(String scope, String tag) { this.scope = scope; if (tag.length() > 40) { - s_logger.warn("tag \"" + tag + "\" too long, truncating to 40 characters"); + logger.warn("tag \"" + tag + "\" too long, truncating to 40 characters"); this.tag = tag.substring(0, TAG_MAX_LEN); } else { this.tag = tag; @@ -54,7 +55,7 @@ public String getTag() { public void setTag(String tag) { if (tag.length() > 40) { - s_logger.warn("tag \"" + tag + "\" too long, truncating to 40 characters"); + logger.warn("tag \"" + tag + "\" too long, truncating to 40 characters"); this.tag = tag.substring(0, 40); } else { this.tag = tag; diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java index aa428b08dc3d..f9c86be9d08f 100644 --- 
a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/nicira/NiciraRestClient.java @@ -30,7 +30,6 @@ import org.apache.http.client.protocol.HttpClientContext; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; import com.cloud.utils.rest.BasicRestClient; import com.cloud.utils.rest.CloudstackRESTException; @@ -41,7 +40,6 @@ public class NiciraRestClient extends BasicRestClient { - private static final Logger s_logger = Logger.getLogger(NiciraRestClient.class); private static final String CONTENT_TYPE = HttpConstants.CONTENT_TYPE; private static final String TEXT_HTML_CONTENT_TYPE = HttpConstants.TEXT_HTML_CONTENT_TYPE; @@ -81,12 +79,12 @@ CloseableHttpResponse execute(final HttpUriRequest request, final int previousSt throw new CloudstackRESTException("Reached max executions limit of " + executionLimit); } counter.incrementExecutionCounter(); - s_logger.debug("Executing " + request.getMethod() + " request [execution count = " + counter.getValue() + "]"); + logger.debug("Executing " + request.getMethod() + " request [execution count = " + counter.getValue() + "]"); final CloseableHttpResponse response = super.execute(request); final StatusLine statusLine = response.getStatusLine(); final int statusCode = statusLine.getStatusCode(); - s_logger.debug("Status of last request: " + statusLine.toString()); + logger.debug("Status of last request: " + statusLine.toString()); if (HttpStatusCodeHelper.isUnauthorized(statusCode)) { return handleUnauthorizedResponse(request, previousStatusCode, response, statusCode); } else if (HttpStatusCodeHelper.isSuccess(statusCode)) { @@ -102,7 +100,7 @@ private CloseableHttpResponse handleUnauthorizedResponse(final HttpUriRequest re throws CloudstackRESTException { super.closeResponse(response); if 
(HttpStatusCodeHelper.isUnauthorized(previousStatusCode)) { - s_logger.error(responseToErrorMessage(response)); + logger.error(responseToErrorMessage(response)); throw new CloudstackRESTException("Two consecutive failed attempts to authenticate against REST server"); } final HttpUriRequest authenticateRequest = createAuthenticationRequest(); @@ -138,7 +136,7 @@ private String responseToErrorMessage(final CloseableHttpResponse response) { final String respobnseBody = EntityUtils.toString(entity); errorMessage = respobnseBody.subSequence(0, maxResponseErrorMesageLength).toString(); } catch (final IOException e) { - s_logger.debug("Could not read response body. Response: " + response, e); + logger.debug("Could not read response body. Response: " + response, e); } } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/NiciraNvpResource.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/NiciraNvpResource.java index 80a9386e4f43..c2841f18c1cb 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/NiciraNvpResource.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/NiciraNvpResource.java @@ -26,7 +26,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; @@ -52,7 +53,7 @@ public class NiciraNvpResource implements ServerResource { - private static final Logger s_logger = Logger.getLogger(NiciraNvpResource.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final int NAME_MAX_LEN = 40; public static final int NUM_RETRIES = 2; @@ -176,11 +177,11 @@ public PingCommand getCurrentStatus(final long id) { final ControlClusterStatus ccs = niciraNvpApi.getControlClusterStatus(); getApiProviderMajorityVersion(ccs); if 
(!"stable".equals(ccs.getClusterStatus())) { - s_logger.error("ControlCluster state is not stable: " + ccs.getClusterStatus()); + logger.error("ControlCluster state is not stable: " + ccs.getClusterStatus()); return null; } } catch (final NiciraNvpApiException e) { - s_logger.error("getControlClusterStatus failed", e); + logger.error("getControlClusterStatus failed", e); return null; } return new PingCommand(Host.Type.L2Networking, id); @@ -210,7 +211,7 @@ public Answer executeRequest(final Command cmd) { try { return wrapper.execute(cmd, this); } catch (final Exception e) { - s_logger.debug("Received unsupported command " + cmd.toString()); + logger.debug("Received unsupported command " + cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraCheckHealthCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraCheckHealthCommandWrapper.java index 34e4548cf816..821b9f6d87c0 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraCheckHealthCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraCheckHealthCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.network.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthAnswer; @@ -35,7 +34,6 @@ public class NiciraCheckHealthCommandWrapper extends CommandWrapper { private static final String CONTROL_CLUSTER_STATUS_IS_STABLE = "stable"; - private static final Logger s_logger = Logger.getLogger(NiciraCheckHealthCommandWrapper.class); @Override public Answer execute(final CheckHealthCommand command, final NiciraNvpResource serverResource) { @@ -45,11 +43,11 @@ public Answer execute(final CheckHealthCommand command, final NiciraNvpResource final ControlClusterStatus 
clusterStatus = niciraNvpApi.getControlClusterStatus(); final String status = clusterStatus.getClusterStatus(); if (clusterIsUnstable(status)) { - s_logger.warn("Control cluster is not stable. Current status is " + status); + logger.warn("Control cluster is not stable. Current status is " + status); healthy = false; } } catch (final NiciraNvpApiException e) { - s_logger.error("Exception caught while checking control cluster status during health check", e); + logger.error("Exception caught while checking control cluster status during health check", e); healthy = false; } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigurePortForwardingRulesCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigurePortForwardingRulesCommandWrapper.java index bb19e75b704f..7b7108c3d419 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigurePortForwardingRulesCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigurePortForwardingRulesCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ConfigurePortForwardingRulesOnLogicalRouterAnswer; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = ConfigurePortForwardingRulesOnLogicalRouterCommand.class) public final class NiciraNvpConfigurePortForwardingRulesCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpConfigurePortForwardingRulesCommandWrapper.class); @Override public Answer execute(final ConfigurePortForwardingRulesOnLogicalRouterCommand command, final NiciraNvpResource niciraNvpResource) { @@ -71,14 +69,14 @@ public Answer execute(final ConfigurePortForwardingRulesOnLogicalRouterCommand c if 
(storedRule.equalsIgnoreUuid(rulepair[1])) { // The outgoing rule exists outgoing = storedRule; - s_logger.debug("Found matching outgoing rule " + outgoing.getUuid()); + logger.debug("Found matching outgoing rule " + outgoing.getUuid()); if (incoming != null) { break; } } else if (storedRule.equalsIgnoreUuid(rulepair[0])) { // The incoming rule exists incoming = storedRule; - s_logger.debug("Found matching incoming rule " + incoming.getUuid()); + logger.debug("Found matching incoming rule " + incoming.getUuid()); if (outgoing != null) { break; } @@ -86,26 +84,26 @@ public Answer execute(final ConfigurePortForwardingRulesOnLogicalRouterCommand c } if (incoming != null && outgoing != null) { if (rule.revoked()) { - s_logger.debug("Deleting incoming rule " + incoming.getUuid()); + logger.debug("Deleting incoming rule " + incoming.getUuid()); niciraNvpApi.deleteLogicalRouterNatRule(command.getLogicalRouterUuid(), incoming.getUuid()); - s_logger.debug("Deleting outgoing rule " + outgoing.getUuid()); + logger.debug("Deleting outgoing rule " + outgoing.getUuid()); niciraNvpApi.deleteLogicalRouterNatRule(command.getLogicalRouterUuid(), outgoing.getUuid()); } } else { if (rule.revoked()) { - s_logger.warn("Tried deleting a rule that does not exist, " + rule.getSrcIp() + " -> " + rule.getDstIp()); + logger.warn("Tried deleting a rule that does not exist, " + rule.getSrcIp() + " -> " + rule.getDstIp()); break; } rulepair[0] = niciraNvpApi.createLogicalRouterNatRule(command.getLogicalRouterUuid(), rulepair[0]); - s_logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[0])); + logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[0])); try { rulepair[1] = niciraNvpApi.createLogicalRouterNatRule(command.getLogicalRouterUuid(), rulepair[1]); - s_logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[1])); + logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[1])); } catch (final NiciraNvpApiException ex) { - 
s_logger.warn("NiciraNvpApiException during create call, rolling back previous create"); + logger.warn("NiciraNvpApiException during create call, rolling back previous create"); niciraNvpApi.deleteLogicalRouterNatRule(command.getLogicalRouterUuid(), rulepair[0].getUuid()); throw ex; // Rethrow the original exception } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkUuidCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkUuidCommandWrapper.java index 5f3198abb4f3..bdbf612d3afd 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkUuidCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkUuidCommandWrapper.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ConfigureSharedNetworkUuidAnswer; @@ -47,7 +46,6 @@ @ResourceWrapper(handles = ConfigureSharedNetworkUuidCommand.class) public final class NiciraNvpConfigureSharedNetworkUuidCommandWrapper extends CommandWrapper{ - private static final Logger s_logger = Logger.getLogger(NiciraNvpConfigureSharedNetworkUuidCommandWrapper.class); @Override public Answer execute(ConfigureSharedNetworkUuidCommand command, NiciraNvpResource niciraNvpResource) { @@ -60,10 +58,10 @@ public Answer execute(ConfigureSharedNetworkUuidCommand command, NiciraNvpResour final NiciraNvpApi niciraNvpApi = niciraNvpResource.getNiciraNvpApi(); - s_logger.debug("Attaching Logical Switch " + logicalSwitchUuid + " on Logical Router " + logicalRouterUuid + " for Shared Network " + networkId); + logger.debug("Attaching Logical Switch " + logicalSwitchUuid + " on Logical Router " + logicalRouterUuid + " for Shared 
Network " + networkId); //Step 1: Get lSwitch displayName - s_logger.info("Looking for Logical Switch " + logicalSwitchUuid + " display name"); + logger.info("Looking for Logical Switch " + logicalSwitchUuid + " display name"); String logicalSwitchDisplayName; try{ List lSwitchList = niciraNvpApi.findLogicalSwitch(logicalSwitchUuid); @@ -72,30 +70,30 @@ public Answer execute(ConfigureSharedNetworkUuidCommand command, NiciraNvpResour logicalSwitchDisplayName = lSwitchList.get(0).getDisplayName(); } else { - s_logger.error("More than one Logical Switch found with uuid " + logicalSwitchUuid); + logger.error("More than one Logical Switch found with uuid " + logicalSwitchUuid); throw new CloudRuntimeException("More than one Logical Switch found with uuid=" + logicalSwitchUuid); } } else { - s_logger.error("Logical Switch " + logicalSwitchUuid + " not found"); + logger.error("Logical Switch " + logicalSwitchUuid + " not found"); throw new CloudRuntimeException("Logical Switch " + logicalSwitchUuid + " not found"); } } catch (NiciraNvpApiException e){ - s_logger.warn("Logical Switch " + logicalSwitchUuid + " not found, retrying"); + logger.warn("Logical Switch " + logicalSwitchUuid + " not found, retrying"); final CommandRetryUtility retryUtility = niciraNvpResource.getRetryUtility(); retryUtility.addRetry(command, NUM_RETRIES); return retryUtility.retry(command, ConfigureSharedNetworkUuidAnswer.class, e); } catch (CloudRuntimeException e){ - s_logger.info("Shared network UUID vlan id failed due to : " + e.getMessage()); + logger.info("Shared network UUID vlan id failed due to : " + e.getMessage()); return new ConfigureSharedNetworkUuidAnswer(command, false, e.getMessage()); } - s_logger.info("Found display name " + logicalSwitchDisplayName + " for Logical Switch " + logicalSwitchUuid); + logger.info("Found display name " + logicalSwitchDisplayName + " for Logical Switch " + logicalSwitchUuid); //Step 2: Create lRouterPort - s_logger.debug("Creating Logical Router Port in 
Logical Router " + logicalRouterUuid); + logger.debug("Creating Logical Router Port in Logical Router " + logicalRouterUuid); LogicalRouterPort lRouterPort = null; try { lRouterPort = new LogicalRouterPort(); @@ -108,85 +106,85 @@ public Answer execute(ConfigureSharedNetworkUuidCommand command, NiciraNvpResour lRouterPort = niciraNvpApi.createLogicalRouterPort(logicalRouterUuid, lRouterPort); } catch (NiciraNvpApiException e){ - s_logger.warn("Could not create Logical Router Port on Logical Router " + logicalRouterUuid + " due to: " + e.getMessage() + ", retrying"); + logger.warn("Could not create Logical Router Port on Logical Router " + logicalRouterUuid + " due to: " + e.getMessage() + ", retrying"); return handleException(e, command, niciraNvpResource); } - s_logger.debug("Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") successfully created in Logical Router " + logicalRouterUuid); + logger.debug("Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") successfully created in Logical Router " + logicalRouterUuid); //Step 3: Create lSwitchPort - s_logger.debug("Creating Logical Switch Port in Logical Switch " + logicalSwitchUuid + " (" + logicalSwitchDisplayName + ")"); + logger.debug("Creating Logical Switch Port in Logical Switch " + logicalSwitchUuid + " (" + logicalSwitchDisplayName + ")"); LogicalSwitchPort lSwitchPort = null; try { lSwitchPort = new LogicalSwitchPort(niciraNvpResource.truncate("lrouter-uplink", NAME_MAX_LEN), tags, true); lSwitchPort = niciraNvpApi.createLogicalSwitchPort(logicalSwitchUuid, lSwitchPort); } catch (NiciraNvpApiException e){ - s_logger.warn("Could not create Logical Switch Port on Logical Switch " + logicalSwitchUuid + " (" + logicalSwitchDisplayName + ") due to: " + e.getMessage()); + logger.warn("Could not create Logical Switch Port on Logical Switch " + logicalSwitchUuid + " (" + logicalSwitchDisplayName + ") due to: " + e.getMessage()); 
cleanupLRouterPort(logicalRouterUuid, lRouterPort, niciraNvpApi); return handleException(e, command, niciraNvpResource); } - s_logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully created in Logical Switch " + logicalSwitchUuid + " (" + logicalSwitchDisplayName + ")"); + logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully created in Logical Switch " + logicalSwitchUuid + " (" + logicalSwitchDisplayName + ")"); //Step 4: Attach lRouterPort to lSwitchPort with a PatchAttachment - s_logger.debug("Attaching Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") with a PatchAttachment"); + logger.debug("Attaching Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") with a PatchAttachment"); try { niciraNvpApi.updateLogicalRouterPortAttachment(logicalRouterUuid, lRouterPort.getUuid(), new PatchAttachment(lSwitchPort.getUuid())); } catch (NiciraNvpApiException e) { - s_logger.warn("Could not attach Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") due to: " + e.getMessage() + ", retrying"); + logger.warn("Could not attach Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") due to: " + e.getMessage() + ", retrying"); cleanupLRouterPort(logicalRouterUuid, lRouterPort, niciraNvpApi); cleanupLSwitchPort(logicalSwitchUuid, lSwitchPort, niciraNvpApi); return handleException(e, command, niciraNvpResource); } - s_logger.debug("Logical Router Port " + 
lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") successfully attached to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") with a PatchAttachment"); + logger.debug("Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") successfully attached to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") with a PatchAttachment"); //Step 5: Attach lSwitchPort to lRouterPort with a PatchAttachment - s_logger.debug("Attaching Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") to Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") with a PatchAttachment"); + logger.debug("Attaching Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") to Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") with a PatchAttachment"); try { niciraNvpApi.updateLogicalSwitchPortAttachment(logicalSwitchUuid, lSwitchPort.getUuid(), new PatchAttachment(lRouterPort.getUuid())); } catch (NiciraNvpApiException e){ - s_logger.warn("Could not attach Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") to Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") due to: " + e.getMessage() + ", retrying"); + logger.warn("Could not attach Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") to Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") due to: " + e.getMessage() + ", retrying"); cleanupLRouterPort(logicalRouterUuid, lRouterPort, niciraNvpApi); cleanupLSwitchPort(logicalSwitchUuid, lSwitchPort, niciraNvpApi); return handleException(e, command, niciraNvpResource); } - s_logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully attached to 
Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") with a PatchAttachment"); + logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully attached to Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") with a PatchAttachment"); - s_logger.info("Successfully attached Logical Switch " + logicalSwitchUuid + " on Logical Router " + logicalRouterUuid + " for Shared Network " + networkId); + logger.info("Successfully attached Logical Switch " + logicalSwitchUuid + " on Logical Router " + logicalRouterUuid + " for Shared Network " + networkId); return new ConfigureSharedNetworkUuidAnswer(command, true, "OK"); } private void cleanupLSwitchPort(String logicalSwitchUuid, LogicalSwitchPort lSwitchPort, NiciraNvpApi niciraNvpApi) { - s_logger.warn("Deleting previously created Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid); + logger.warn("Deleting previously created Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid); try { niciraNvpApi.deleteLogicalSwitchPort(logicalSwitchUuid, lSwitchPort.getUuid()); } catch (NiciraNvpApiException exceptionDeleteLSwitchPort) { - s_logger.error("Error while deleting Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid + " due to: " + exceptionDeleteLSwitchPort.getMessage()); + logger.error("Error while deleting Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid + " due to: " + exceptionDeleteLSwitchPort.getMessage()); } - s_logger.warn("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully deleted"); + logger.warn("Logical Switch Port " + 
lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully deleted"); } private void cleanupLRouterPort(String logicalRouterUuid, LogicalRouterPort lRouterPort, NiciraNvpApi niciraNvpApi) { - s_logger.warn("Deleting previously created Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") from Logical Router " + logicalRouterUuid + " and retrying"); + logger.warn("Deleting previously created Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") from Logical Router " + logicalRouterUuid + " and retrying"); try { niciraNvpApi.deleteLogicalRouterPort(logicalRouterUuid, lRouterPort.getUuid()); } catch (NiciraNvpApiException exceptionDelete) { - s_logger.error("Error while deleting Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") from Logical Router " + logicalRouterUuid + " due to: " + exceptionDelete.getMessage()); + logger.error("Error while deleting Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") from Logical Router " + logicalRouterUuid + " due to: " + exceptionDelete.getMessage()); } - s_logger.warn("Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") successfully deleted"); + logger.warn("Logical Router Port " + lRouterPort.getUuid() + " (" + lRouterPort.getDisplayName() + ") successfully deleted"); } private Answer handleException(NiciraNvpApiException e, ConfigureSharedNetworkUuidCommand command, NiciraNvpResource niciraNvpResource) { if (HttpStatusCodeHelper.isConflict(e.getErrorCode())){ - s_logger.warn("There's been a conflict in NSX side, aborting implementation"); + logger.warn("There's been a conflict in NSX side, aborting implementation"); return new ConfigureSharedNetworkUuidAnswer(command, false, "FAILED: There's been a conflict in NSX side"); } else { - s_logger.warn("Error code: " + e.getErrorCode() + ", retrying"); + logger.warn("Error code: " + 
e.getErrorCode() + ", retrying"); final CommandRetryUtility retryUtility = niciraNvpResource.getRetryUtility(); retryUtility.addRetry(command, NUM_RETRIES); return retryUtility.retry(command, ConfigureSharedNetworkUuidAnswer.class, e); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkVlanIdCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkVlanIdCommandWrapper.java index 4fa9876976b0..ebc84f23754c 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkVlanIdCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureSharedNetworkVlanIdCommandWrapper.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ConfigureSharedNetworkVlanIdAnswer; @@ -44,7 +43,6 @@ @ResourceWrapper(handles = ConfigureSharedNetworkVlanIdCommand.class) public class NiciraNvpConfigureSharedNetworkVlanIdCommandWrapper extends CommandWrapper{ - private static final Logger s_logger = Logger.getLogger(NiciraNvpConfigureSharedNetworkVlanIdCommandWrapper.class); @Override public Answer execute(ConfigureSharedNetworkVlanIdCommand command, NiciraNvpResource niciraNvpResource) { @@ -55,10 +53,10 @@ public Answer execute(ConfigureSharedNetworkVlanIdCommand command, NiciraNvpReso tags.add(new NiciraNvpTag("cs_account", command.getOwnerName())); final long networkId = command.getNetworkId(); - s_logger.debug("Connecting Logical Switch " + logicalSwitchUuid + " to L2 Gateway Service " + l2GatewayServiceUuid + ", vlan id " + vlanId + " network " + networkId); + logger.debug("Connecting Logical Switch " + logicalSwitchUuid + " to L2 Gateway Service " + l2GatewayServiceUuid + ", vlan id " + 
vlanId + " network " + networkId); final NiciraNvpApi niciraNvpApi = niciraNvpResource.getNiciraNvpApi(); - s_logger.debug("Creating Logical Switch Port in Logical Switch " + logicalSwitchUuid); + logger.debug("Creating Logical Switch Port in Logical Switch " + logicalSwitchUuid); LogicalSwitchPort lSwitchPort = null; try { lSwitchPort = new LogicalSwitchPort(); @@ -68,12 +66,12 @@ public Answer execute(ConfigureSharedNetworkVlanIdCommand command, NiciraNvpReso lSwitchPort = niciraNvpApi.createLogicalSwitchPort(logicalSwitchUuid, lSwitchPort); } catch (NiciraNvpApiException e){ - s_logger.warn("Could not create Logical Switch Port on Logical Switch " + logicalSwitchUuid + " due to: " + e.getMessage() + ", retrying"); + logger.warn("Could not create Logical Switch Port on Logical Switch " + logicalSwitchUuid + " due to: " + e.getMessage() + ", retrying"); return handleException(e, command, niciraNvpResource); } - s_logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully created in Logical Switch " + logicalSwitchUuid); + logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully created in Logical Switch " + logicalSwitchUuid); - s_logger.debug("Attaching Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") on VLAN " + command.getVlanId() + " using L2GatewayAttachment"); + logger.debug("Attaching Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") on VLAN " + command.getVlanId() + " using L2GatewayAttachment"); try { final L2GatewayAttachment attachment = new L2GatewayAttachment(l2GatewayServiceUuid); if (command.getVlanId() != 0) { @@ -82,33 +80,33 @@ public Answer execute(ConfigureSharedNetworkVlanIdCommand command, NiciraNvpReso niciraNvpApi.updateLogicalSwitchPortAttachment(logicalSwitchUuid, lSwitchPort.getUuid(), attachment); } catch (NiciraNvpApiException e){ - 
s_logger.warn("Could not attach Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") due to: " + e.getMessage() + ", errorCode: " + e.getErrorCode()); + logger.warn("Could not attach Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") to Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") due to: " + e.getMessage() + ", errorCode: " + e.getErrorCode()); cleanup(logicalSwitchUuid, lSwitchPort, niciraNvpApi); return handleException(e, command, niciraNvpResource); } - s_logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully attached on VLAN " + command.getVlanId() + " using L2GatewayAttachment"); + logger.debug("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully attached on VLAN " + command.getVlanId() + " using L2GatewayAttachment"); - s_logger.debug("Successfully connected Logical Switch " + logicalSwitchUuid + " to L2 Gateway Service " + l2GatewayServiceUuid + ", vlan id " + vlanId + ", network " + networkId + ", through Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ")"); + logger.debug("Successfully connected Logical Switch " + logicalSwitchUuid + " to L2 Gateway Service " + l2GatewayServiceUuid + ", vlan id " + vlanId + ", network " + networkId + ", through Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ")"); return new ConfigureSharedNetworkVlanIdAnswer(command, true, "OK"); } private void cleanup(String logicalSwitchUuid, LogicalSwitchPort lSwitchPort, NiciraNvpApi niciraNvpApi) { - s_logger.warn("Deleting previously created Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid); + 
logger.warn("Deleting previously created Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid); try { niciraNvpApi.deleteLogicalSwitchPort(logicalSwitchUuid, lSwitchPort.getUuid()); } catch (NiciraNvpApiException exceptionDeleteLSwitchPort) { - s_logger.error("Error while deleting Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid + " due to: " + exceptionDeleteLSwitchPort.getMessage()); + logger.error("Error while deleting Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") from Logical Switch " + logicalSwitchUuid + " due to: " + exceptionDeleteLSwitchPort.getMessage()); } - s_logger.warn("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully deteled"); + logger.warn("Logical Switch Port " + lSwitchPort.getUuid() + " (" + lSwitchPort.getDisplayName() + ") successfully deleted"); } private Answer handleException(NiciraNvpApiException e, ConfigureSharedNetworkVlanIdCommand command, NiciraNvpResource niciraNvpResource) { if (HttpStatusCodeHelper.isConflict(e.getErrorCode())){ - s_logger.warn("There's been a conflict in NSX side, aborting implementation"); + logger.warn("There's been a conflict in NSX side, aborting implementation"); return new ConfigureSharedNetworkVlanIdAnswer(command, false, "FAILED: There's been a conflict in NSX side"); } else { - s_logger.warn("Error code: " + e.getErrorCode() + ", retrying"); + logger.warn("Error code: " + e.getErrorCode() + ", retrying"); final CommandRetryUtility retryUtility = niciraNvpResource.getRetryUtility(); retryUtility.addRetry(command, NUM_RETRIES); return retryUtility.retry(command, ConfigureSharedNetworkVlanIdAnswer.class, e); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureStaticNatRulesCommandWrapper.java
b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureStaticNatRulesCommandWrapper.java index 595a623b0e40..bc6c03f87aac 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureStaticNatRulesCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpConfigureStaticNatRulesCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ConfigureStaticNatRulesOnLogicalRouterAnswer; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = ConfigureStaticNatRulesOnLogicalRouterCommand.class) public final class NiciraNvpConfigureStaticNatRulesCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpConfigureStaticNatRulesCommandWrapper.class); @Override public Answer execute(final ConfigureStaticNatRulesOnLogicalRouterCommand command, final NiciraNvpResource niciraNvpResource) { @@ -63,14 +61,14 @@ public Answer execute(final ConfigureStaticNatRulesOnLogicalRouterCommand comman if (storedRule.equalsIgnoreUuid(rulepair[1])) { // The outgoing rule exists outgoing = storedRule; - s_logger.debug("Found matching outgoing rule " + outgoing.getUuid()); + logger.debug("Found matching outgoing rule " + outgoing.getUuid()); if (incoming != null) { break; } } else if (storedRule.equalsIgnoreUuid(rulepair[0])) { // The incoming rule exists incoming = storedRule; - s_logger.debug("Found matching incoming rule " + incoming.getUuid()); + logger.debug("Found matching incoming rule " + incoming.getUuid()); if (outgoing != null) { break; } @@ -78,26 +76,26 @@ public Answer execute(final ConfigureStaticNatRulesOnLogicalRouterCommand comman } if (incoming != null && outgoing != null) { if (rule.revoked()) { - s_logger.debug("Deleting incoming rule " + incoming.getUuid()); + 
logger.debug("Deleting incoming rule " + incoming.getUuid()); niciraNvpApi.deleteLogicalRouterNatRule(command.getLogicalRouterUuid(), incoming.getUuid()); - s_logger.debug("Deleting outgoing rule " + outgoing.getUuid()); + logger.debug("Deleting outgoing rule " + outgoing.getUuid()); niciraNvpApi.deleteLogicalRouterNatRule(command.getLogicalRouterUuid(), outgoing.getUuid()); } } else { if (rule.revoked()) { - s_logger.warn("Tried deleting a rule that does not exist, " + rule.getSrcIp() + " -> " + rule.getDstIp()); + logger.warn("Tried deleting a rule that does not exist, " + rule.getSrcIp() + " -> " + rule.getDstIp()); break; } rulepair[0] = niciraNvpApi.createLogicalRouterNatRule(command.getLogicalRouterUuid(), rulepair[0]); - s_logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[0])); + logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[0])); try { rulepair[1] = niciraNvpApi.createLogicalRouterNatRule(command.getLogicalRouterUuid(), rulepair[1]); - s_logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[1])); + logger.debug("Created " + niciraNvpResource.natRuleToString(rulepair[1])); } catch (final NiciraNvpApiException ex) { - s_logger.debug("Failed to create SourceNatRule, rolling back DestinationNatRule"); + logger.debug("Failed to create SourceNatRule, rolling back DestinationNatRule"); niciraNvpApi.deleteLogicalRouterNatRule(command.getLogicalRouterUuid(), rulepair[0].getUuid()); throw ex; // Rethrow original exception } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalRouterCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalRouterCommandWrapper.java index 1031b3b78702..267a59da09eb 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalRouterCommandWrapper.java +++ 
b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalRouterCommandWrapper.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateLogicalRouterAnswer; @@ -50,7 +49,6 @@ @ResourceWrapper(handles = CreateLogicalRouterCommand.class) public final class NiciraNvpCreateLogicalRouterCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpCreateLogicalRouterCommandWrapper.class); @Override public Answer execute(final CreateLogicalRouterCommand command, final NiciraNvpResource niciraNvpResource) { @@ -65,7 +63,7 @@ public Answer execute(final CreateLogicalRouterCommand command, final NiciraNvpR final String publicNetworkIpAddress = command.getPublicIpCidr(); final String internalNetworkAddress = command.getInternalIpCidr(); - s_logger.debug("Creating a logical router with external ip " + publicNetworkIpAddress + " and internal ip " + internalNetworkAddress + "on gateway service " + + logger.debug("Creating a logical router with external ip " + publicNetworkIpAddress + " and internal ip " + internalNetworkAddress + " on gateway service " + gatewayServiceUuid); final NiciraNvpApi niciraNvpApi = niciraNvpResource.getNiciraNvpApi(); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalSwitchPortCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalSwitchPortCommandWrapper.java index 63df438239c7..a0d30542fd6d 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalSwitchPortCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpCreateLogicalSwitchPortCommandWrapper.java @@ -21,7 +21,6 @@
import static com.cloud.network.resource.NiciraNvpResource.NUM_RETRIES; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateLogicalSwitchPortAnswer; @@ -39,7 +38,6 @@ @ResourceWrapper(handles = CreateLogicalSwitchPortCommand.class) public final class NiciraNvpCreateLogicalSwitchPortCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpCreateLogicalSwitchPortCommandWrapper.class); @Override public Answer execute(final CreateLogicalSwitchPortCommand command, final NiciraNvpResource niciraNvpResource) { @@ -56,7 +54,7 @@ public Answer execute(final CreateLogicalSwitchPortCommand command, final Nicira try { niciraNvpApi.updateLogicalSwitchPortAttachment(command.getLogicalSwitchUuid(), newPort.getUuid(), new VifAttachment(attachmentUuid)); } catch (final NiciraNvpApiException ex) { - s_logger.warn("modifyLogicalSwitchPort failed after switchport was created, removing switchport"); + logger.warn("modifyLogicalSwitchPort failed after switchport was created, removing switchport"); niciraNvpApi.deleteLogicalSwitchPort(command.getLogicalSwitchUuid(), newPort.getUuid()); throw ex; // Rethrow the original exception } diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpDeleteLogicalRouterPortCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpDeleteLogicalRouterPortCommandWrapper.java index a087f0731974..a585641bebfd 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpDeleteLogicalRouterPortCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpDeleteLogicalRouterPortCommandWrapper.java @@ -21,7 +21,6 @@ import static com.cloud.network.resource.NiciraNvpResource.NUM_RETRIES; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; 
import com.cloud.agent.api.DeleteLogicalRouterPortAnswer; @@ -36,7 +35,6 @@ @ResourceWrapper(handles = DeleteLogicalRouterPortCommand.class) public class NiciraNvpDeleteLogicalRouterPortCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpDeleteLogicalRouterPortCommandWrapper.class); @Override public Answer execute(DeleteLogicalRouterPortCommand command, NiciraNvpResource niciraNvpResource) { @@ -44,7 +42,7 @@ public Answer execute(DeleteLogicalRouterPortCommand command, NiciraNvpResource final String logicalRouterPortUuid = command.getLogicalRouterPortUuid(); final NiciraNvpApi niciraNvpApi = niciraNvpResource.getNiciraNvpApi(); - s_logger.debug("Deleting Logical Router Port " + logicalRouterPortUuid + " in Logical Router " + logicalRouterUuid); + logger.debug("Deleting Logical Router Port " + logicalRouterPortUuid + " in Logical Router " + logicalRouterUuid); try { niciraNvpApi.deleteLogicalRouterPort(logicalRouterUuid, logicalRouterPortUuid); return new DeleteLogicalRouterPortAnswer(command, true, "Logical Router Port " + logicalRouterPortUuid + " deleted"); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindL2GatewayServiceCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindL2GatewayServiceCommandWrapper.java index 621f503d0634..03858d197305 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindL2GatewayServiceCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindL2GatewayServiceCommandWrapper.java @@ -23,7 +23,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.FindL2GatewayServiceAnswer; @@ -40,7 +39,6 @@ @ResourceWrapper(handles = FindL2GatewayServiceCommand.class) public class 
NiciraNvpFindL2GatewayServiceCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpFindL2GatewayServiceCommandWrapper.class); @Override public Answer execute(FindL2GatewayServiceCommand command, NiciraNvpResource niciraNvpResource) { @@ -49,7 +47,7 @@ public Answer execute(FindL2GatewayServiceCommand command, NiciraNvpResource nic final String type = config.getType(); final NiciraNvpApi niciraNvpApi = niciraNvpResource.getNiciraNvpApi(); - s_logger.info("Looking for L2 Gateway Service " + uuid + " of type " + type); + logger.info("Looking for L2 Gateway Service " + uuid + " of type " + type); try { List lstGW = niciraNvpApi.findL2GatewayServiceByUuidAndType(uuid, type); @@ -59,7 +57,7 @@ public Answer execute(FindL2GatewayServiceCommand command, NiciraNvpResource nic return new FindL2GatewayServiceAnswer(command, true, "L2 Gateway Service " + lstGW.get(0).getDisplayName()+ " found", lstGW.get(0).getUuid()); } } catch (NiciraNvpApiException e) { - s_logger.error("Error finding Gateway Service due to: " + e.getMessage()); + logger.error("Error finding Gateway Service due to: " + e.getMessage()); final CommandRetryUtility retryUtility = niciraNvpResource.getRetryUtility(); retryUtility.addRetry(command, NUM_RETRIES); return retryUtility.retry(command, FindL2GatewayServiceAnswer.class, e); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindLogicalRouterPortCommandWrapper.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindLogicalRouterPortCommandWrapper.java index 364d478ae162..f0ee21662985 100644 --- a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindLogicalRouterPortCommandWrapper.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/resource/wrapper/NiciraNvpFindLogicalRouterPortCommandWrapper.java @@ -23,7 +23,6 @@ import 
java.util.List; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.FindLogicalRouterPortAnswer; @@ -39,7 +38,6 @@ @ResourceWrapper(handles = FindLogicalRouterPortCommand.class) public class NiciraNvpFindLogicalRouterPortCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(NiciraNvpFindLogicalRouterPortCommandWrapper.class); @Override public Answer execute(FindLogicalRouterPortCommand command, NiciraNvpResource niciraNvpResource) { @@ -47,7 +45,7 @@ public Answer execute(FindLogicalRouterPortCommand command, NiciraNvpResource ni final String attachmentLswitchUuid = command.getAttachmentLswitchUuid(); final NiciraNvpApi niciraNvpApi = niciraNvpResource.getNiciraNvpApi(); - s_logger.debug("Finding Logical Router Port in Logical Router " + logicalRouterUuid + " and attachmentLSwitchUuid " + attachmentLswitchUuid); + logger.debug("Finding Logical Router Port in Logical Router " + logicalRouterUuid + " and attachmentLSwitchUuid " + attachmentLswitchUuid); try{ List lRouterPorts = niciraNvpApi.findLogicalRouterPortByAttachmentLSwitchUuid(logicalRouterUuid, attachmentLswitchUuid); @@ -58,7 +56,7 @@ public Answer execute(FindLogicalRouterPortCommand command, NiciraNvpResource ni } } catch (NiciraNvpApiException e){ - s_logger.error("Error finding Logical Router Port due to: " + e.getMessage()); + logger.error("Error finding Logical Router Port due to: " + e.getMessage()); final CommandRetryUtility retryUtility = niciraNvpResource.getRetryUtility(); retryUtility.addRetry(command, NUM_RETRIES); return retryUtility.retry(command, FindLogicalRouterPortAnswer.class, e); diff --git a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/utils/CommandRetryUtility.java b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/utils/CommandRetryUtility.java index f097cbcb83a0..3fd933ccbe13 100644 --- 
a/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/utils/CommandRetryUtility.java +++ b/plugins/network-elements/nicira-nvp/src/main/java/com/cloud/network/utils/CommandRetryUtility.java @@ -23,7 +23,8 @@ import java.lang.reflect.InvocationTargetException; import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -31,7 +32,7 @@ public class CommandRetryUtility { - private static final Logger s_logger = Logger.getLogger(CommandRetryUtility.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int ZERO = 0; private static CommandRetryUtility instance; @@ -72,7 +73,7 @@ public Answer retry(final Command command, final Class answerC if (numRetries > ZERO) { commandsToRetry.put(command, --numRetries); - s_logger.warn("Retrying " + command.getClass().getSimpleName() + ". Number of retries remaining: " + numRetries); + logger.warn("Retrying " + command.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetries); return serverResource.executeRequest(command); } else { diff --git a/plugins/network-elements/nicira-nvp/src/test/java/com/cloud/network/element/NiciraNvpElementTest.java b/plugins/network-elements/nicira-nvp/src/test/java/com/cloud/network/element/NiciraNvpElementTest.java index e037810cd8f4..4c3288d73448 100644 --- a/plugins/network-elements/nicira-nvp/src/test/java/com/cloud/network/element/NiciraNvpElementTest.java +++ b/plugins/network-elements/nicira-nvp/src/test/java/com/cloud/network/element/NiciraNvpElementTest.java @@ -267,7 +267,7 @@ public boolean matches(final ConfigurePublicIpsOnLogicalRouterCommand command) { @Test public void implementSharedNetworkUuidVlanIdTest() throws URISyntaxException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - // SHARED NETWORKS CASE 1: LOGICAL ROUTER'S UUID AS VLAN ID final Network network = mock(Network.class); when(network.getBroadcastDomainType()).thenReturn(BroadcastDomainType.Lswitch); when(network.getBroadcastUri()).thenReturn(new URI("lswitch:aaaaa")); diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java index 9bae4bd19e6f..b35190217d32 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightElement.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.network.opendaylight.agent.commands.StartupOpenDaylightControllerCommand;
@@ -59,7 +58,6 @@ @Component public class OpendaylightElement extends AdapterBase implements ConnectivityProvider, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(OpendaylightElement.class); private static final Map> s_capabilities = setCapabilities(); @Inject diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java index e99ec555b889..659caf046490 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/OpendaylightGuestNetworkGuru.java @@ -60,14 +60,12 @@ import org.apache.cloudstack.network.opendaylight.agent.responses.DestroyPortAnswer; import org.apache.cloudstack.network.opendaylight.dao.OpenDaylightControllerMappingDao; import org.apache.cloudstack.network.opendaylight.dao.OpenDaylightControllerVO; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.List; import java.util.UUID; public class OpendaylightGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(OpendaylightGuestNetworkGuru.class); @Inject protected NetworkOfferingServiceMapDao ntwkOfferingSrvcDao; @@ -93,7 +91,7 @@ protected boolean canHandle(NetworkOffering offering, NetworkType networkType, P && ntwkOfferingSrvcDao.isProviderForNetworkOffering(offering.getId(), Provider.Opendaylight)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -103,17 
+101,17 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use PhysicalNetworkVO physnet = physicalNetworkDao.findById(plan.getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physnet.getId()); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physnet.getName()); + logger.error("No Controller on physical network " + physnet.getName()); return null; } - s_logger.debug("Controller " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); - s_logger.debug("Physical isolation type is ODL, asking GuestNetworkGuru to design this network"); + logger.debug("Controller " + devices.get(0).getUuid() + " found on physical network " + physnet.getId()); + logger.debug("Physical isolation type is ODL, asking GuestNetworkGuru to design this network"); NetworkVO networkObject = (NetworkVO)super.design(offering, plan, userSpecified, owner); if (networkObject == null) { @@ -158,7 +156,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physicalNetworkId); + logger.error("No Controller on physical network " + physicalNetworkId); return null; } OpenDaylightControllerVO controller = devices.get(0); @@ -167,13 +165,13 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin ConfigureNetworkAnswer answer = (ConfigureNetworkAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("ConfigureNetworkCommand failed"); + 
logger.error("ConfigureNetworkCommand failed"); return null; } implemented.setBroadcastUri(BroadcastDomainType.OpenDaylight.toUri(answer.getNetworkUuid())); implemented.setBroadcastDomainType(BroadcastDomainType.OpenDaylight); - s_logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); + logger.info("Implemented OK, network linked to = " + implemented.getBroadcastUri().toString()); return implemented; } @@ -188,7 +186,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physicalNetworkId); + logger.error("No Controller on physical network " + physicalNetworkId); throw new InsufficientVirtualNetworkCapacityException("No OpenDaylight Controller configured for this network", dest.getPod().getId()); } OpenDaylightControllerVO controller = devices.get(0); @@ -196,7 +194,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D AddHypervisorCommand addCmd = new AddHypervisorCommand(dest.getHost().getUuid(), dest.getHost().getPrivateIpAddress()); AddHypervisorAnswer addAnswer = (AddHypervisorAnswer)agentManager.easySend(controller.getHostId(), addCmd); if (addAnswer == null || !addAnswer.getResult()) { - s_logger.error("Failed to add " + dest.getHost().getName() + " as a node to the controller"); + logger.error("Failed to add " + dest.getHost().getName() + " as a node to the controller"); throw new InsufficientVirtualNetworkCapacityException("Failed to add destination hypervisor to the OpenDaylight Controller", dest.getPod().getId()); } @@ -205,7 +203,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D ConfigurePortAnswer answer = (ConfigurePortAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - 
s_logger.error("ConfigureNetworkCommand failed"); + logger.error("ConfigureNetworkCommand failed"); throw new InsufficientVirtualNetworkCapacityException("Failed to configure the port on the OpenDaylight Controller", dest.getPod().getId()); } @@ -222,7 +220,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(physicalNetworkId); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + physicalNetworkId); + logger.error("No Controller on physical network " + physicalNetworkId); throw new CloudRuntimeException("No OpenDaylight controller on this physical network"); } OpenDaylightControllerVO controller = devices.get(0); @@ -231,7 +229,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat DestroyPortAnswer answer = (DestroyPortAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DestroyPortCommand failed"); + logger.error("DestroyPortCommand failed"); success = false; } } @@ -243,13 +241,13 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.OpenDaylight || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } List devices = openDaylightControllerMappingDao.listByPhysicalNetwork(networkObject.getPhysicalNetworkId()); if (devices.isEmpty()) { - s_logger.error("No Controller on physical network " + networkObject.getPhysicalNetworkId()); + logger.error("No Controller on physical network " + 
networkObject.getPhysicalNetworkId()); return; } OpenDaylightControllerVO controller = devices.get(0); @@ -258,7 +256,7 @@ public void shutdown(NetworkProfile profile, NetworkOffering offering) { DestroyNetworkAnswer answer = (DestroyNetworkAnswer)agentManager.easySend(controller.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("DestroyNetworkCommand failed"); + logger.error("DestroyNetworkCommand failed"); } super.shutdown(profile, offering); diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResource.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResource.java index f6046ddc4248..8ea65f4e729f 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResource.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResource.java @@ -31,7 +31,8 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.network.opendaylight.agent.commands.AddHypervisorCommand; import org.apache.cloudstack.network.opendaylight.agent.commands.ConfigureNetworkCommand; @@ -71,7 +72,7 @@ import com.cloud.resource.ServerResource; public class OpenDaylightControllerResource implements ServerResource { - private static final Logger s_logger = Logger.getLogger(OpenDaylightControllerResource.class); + protected Logger logger = LogManager.getLogger(getClass()); private Map configuration = new HashMap(); private URL controllerUrl; @@ -182,7 +183,7 @@ public Answer executeRequest(Command cmd) { @Override public void disconnected() { - s_logger.warn("OpenDaylightControllerResource is disconnected from the 
controller at " + controllerUrl); + logger.warn("OpenDaylightControllerResource is disconnected from the controller at " + controllerUrl); } @@ -225,7 +226,7 @@ private Answer executeRequest(ConfigureNetworkCommand cmd) { break; } } catch (NeutronRestApiException e) { - s_logger.error("Failed to list existing networks on the ODL Controller", e); + logger.error("Failed to list existing networks on the ODL Controller", e); return new ConfigureNetworkAnswer(cmd, e); } @@ -246,7 +247,7 @@ private Answer executeRequest(ConfigureNetworkCommand cmd) { try { wrapper = configureNetwork.createNeutronNetwork(wrapper); } catch (NeutronRestApiException e) { - s_logger.error("createNeutronNetwork failed", e); + logger.error("createNeutronNetwork failed", e); return new ConfigureNetworkAnswer(cmd, e); } @@ -258,7 +259,7 @@ private Answer executeRequest(DestroyNetworkCommand cmd) { try { configureNetwork.deleteNeutronNetwork(cmd.getNetworkUuid()); } catch (NeutronRestApiException e) { - s_logger.error("deleteNeutronNetwork failed", e); + logger.error("deleteNeutronNetwork failed", e); return new DestroyNetworkAnswer(cmd, e); } @@ -287,7 +288,7 @@ private Answer executeRequest(ConfigurePortCommand cmd) { try { portWrapper = configurePort.createNeutronPort(portWrapper); } catch (NeutronRestApiException e) { - s_logger.error("createPortCommand failed", e); + logger.error("createPortCommand failed", e); return new ConfigurePortAnswer(cmd, e); } @@ -300,7 +301,7 @@ private Answer executeRequest(DestroyPortCommand cmd) { try { configurePort.deleteNeutronPort(cmd.getPortId().toString()); } catch (NeutronRestApiException e) { - s_logger.error("deleteNeutronPort failed", e); + logger.error("deleteNeutronPort failed", e); return new DestroyPortAnswer(cmd, e); } @@ -323,7 +324,7 @@ private Answer executeRequest(AddHypervisorCommand cmd) { // Not found in the existing node list, add it nodeActions.updateNeutronNodeV2("OVS", cmd.getHostId(), cmd.getIpAddress(), 6640); } catch 
(NeutronRestApiException e) { - s_logger.error("Call to OpenDaylight failed", e); + logger.error("Call to OpenDaylight failed", e); return new AddHypervisorAnswer(cmd, e); } return new AddHypervisorAnswer(cmd, true, "Hypervisor " + cmd.getHostId() + " added"); diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java index 013c3028071c..8bf68f0c289b 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/agent/OpenDaylightControllerResourceManagerImpl.java @@ -28,7 +28,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.cloudstack.network.opendaylight.api.commands.AddOpenDaylightControllerCmd; @@ -62,7 +63,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class OpenDaylightControllerResourceManagerImpl implements OpenDaylightControllerResourceManager { - private final static Logger s_logger = Logger.getLogger(OpenDaylightControllerResourceManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject HostDao hostDao; diff --git a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/NeutronRestApi.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/NeutronRestApi.java index 980936d8ecef..20ba46c45dd3 100644 --- 
a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/NeutronRestApi.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/NeutronRestApi.java @@ -31,7 +31,8 @@ import org.apache.commons.httpclient.protocol.Protocol; import org.apache.commons.httpclient.protocol.ProtocolSocketFactory; import org.apache.commons.httpclient.protocol.SecureProtocolSocketFactory; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLSocket; @@ -53,7 +54,7 @@ public class NeutronRestApi { - private static final Logger s_logger = Logger.getLogger(NeutronRestApi.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); private static final String PROTOCOL = "https"; @@ -77,7 +78,7 @@ protected NeutronRestApi(final Class httpClazz, final // with the SecureProtocolSocketFactory parameter Protocol.registerProtocol(protocol, new Protocol(protocol, (ProtocolSocketFactory) new TrustingProtocolSocketFactory(), HTTPS_PORT)); } catch (IOException e) { - s_logger.warn("Failed to register the TrustingProtocolSocketFactory, falling back to default SSLSocketFactory", e); + logger.warn("Failed to register the TrustingProtocolSocketFactory, falling back to default SSLSocketFactory", e); } } @@ -97,31 +98,31 @@ public HttpMethodBase createMethod(final URL neutronUrl, final String uri) throw return httpMethod; } catch (MalformedURLException e) { String error = "Unable to build Neutron API URL"; - s_logger.error(error, e); + logger.error(error, e); throw new NeutronRestApiException(error, e); } catch (NoSuchMethodException e) { String error = "Unable to build Neutron API URL due to reflection error"; - s_logger.error(error, e); + logger.error(error, e); 
throw new NeutronRestApiException(error, e); } catch (SecurityException e) { String error = "Unable to build Neutron API URL due to security violation"; - s_logger.error(error, e); + logger.error(error, e); throw new NeutronRestApiException(error, e); } catch (InstantiationException e) { String error = "Unable to build Neutron API due to instantiation error"; - s_logger.error(error, e); + logger.error(error, e); throw new NeutronRestApiException(error, e); } catch (IllegalAccessException e) { String error = "Unable to build Neutron API URL due to absence of access modifier"; - s_logger.error(error, e); + logger.error(error, e); throw new NeutronRestApiException(error, e); } catch (IllegalArgumentException e) { String error = "Unable to build Neutron API URL due to wrong argument in constructor"; - s_logger.error(error, e); + logger.error(error, e); throw new NeutronRestApiException(error, e); } catch (InvocationTargetException e) { String error = "Unable to build Neutron API URL due to target error"; - s_logger.error(error, e); + logger.error(error, e); throw new NeutronRestApiException(error, e); } } @@ -130,11 +131,11 @@ public void executeMethod(final HttpMethodBase method) throws NeutronRestApiExce try { client.executeMethod(method); } catch (HttpException e) { - s_logger.error("HttpException caught while trying to connect to the Neutron Controller", e); + logger.error("HttpException caught while trying to connect to the Neutron Controller", e); method.releaseConnection(); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } catch (IOException e) { - s_logger.error("IOException caught while trying to connect to the Neutron Controller", e); + logger.error("IOException caught while trying to connect to the Neutron Controller", e); method.releaseConnection(); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } diff --git 
a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/resources/Action.java b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/resources/Action.java index 0e8e4319c067..d27789dab953 100644 --- a/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/resources/Action.java +++ b/plugins/network-elements/opendaylight/src/main/java/org/apache/cloudstack/network/opendaylight/api/resources/Action.java @@ -40,11 +40,12 @@ import org.apache.commons.httpclient.methods.PostMethod; import org.apache.commons.httpclient.methods.PutMethod; import org.apache.commons.httpclient.methods.StringRequestEntity; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public abstract class Action { - private static final Logger s_logger = Logger.getLogger(Action.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int BODY_RESP_MAX_LEN = 1024; // private static final String DEFAULT @@ -95,14 +96,14 @@ public String executeGet(final String uri, final Map parameters) if (getMethod.getStatusCode() != HttpStatus.SC_OK) { String errorMessage = responseToErrorMessage(getMethod); getMethod.releaseConnection(); - s_logger.error("Failed to retrieve object : " + errorMessage); + logger.error("Failed to retrieve object : " + errorMessage); throw new NeutronRestApiException("Failed to retrieve object : " + errorMessage); } return getMethod.getResponseBodyAsString(); } catch (NeutronRestApiException e) { - s_logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); + logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } catch (IOException e) { throw new 
NeutronRestApiException(e); @@ -135,13 +136,13 @@ protected String executePost(final String uri, final StringRequestEntity entity) if (postMethod.getStatusCode() != HttpStatus.SC_CREATED) { String errorMessage = responseToErrorMessage(postMethod); postMethod.releaseConnection(); - s_logger.error("Failed to create object : " + errorMessage); + logger.error("Failed to create object : " + errorMessage); throw new NeutronRestApiException("Failed to create object : " + errorMessage); } return postMethod.getResponseBodyAsString(); } catch (NeutronRestApiException e) { - s_logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); + logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } catch (IOException e) { throw new NeutronRestApiException("Failed to load json response body", e); @@ -174,11 +175,11 @@ protected void executePut(final String uri, final StringRequestEntity entity) th if (putMethod.getStatusCode() != HttpStatus.SC_OK) { String errorMessage = responseToErrorMessage(putMethod); putMethod.releaseConnection(); - s_logger.error("Failed to update object : " + errorMessage); + logger.error("Failed to update object : " + errorMessage); throw new NeutronRestApiException("Failed to update object : " + errorMessage); } } catch (NeutronRestApiException e) { - s_logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); + logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } finally { putMethod.releaseConnection(); @@ -206,13 +207,13 @@ protected String executePut(final String uri) throws NeutronRestApiException { if (putMethod.getStatusCode() != HttpStatus.SC_OK) { String errorMessage = 
responseToErrorMessage(putMethod); putMethod.releaseConnection(); - s_logger.error("Failed to update object : " + errorMessage); + logger.error("Failed to update object : " + errorMessage); throw new NeutronRestApiException("Failed to update object : " + errorMessage); } return putMethod.getResponseBodyAsString(); } catch (NeutronRestApiException e) { - s_logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); + logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } catch (IOException e) { throw new NeutronRestApiException("Failed to load json response body", e); @@ -244,11 +245,11 @@ protected void executeDelete(final String uri) throws NeutronRestApiException { if (deleteMethod.getStatusCode() != HttpStatus.SC_NO_CONTENT) { String errorMessage = responseToErrorMessage(deleteMethod); deleteMethod.releaseConnection(); - s_logger.error("Failed to delete object : " + errorMessage); + logger.error("Failed to delete object : " + errorMessage); throw new NeutronRestApiException("Failed to delete object : " + errorMessage); } } catch (NeutronRestApiException e) { - s_logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); + logger.error("NeutronRestApiException caught while trying to execute HTTP Method on the Neutron Controller", e); throw new NeutronRestApiException("API call to Neutron Controller Failed", e); } finally { deleteMethod.releaseConnection(); @@ -279,7 +280,7 @@ private String responseToErrorMessage(final HttpMethodBase method) { try { return method.getResponseBodyAsString(BODY_RESP_MAX_LEN); } catch (IOException e) { - s_logger.debug("Error while loading response body", e); + logger.debug("Error while loading response body", e); } } diff --git 
a/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java index 85dd243306f3..698919542642 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/element/OvsElement.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.network.topology.NetworkTopology; import org.apache.cloudstack.network.topology.NetworkTopologyContext; -import org.apache.log4j.Logger; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupOvsCommand; @@ -102,7 +101,6 @@ public class OvsElement extends AdapterBase implements NetworkElement, @Inject NetworkTopologyContext _networkTopologyContext; - private static final Logger s_logger = Logger.getLogger(OvsElement.class); private static final Map> capabilities = setCapabilities(); @Override @@ -116,21 +114,21 @@ public Provider getProvider() { } protected boolean canHandle(final Network network, final Service service) { - s_logger.debug("Checking if OvsElement can handle service " + logger.debug("Checking if OvsElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (network.getBroadcastDomainType() != BroadcastDomainType.Vswitch) { return false; } if (!_networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("OvsElement is not a provider for network " + logger.debug("OvsElement is not a provider for network " + network.getDisplayText()); return false; } if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), service, Network.Provider.Ovs)) { - s_logger.debug("OvsElement can't provide the " + service.getName() + logger.debug("OvsElement can't provide the " + service.getName() + " service on network " + network.getDisplayText()); return false; } @@ -151,7 +149,7 @@ public boolean implement(final Network network, final 
NetworkOffering offering, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.debug("entering OvsElement implement function for network " + logger.debug("entering OvsElement implement function for network " + network.getDisplayText() + " (state " + network.getState() + ")"); @@ -249,7 +247,7 @@ public boolean canEnableIndividualServices() { @Override public boolean verifyServicesCombination(final Set services) { if (!services.contains(Service.Connectivity)) { - s_logger.warn("Unable to provide services without Connectivity service enabled for this element"); + logger.warn("Unable to provide services without Connectivity service enabled for this element"); return false; } @@ -439,7 +437,7 @@ public boolean applyIps(final Network network, final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual " + logger.debug("Virtual router element doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -464,7 +462,7 @@ public boolean applyStaticNats(final Network network, final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Ovs element doesn't need to apply static nat on the backend; virtual " + logger.debug("Ovs element doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -487,7 +485,7 @@ public boolean applyPFRules(final Network network, final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Ovs element doesn't 
need to apply firewall rules on the backend; virtual " + logger.debug("Ovs element doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -513,7 +511,7 @@ public boolean applyLBRules(final Network network, final List final List routers = _routerDao.listByNetworkAndRole( network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply load balancing rules on the backend; virtual " + logger.debug("Virtual router elemnt doesn't need to apply load balancing rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -525,7 +523,7 @@ public boolean applyLBRules(final Network network, final List for (final DomainRouterVO domainRouterVO : routers) { result = result && networkTopology.applyLoadBalancingRules(network, rules, domainRouterVO); if (!result) { - s_logger.debug("Failed to apply load balancing rules in network " + network.getId()); + logger.debug("Failed to apply load balancing rules in network " + network.getId()); } } } @@ -566,7 +564,7 @@ private boolean canHandleLbRules(final List rules) { if (schemeCaps != null) { for (final LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); return false; diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java index f8d851e586e1..240af3ac78c1 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/guru/OvsGuestNetworkGuru.java @@ -21,7 +21,6 @@ import 
org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; @@ -55,8 +54,6 @@ @Component public class OvsGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger - .getLogger(OvsGuestNetworkGuru.class); @Inject OvsTunnelManager _ovsTunnelMgr; @@ -89,7 +86,7 @@ && isMyIsolationMethod(physicalNetwork) && physicalNetwork.getIsolationMethods().contains("GRE")) { return true; } else { - s_logger.trace(String.format("We only take care of Guest networks of type %s with Service %s or type with %s provider %s in %s zone", + logger.trace(String.format("We only take care of Guest networks of type %s with Service %s or type with %s provider %s in %s zone", GuestType.Isolated, Service.Connectivity, GuestType.Shared, Network.Provider.Ovs, NetworkType.Advanced)); return false; } @@ -103,7 +100,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, .getPhysicalNetworkId()); DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } NetworkVO config = (NetworkVO)super.design(offering, plan, @@ -141,7 +138,7 @@ public Network implement(Network network, NetworkOffering offering, .findById(physicalNetworkId); if (!canHandle(offering, nwType, physnet)) { - s_logger.debug("Refusing to implement this network"); + logger.debug("Refusing to implement this network"); return null; } NetworkVO implemented = (NetworkVO)super.implement(network, offering, @@ -190,13 +187,13 @@ public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vswitch || networkObject.getBroadcastUri() 
== null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } if (profile.getBroadcastDomainType() == BroadcastDomainType.Vswitch ) { - s_logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug("Releasing vnet for the network id=" + profile.getId()); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); } diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java index aca36092feae..c2fed4b75bce 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/OvsTunnelManagerImpl.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -92,7 +91,6 @@ @Component public class OvsTunnelManagerImpl extends ManagerBase implements OvsTunnelManager, StateListener { - public static final Logger s_logger = Logger.getLogger(OvsTunnelManagerImpl.class.getName()); // boolean _isEnabled; ScheduledExecutorService _executorPool; @@ -159,13 +157,13 @@ protected OvsTunnelInterfaceVO createInterfaceRecord(String ip, OvsTunnelInterfaceVO lock = _tunnelInterfaceDao .acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn("Cannot lock table ovs_tunnel_account"); + logger.warn("Cannot lock table ovs_tunnel_account"); return null; } 
_tunnelInterfaceDao.persist(ti); _tunnelInterfaceDao.releaseFromLockTable(lock.getId()); } catch (EntityExistsException e) { - s_logger.debug("A record for the interface for network " + label + logger.debug("A record for the interface for network " + label + " on host id " + hostId + " already exists"); } return ti; @@ -181,7 +179,7 @@ private String handleFetchInterfaceAnswer(Answer[] answers, Long hostId) { } } // Fetch interface failed! - s_logger.warn("Unable to fetch the IP address for the GRE tunnel endpoint" + logger.warn("Unable to fetch the IP address for the GRE tunnel endpoint" + ans.getDetails()); return null; } @@ -193,13 +191,13 @@ protected OvsTunnelNetworkVO createTunnelRecord(long from, long to, long network ta = new OvsTunnelNetworkVO(from, to, key, networkId); OvsTunnelNetworkVO lock = _tunnelNetworkDao.acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn("Cannot lock table ovs_tunnel_account"); + logger.warn("Cannot lock table ovs_tunnel_account"); return null; } _tunnelNetworkDao.persist(ta); _tunnelNetworkDao.releaseFromLockTable(lock.getId()); } catch (EntityExistsException e) { - s_logger.debug("A record for the tunnel from " + from + " to " + to + " already exists"); + logger.debug("A record for the tunnel from " + from + " to " + to + " already exists"); } return ta; } @@ -221,12 +219,12 @@ private void handleCreateTunnelAnswer(Answer[] answers) { } if (!r.getResult()) { tunnel.setState(OvsTunnel.State.Failed.name()); - s_logger.warn("Create GRE tunnel from " + from + " to " + to + " failed due to " + r.getDetails() + logger.warn("Create GRE tunnel from " + from + " to " + to + " failed due to " + r.getDetails() + s); } else { tunnel.setState(OvsTunnel.State.Established.name()); tunnel.setPortName(r.getInPortName()); - s_logger.info("Create GRE tunnel from " + from + " to " + to + " succeeded." + r.getDetails() + s); + logger.info("Create GRE tunnel from " + from + " to " + to + " succeeded." 
+ r.getDetails() + s); } _tunnelNetworkDao.update(tunnel.getId(), tunnel); } @@ -271,7 +269,7 @@ private String getGreEndpointIP(Host host, Network nw) //for network with label on target host Commands fetchIfaceCmds = new Commands(new OvsFetchInterfaceCommand(physNetLabel)); - s_logger.debug("Ask host " + host.getId() + + logger.debug("Ask host " + host.getId() + " to retrieve interface for phy net with label:" + physNetLabel); Answer[] fetchIfaceAnswers = _agentMgr.send(host.getId(), fetchIfaceCmds); @@ -297,7 +295,7 @@ private int getGreKey(Network network) { return key; } catch (NumberFormatException e) { - s_logger.debug("Well well, how did '" + key + logger.debug("Well well, how did '" + key + "' end up in the broadcast URI for the network?"); throw new CloudRuntimeException(String.format( "Invalid GRE key parsed from" @@ -309,7 +307,7 @@ private int getGreKey(Network network) { @DB protected void checkAndCreateTunnel(Network nw, Host host) { - s_logger.debug("Creating tunnels with OVS tunnel manager"); + logger.debug("Creating tunnels with OVS tunnel manager"); long hostId = host.getId(); int key = getGreKey(nw); @@ -324,7 +322,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { OvsTunnelNetworkVO ta = _tunnelNetworkDao.findByFromToNetwork(hostId, rh.longValue(), nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); if (ta == null) { createTunnelRecord(hostId, rh.longValue(), nw.getId(), key); } @@ -337,7 +335,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { hostId, nw.getId()); // Try and create the tunnel even if a previous attempt failed if (ta == null || ta.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel 
from:" + + logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); if (ta == null) { createTunnelRecord(rh.longValue(), hostId, @@ -365,8 +363,8 @@ protected void checkAndCreateTunnel(Network nw, Host host) { Commands cmds = new Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, nw.getId(), myIp, bridgeName, nw.getUuid())); - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + nw.getId()); - s_logger.debug("Ask host " + hostId + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + nw.getId()); + logger.debug("Ask host " + hostId + " to create gre tunnel to " + i); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); @@ -378,7 +376,7 @@ protected void checkAndCreateTunnel(Network nw, Host host) { String otherIp = getGreEndpointIP(rHost, nw); Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), nw.getId(), otherIp, bridgeName, nw.getUuid())); - s_logger.debug("Ask host " + i + " to create gre tunnel to " + logger.debug("Ask host " + i + " to create gre tunnel to " + hostId); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); @@ -389,13 +387,13 @@ protected void checkAndCreateTunnel(Network nw, Host host) { // anyway. 
This will ensure VIF rules will be triggered if (noHost) { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, nw.getId())); - s_logger.debug("Ask host " + hostId + " to configure bridge for network:" + nw.getId()); + logger.debug("Ask host " + hostId + " to configure bridge for network:" + nw.getId()); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } } catch (GreTunnelException | OperationTimedoutException | AgentUnavailableException e) { // I really thing we should do a better handling of these exceptions - s_logger.warn("Ovs Tunnel network created tunnel failed", e); + logger.warn("Ovs Tunnel network created tunnel failed", e); } } @@ -425,7 +423,7 @@ private void handleDestroyTunnelAnswer(Answer ans, long from, long to, long netw if (ans.getResult()) { OvsTunnelNetworkVO lock = _tunnelNetworkDao.acquireInLockTable(Long.valueOf(1)); if (lock == null) { - s_logger.warn(String.format("failed to lock" + + logger.warn(String.format("failed to lock" + "ovs_tunnel_account, remove record of " + "tunnel(from=%1$s, to=%2$s account=%3$s) failed", from, to, networkId)); @@ -435,11 +433,11 @@ private void handleDestroyTunnelAnswer(Answer ans, long from, long to, long netw _tunnelNetworkDao.removeByFromToNetwork(from, to, networkId); _tunnelNetworkDao.releaseFromLockTable(lock.getId()); - s_logger.debug(String.format("Destroy tunnel(account:%1$s," + + logger.debug(String.format("Destroy tunnel(account:%1$s," + "from:%2$s, to:%3$s) successful", networkId, from, to)); } else { - s_logger.debug(String.format("Destroy tunnel(account:%1$s," + "from:%2$s, to:%3$s) failed", networkId, from, to)); + logger.debug(String.format("Destroy tunnel(account:%1$s," + "from:%2$s, to:%3$s) failed", networkId, from, to)); } } @@ -449,24 +447,24 @@ private void handleDestroyBridgeAnswer(Answer ans, long hostId, long networkId) if (ans.getResult()) { OvsTunnelNetworkVO lock = _tunnelNetworkDao.acquireInLockTable(Long.valueOf(1)); if (lock 
== null) { - s_logger.warn("failed to lock ovs_tunnel_network," + "remove record"); + logger.warn("failed to lock ovs_tunnel_network," + "remove record"); return; } _tunnelNetworkDao.removeByFromNetwork(hostId, networkId); _tunnelNetworkDao.releaseFromLockTable(lock.getId()); - s_logger.debug(String.format("Destroy bridge for" + + logger.debug(String.format("Destroy bridge for" + "network %1$s successful", networkId)); } else { - s_logger.debug(String.format("Destroy bridge for" + + logger.debug(String.format("Destroy bridge for" + "network %1$s failed", networkId)); } } private void handleSetupBridgeAnswer(Answer[] answers) { //TODO: Add some error management here? - s_logger.debug("Placeholder for something more meanginful to come"); + logger.debug("Placeholder for something more meanginful to come"); } @Override @@ -493,7 +491,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { if (p.getState().equals(OvsTunnel.State.Established.name())) { Command cmd= new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - s_logger.debug("Destroying tunnel to " + host.getId() + + logger.debug("Destroying tunnel to " + host.getId() + " from " + p.getFrom()); Answer ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), p.getTo(), p.getNetworkId()); @@ -503,11 +501,11 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { Command cmd = new OvsDestroyBridgeCommand(nw.getId(), generateBridgeNameForVpc(nw.getVpcId()), host.getId()); - s_logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "exception while removing host from networks: " + e.getLocalizedMessage()); } } else { @@ 
-521,7 +519,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { int key = getGreKey(nw); String bridgeName = generateBridgeName(nw, key); Command cmd = new OvsDestroyBridgeCommand(nw.getId(), bridgeName, host.getId()); - s_logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); + logger.debug("Destroying bridge for network " + nw.getId() + " on host:" + host.getId()); Answer ans = _agentMgr.send(host.getId(), cmd); handleDestroyBridgeAnswer(ans, host.getId(), nw.getId()); @@ -534,7 +532,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { if (p.getState().equals(OvsTunnel.State.Established.name())) { cmd = new OvsDestroyTunnelCommand(p.getNetworkId(), bridgeName, p.getPortName()); - s_logger.debug("Destroying tunnel to " + host.getId() + + logger.debug("Destroying tunnel to " + host.getId() + " from " + p.getFrom()); ans = _agentMgr.send(p.getFrom(), cmd); handleDestroyTunnelAnswer(ans, p.getFrom(), @@ -542,7 +540,7 @@ public void checkAndRemoveHostFromTunnelNetwork(Network nw, Host host) { } } } catch (Exception e) { - s_logger.warn("Destroy tunnel failed", e); + logger.warn("Destroy tunnel failed", e); } } } @@ -571,12 +569,12 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { // since this is the first VM from the VPC being launched on the host, first setup the bridge try { Commands cmds = new Commands(new OvsSetupBridgeCommand(bridgeName, hostId, null)); - s_logger.debug("Ask host " + hostId + " to create bridge for vpc " + vpcId + " and configure the " + logger.debug("Ask host " + hostId + " to create bridge for vpc " + vpcId + " and configure the " + " bridge for distributed routing."); Answer[] answers = _agentMgr.send(hostId, cmds); handleSetupBridgeAnswer(answers); } catch (OperationTimedoutException | AgentUnavailableException e) { - s_logger.warn("Ovs Tunnel network created bridge failed", e); + logger.warn("Ovs Tunnel network created bridge 
failed", e); } // now that bridge is setup, populate network acl's before the VM gets created @@ -584,7 +582,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { cmd.setSequenceNumber(getNextRoutingPolicyUpdateSequenceNumber(vpcId)); if (!sendVpcRoutingPolicyChangeUpdate(cmd, hostId, bridgeName)) { - s_logger.debug("Failed to send VPC routing policy change update to host : " + hostId + + logger.debug("Failed to send VPC routing policy change update to host : " + hostId + ". But moving on with sending the updates to the rest of the hosts."); } } @@ -608,7 +606,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { tunnelRecord = _tunnelNetworkDao.findByFromToNetwork(hostId, rh.longValue(), vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + rh.longValue()); if (tunnelRecord == null) { createTunnelRecord(hostId, rh.longValue(), vpcNetwork.getId(), key); } @@ -619,7 +617,7 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { tunnelRecord = _tunnelNetworkDao.findByFromToNetwork(rh.longValue(), hostId, vpcNetwork.getId()); // Try and create the tunnel if does not exit or previous attempt failed if (tunnelRecord == null || tunnelRecord.getState().equals(OvsTunnel.State.Failed.name())) { - s_logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); + logger.debug("Attempting to create tunnel from:" + rh.longValue() + " to:" + hostId); if (tunnelRecord == null) { createTunnelRecord(rh.longValue(), hostId, vpcNetwork.getId(), key); } @@ -645,9 +643,9 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { + "Failure is on host:" + rHost.getId()); Commands cmds = new 
Commands( new OvsCreateTunnelCommand(otherIp, key, Long.valueOf(hostId), i, vpcNetwork.getId(), myIp, bridgeName, vpcNetwork.getUuid())); - s_logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + logger.debug("Attempting to create tunnel from:" + hostId + " to:" + i + " for the network " + vpcNetwork.getId()); - s_logger.debug("Ask host " + hostId + logger.debug("Ask host " + hostId + " to create gre tunnel to " + i); Answer[] answers = _agentMgr.send(hostId, cmds); handleCreateTunnelAnswer(answers); @@ -659,14 +657,14 @@ protected void checkAndCreateVpcTunnelNetworks(Host host, long vpcId) { Commands cmds = new Commands(new OvsCreateTunnelCommand(myIp, key, i, Long.valueOf(hostId), vpcNetwork.getId(), otherIp, bridgeName, vpcNetwork.getUuid())); - s_logger.debug("Ask host " + i + " to create gre tunnel to " + logger.debug("Ask host " + i + " to create gre tunnel to " + hostId); Answer[] answers = _agentMgr.send(i, cmds); handleCreateTunnelAnswer(answers); } } catch (GreTunnelException | OperationTimedoutException | AgentUnavailableException e) { // I really thing we should do a better handling of these exceptions - s_logger.warn("Ovs Tunnel network created tunnel failed", e); + logger.warn("Ovs Tunnel network created tunnel failed", e); } } } @@ -723,7 +721,7 @@ private void handleVmStateChange(VMInstanceVO vm) { // send topology change update to VPC spanned hosts for (Long id: vpcSpannedHostIds) { if (!sendVpcTopologyChangeUpdate(topologyConfigCommand, id, bridgeName)) { - s_logger.debug("Failed to send VPC topology change update to host : " + id + ". Moving on " + + logger.debug("Failed to send VPC topology change update to host : " + id + ". 
Moving on " + "with rest of the host update."); } } @@ -732,19 +730,19 @@ private void handleVmStateChange(VMInstanceVO vm) { public boolean sendVpcTopologyChangeUpdate(OvsVpcPhysicalTopologyConfigCommand updateCmd, long hostId, String bridgeName) { try { - s_logger.debug("Sending VPC topology change update to the host " + hostId); + logger.debug("Sending VPC topology change update to the host " + hostId); updateCmd.setHostId(hostId); updateCmd.setBridgeName(bridgeName); Answer ans = _agentMgr.send(hostId, updateCmd); if (ans.getResult()) { - s_logger.debug("Successfully updated the host " + hostId + " with latest VPC topology." ); + logger.debug("Successfully updated the host " + hostId + " with latest VPC topology." ); return true; } else { - s_logger.debug("Failed to update the host " + hostId + " with latest VPC topology." ); + logger.debug("Failed to update the host " + hostId + " with latest VPC topology." ); return false; } } catch (Exception e) { - s_logger.debug("Failed to updated the host " + hostId + " with latest VPC topology.", e ); + logger.debug("Failed to updated the host " + hostId + " with latest VPC topology.", e ); return false; } } @@ -768,7 +766,7 @@ OvsVpcPhysicalTopologyConfigCommand prepareVpcTopologyUpdate(long vpcId) { try { remoteIp = getGreEndpointIP(hostDetails, network); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error getting GRE endpoint: " + e.getLocalizedMessage()); } } @@ -836,13 +834,13 @@ public void onPublishMessage(String senderAddress, String subject, Object args) List vpcSpannedHostIds = _ovsNetworkToplogyGuru.getVpcSpannedHosts(vpcId); for (Long id: vpcSpannedHostIds) { if (!sendVpcRoutingPolicyChangeUpdate(cmd, id, bridgeName)) { - s_logger.debug("Failed to send VPC routing policy change update to host : " + id + + logger.debug("Failed to send VPC routing policy change update to host : " + id + ". 
But moving on with sending the updates to the rest of the hosts."); } } } } catch (Exception e) { - s_logger.debug("Failed to send VPC routing policy change updates all hosts in vpc", e); + logger.debug("Failed to send VPC routing policy change updates all hosts in vpc", e); } } } @@ -893,19 +891,19 @@ private OvsVpcRoutingPolicyConfigCommand prepareVpcRoutingPolicyUpdate(long vpcI private boolean sendVpcRoutingPolicyChangeUpdate(OvsVpcRoutingPolicyConfigCommand updateCmd, long hostId, String bridgeName) { try { - s_logger.debug("Sending VPC routing policies change update to the host " + hostId); + logger.debug("Sending VPC routing policies change update to the host " + hostId); updateCmd.setHostId(hostId); updateCmd.setBridgeName(bridgeName); Answer ans = _agentMgr.send(hostId, updateCmd); if (ans.getResult()) { - s_logger.debug("Successfully updated the host " + hostId + " with latest VPC routing policies." ); + logger.debug("Successfully updated the host " + hostId + " with latest VPC routing policies." ); return true; } else { - s_logger.debug("Failed to update the host " + hostId + " with latest routing policies." ); + logger.debug("Failed to update the host " + hostId + " with latest routing policies." 
); return false; } } catch (Exception e) { - s_logger.debug("Failed to updated the host " + hostId + " with latest routing policies due to" , e ); + logger.debug("Failed to updated the host " + hostId + " with latest routing policies due to" , e ); return false; } } diff --git a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java index 92d1e9717456..eb9cbbc849af 100644 --- a/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java +++ b/plugins/network-elements/ovs/src/main/java/com/cloud/network/ovs/dao/VpcDistributedRouterSeqNoDaoImpl.java @@ -17,7 +17,6 @@ package com.cloud.network.ovs.dao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; @@ -26,7 +25,6 @@ @Component public class VpcDistributedRouterSeqNoDaoImpl extends GenericDaoBase implements VpcDistributedRouterSeqNoDao { - protected static final Logger s_logger = Logger.getLogger(VpcDistributedRouterSeqNoDaoImpl.class); private SearchBuilder VpcIdSearch; protected VpcDistributedRouterSeqNoDaoImpl() { diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/AddPaloAltoFirewallCmd.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/AddPaloAltoFirewallCmd.java index ba1342496b54..214e35d293ea 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/AddPaloAltoFirewallCmd.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/AddPaloAltoFirewallCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 @@ @APICommand(name = "addPaloAltoFirewall", responseObject = 
PaloAltoFirewallResponse.class, description = "Adds a Palo Alto firewall device", requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class AddPaloAltoFirewallCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddPaloAltoFirewallCmd.class.getName()); @Inject PaloAltoFirewallElementService _paFwService; diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ConfigurePaloAltoFirewallCmd.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ConfigurePaloAltoFirewallCmd.java index a1d8ea87794c..77c96d73fd44 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ConfigurePaloAltoFirewallCmd.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ConfigurePaloAltoFirewallCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -43,7 +42,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigurePaloAltoFirewallCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ConfigurePaloAltoFirewallCmd.class.getName()); @Inject PaloAltoFirewallElementService _paFwService; diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/DeletePaloAltoFirewallCmd.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/DeletePaloAltoFirewallCmd.java index 40b930977488..378bad4dad89 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/DeletePaloAltoFirewallCmd.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/DeletePaloAltoFirewallCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -42,7 +41,6 @@ 
@APICommand(name = "deletePaloAltoFirewall", responseObject = SuccessResponse.class, description = " delete a Palo Alto firewall device", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeletePaloAltoFirewallCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeletePaloAltoFirewallCmd.class.getName()); @Inject PaloAltoFirewallElementService _paElementService; diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallNetworksCmd.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallNetworksCmd.java index e2d5f96f06fc..f319d2ae6d4c 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallNetworksCmd.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallNetworksCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -47,7 +46,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPaloAltoFirewallNetworksCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListPaloAltoFirewallNetworksCmd.class.getName()); @Inject PaloAltoFirewallElementService _paFwService; diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallsCmd.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallsCmd.java index cce3ac2b36b1..a3e77dba2bd0 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallsCmd.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/api/commands/ListPaloAltoFirewallsCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListPaloAltoFirewallsCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListPaloAltoFirewallsCmd.class.getName()); private static final String s_name = "listpaloaltofirewallresponse"; @Inject PaloAltoFirewallElementService _paFwService; diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/element/PaloAltoExternalFirewallElement.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/element/PaloAltoExternalFirewallElement.java index d631d9921017..c81ac5f5f0c3 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/element/PaloAltoExternalFirewallElement.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/element/PaloAltoExternalFirewallElement.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -85,7 +84,6 @@ public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManagerImpl implements SourceNatServiceProvider, FirewallServiceProvider, PortForwardingServiceProvider, IpDeployer, PaloAltoFirewallElementService, StaticNatServiceProvider { - private static final Logger s_logger = Logger.getLogger(PaloAltoExternalFirewallElement.class); private static final Map> capabilities = setCapabilities(); @@ -121,18 +119,18 @@ public class PaloAltoExternalFirewallElement extends ExternalFirewallDeviceManag private boolean canHandle(Network network, Service service) { DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); if (zone.getNetworkType() == NetworkType.Advanced && network.getGuestType() != Network.GuestType.Isolated) { - 
s_logger.trace("Element " + getProvider().getName() + "is not handling network type = " + network.getGuestType()); + logger.trace("Element " + getProvider().getName() + "is not handling network type = " + network.getGuestType()); return false; } if (service == null) { if (!_networkManager.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkManager.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -147,7 +145,7 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin // don't have to implement network is Basic zone if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); + logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); return false; } @@ -160,7 +158,7 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin } catch (InsufficientCapacityException capacityException) { // TODO: handle out of capacity exception in more gracefule manner when multiple providers are present for // the network - s_logger.error("Fail to implement the Palo Alto for network " + network, capacityException); + logger.error("Fail to implement the Palo Alto for network " + network, capacityException); return false; } } @@ -182,7 +180,7 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle // don't have to implement network is Basic 
zone if (zone.getNetworkType() == NetworkType.Basic) { - s_logger.debug("Not handling network shutdown in zone of type " + NetworkType.Basic); + logger.debug("Not handling network shutdown in zone of type " + NetworkType.Basic); return false; } @@ -430,7 +428,7 @@ public PaloAltoFirewallResponse createPaloAltoFirewallResponse(ExternalFirewallD @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.Firewall)) { - s_logger.warn("Palo Alto must be used as Firewall Service Provider in the network"); + logger.warn("Palo Alto must be used as Firewall Service Provider in the network"); return false; } return true; diff --git a/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/resource/PaloAltoResource.java b/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/resource/PaloAltoResource.java index ca45ddb66473..9e60db93efd5 100644 --- a/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/resource/PaloAltoResource.java +++ b/plugins/network-elements/palo-alto/src/main/java/com/cloud/network/resource/PaloAltoResource.java @@ -52,7 +52,8 @@ import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.message.BasicNameValuePair; import org.apache.http.protocol.HTTP; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; @@ -109,7 +110,7 @@ public class PaloAltoResource implements ServerResource { private String _threatProfile; private String _logProfile; private String _pingManagementProfile; - private static final Logger s_logger = Logger.getLogger(PaloAltoResource.class); + protected Logger logger = LogManager.getLogger(getClass()); private static String s_apiUri = "/api"; private static HttpClient s_httpclient; @@ -377,7 +378,7 @@ private boolean refreshPaloAltoConnection() { try { return login(_username, _password); } catch 
(ExecutionException e) { - s_logger.error("Failed to login due to " + e.getMessage()); + logger.error("Failed to login due to " + e.getMessage()); return false; } } @@ -487,11 +488,11 @@ private Answer execute(IpAssocCommand cmd, int numRetries) { results[i++] = ip.getPublicIp() + " - success"; } catch (ExecutionException e) { - s_logger.error(e); + logger.error(e); if (numRetries > 0 && refreshPaloAltoConnection()) { int numRetriesRemaining = numRetries - 1; - s_logger.debug("Retrying IPAssocCommand. Number of retries remaining: " + numRetriesRemaining); + logger.debug("Retrying IPAssocCommand. Number of retries remaining: " + numRetriesRemaining); return execute(cmd, numRetriesRemaining); } else { results[i++] = IpAssocAnswer.errorResult; @@ -516,7 +517,7 @@ private void implementGuestNetwork(ArrayList cmdList, GuestNet String msg = "Implemented guest network with type " + type + ". Guest VLAN tag: " + privateVlanTag + ", guest gateway: " + privateGateway + "/" + privateCidrNumber; msg += type.equals(GuestNetworkType.SOURCE_NAT) ? ", source NAT IP: " + publicIp : ""; - s_logger.debug(msg); + logger.debug(msg); } private void shutdownGuestNetwork(ArrayList cmdList, GuestNetworkType type, Long publicVlanTag, String sourceNatIpAddress, long privateVlanTag, @@ -536,7 +537,7 @@ private void shutdownGuestNetwork(ArrayList cmdList, GuestNetw String msg = "Shut down guest network with type " + type + ". Guest VLAN tag: " + privateVlanTag + ", guest gateway: " + privateGateway + "/" + privateCidrSize; msg += type.equals(GuestNetworkType.SOURCE_NAT) ? 
", source NAT IP: " + sourceNatIpAddress : ""; - s_logger.debug(msg); + logger.debug(msg); } /* @@ -564,11 +565,11 @@ private Answer execute(SetFirewallRulesCommand cmd, int numRetries) { return new Answer(cmd); } catch (ExecutionException e) { - s_logger.error(e); + logger.error(e); if (numRetries > 0 && refreshPaloAltoConnection()) { int numRetriesRemaining = numRetries - 1; - s_logger.debug("Retrying SetFirewallRulesCommand. Number of retries remaining: " + numRetriesRemaining); + logger.debug("Retrying SetFirewallRulesCommand. Number of retries remaining: " + numRetriesRemaining); return execute(cmd, numRetriesRemaining); } else { return new Answer(cmd, e); @@ -603,11 +604,11 @@ private Answer execute(SetStaticNatRulesCommand cmd, int numRetries) { return new Answer(cmd); } catch (ExecutionException e) { - s_logger.error(e); + logger.error(e); if (numRetries > 0 && refreshPaloAltoConnection()) { int numRetriesRemaining = numRetries - 1; - s_logger.debug("Retrying SetStaticNatRulesCommand. Number of retries remaining: " + numRetriesRemaining); + logger.debug("Retrying SetStaticNatRulesCommand. Number of retries remaining: " + numRetriesRemaining); return execute(cmd, numRetriesRemaining); } else { return new Answer(cmd, e); @@ -641,11 +642,11 @@ private Answer execute(SetPortForwardingRulesCommand cmd, int numRetries) { return new Answer(cmd); } catch (ExecutionException e) { - s_logger.error(e); + logger.error(e); if (numRetries > 0 && refreshPaloAltoConnection()) { int numRetriesRemaining = numRetries - 1; - s_logger.debug("Retrying SetPortForwardingRulesCommand. Number of retries remaining: " + numRetriesRemaining); + logger.debug("Retrying SetPortForwardingRulesCommand. 
Number of retries remaining: " + numRetriesRemaining); return execute(cmd, numRetriesRemaining); } else { return new Answer(cmd, e); @@ -678,7 +679,7 @@ public boolean managePrivateInterface(ArrayList cmdList, PaloA "']/layer3/units/entry[@name='" + interfaceName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Private sub-interface exists: " + interfaceName + ", " + result); + logger.debug("Private sub-interface exists: " + interfaceName + ", " + result); return result; case ADD: @@ -763,7 +764,7 @@ public boolean managePrivateInterface(ArrayList cmdList, PaloA return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -796,7 +797,7 @@ public boolean managePublicInterface(ArrayList cmdList, PaloAl "']/layer3/units/entry[@name='" + interfaceName + "']/ip/entry[@name='" + publicIp + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Public sub-interface & IP exists: " + interfaceName + " : " + publicIp + ", " + result); + logger.debug("Public sub-interface & IP exists: " + interfaceName + " : " + publicIp + ", " + result); return result; case ADD: @@ -855,7 +856,7 @@ public boolean managePublicInterface(ArrayList cmdList, PaloAl return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -888,7 +889,7 @@ public boolean manageSrcNatRule(ArrayList cmdList, PaloAltoPri params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/rulebase/nat/rules/entry[@name='" + srcNatName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Source NAT exists: " + srcNatName + ", " + result); + logger.debug("Source NAT exists: " + 
srcNatName + ", " + result); return result; case ADD: @@ -932,7 +933,7 @@ public boolean manageSrcNatRule(ArrayList cmdList, PaloAltoPri return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -971,7 +972,7 @@ public boolean manageDstNatRule(ArrayList cmdList, PaloAltoPri params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/rulebase/nat/rules/entry[@name='" + dstNatName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Destination NAT exists: " + dstNatName + ", " + result); + logger.debug("Destination NAT exists: " + dstNatName + ", " + result); return result; case ADD: @@ -1079,7 +1080,7 @@ public boolean manageDstNatRule(ArrayList cmdList, PaloAltoPri return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -1118,7 +1119,7 @@ public boolean manageStcNatRule(ArrayList cmdList, PaloAltoPri params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/rulebase/nat/rules/entry[@name='" + stcNatName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Static NAT exists: " + stcNatName + ", " + result); + logger.debug("Static NAT exists: " + stcNatName + ", " + result); return result; case ADD: @@ -1178,7 +1179,7 @@ public boolean manageStcNatRule(ArrayList cmdList, PaloAltoPri return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -1212,7 +1213,7 @@ public boolean manageFirewallRule(ArrayList cmdList, PaloAltoP params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='" + ruleName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = 
(validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Firewall policy exists: " + ruleName + ", " + result); + logger.debug("Firewall policy exists: " + ruleName + ", " + result); return result; case ADD: @@ -1333,7 +1334,7 @@ public boolean manageFirewallRule(ArrayList cmdList, PaloAltoP // there is an existing default rule, so we need to remove it and add it back after the new rule is added. if (has_default) { - s_logger.debug("Moving the default egress rule after the new rule: " + ruleName); + logger.debug("Moving the default egress rule after the new rule: " + ruleName); NodeList response_body; Document doc = getDocument(e_response); XPath xpath = XPathFactory.newInstance().newXPath(); @@ -1372,7 +1373,7 @@ public boolean manageFirewallRule(ArrayList cmdList, PaloAltoP da_params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='policy_0_" + rule.getSrcVlanTag() + "']"); da_params.put("element", defaultEgressRule); cmdList.add(new DefaultPaloAltoCommand(PaloAltoMethod.POST, da_params)); - s_logger.debug("Completed move of the default egress rule after rule: " + ruleName); + logger.debug("Completed move of the default egress rule after rule: " + ruleName); } return true; @@ -1391,7 +1392,7 @@ public boolean manageFirewallRule(ArrayList cmdList, PaloAltoP return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -1444,7 +1445,7 @@ public boolean manageNetworkIsolation(ArrayList cmdList, PaloA params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/rulebase/security/rules/entry[@name='" + ruleName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Firewall policy exists: " + ruleName + ", " + result); + logger.debug("Firewall policy exists: " + ruleName + ", " + result); return result; case ADD: @@ -1486,7 
+1487,7 @@ public boolean manageNetworkIsolation(ArrayList cmdList, PaloA return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -1503,7 +1504,7 @@ public boolean managePingProfile(ArrayList cmdList, PaloAltoPr params.put("xpath", "/config/devices/entry/network/profiles/interface-management-profile/entry[@name='" + _pingManagementProfile + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Management profile exists: " + _pingManagementProfile + ", " + result); + logger.debug("Management profile exists: " + _pingManagementProfile + ", " + result); return result; case ADD: @@ -1536,7 +1537,7 @@ public boolean managePingProfile(ArrayList cmdList, PaloAltoPr return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -1565,7 +1566,7 @@ public boolean manageService(ArrayList cmdList, PaloAltoPrimat params.put("xpath", "/config/devices/entry/vsys/entry[@name='vsys1']/service/entry[@name='" + serviceName + "']"); String response = request(PaloAltoMethod.GET, params); boolean result = (validResponse(response) && responseNotEmpty(response)); - s_logger.debug("Service exists: " + serviceName + ", " + result); + logger.debug("Service exists: " + serviceName + ", " + result); return result; case ADD: @@ -1604,7 +1605,7 @@ public boolean manageService(ArrayList cmdList, PaloAltoPrimat return true; default: - s_logger.debug("Unrecognized command."); + logger.debug("Unrecognized command."); return false; } } @@ -1711,7 +1712,7 @@ protected String request(PaloAltoMethod method, Map params) thro debug_msg = debug_msg + prettyFormat(responseBody); debug_msg = debug_msg + "\n" + responseBody.replace("\"", "\\\"") + "\n\n"; // test cases - //s_logger.debug(debug_msg); // this can be commented if we don't want to show each request in the log. 
+ //logger.debug(debug_msg); // this can be commented if we don't want to show each request in the log. return responseBody; } @@ -2064,7 +2065,7 @@ private Document getDocument(String xml) throws ExecutionException { try { doc = ParserUtils.getSaferDocumentBuilderFactory().newDocumentBuilder().parse(xmlSource); } catch (Exception e) { - s_logger.error(e); + logger.error(e); throw new ExecutionException(e.getMessage()); } diff --git a/plugins/network-elements/palo-alto/src/test/java/com/cloud/network/resource/PaloAltoResourceTest.java b/plugins/network-elements/palo-alto/src/test/java/com/cloud/network/resource/PaloAltoResourceTest.java index 931e6ccaed00..58f962f13727 100644 --- a/plugins/network-elements/palo-alto/src/test/java/com/cloud/network/resource/PaloAltoResourceTest.java +++ b/plugins/network-elements/palo-alto/src/test/java/com/cloud/network/resource/PaloAltoResourceTest.java @@ -98,7 +98,7 @@ public void setUp() { _context.put("public_using_ethernet", "true"); _context.put("private_using_ethernet", "true"); _context.put("has_management_profile", "true"); - _context.put("enable_console_output", "false"); // CHANGE TO "true" TO ENABLE CONSOLE LOGGING OF TESTS + _context.put("enable_console_output", "false"); // CHANGE TO "true" TO ENABLE CONSOLE LOGGING OF TESTS _resource.setMockContext(_context); } diff --git a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/AddSspCmd.java b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/AddSspCmd.java index 8558c8c05222..7b4e2b1a30f4 100644 --- a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/AddSspCmd.java +++ b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/AddSspCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@
-40,7 +39,6 @@ @APICommand(name = "addStratosphereSsp", responseObject = SspResponse.class, description = "Adds stratosphere ssp server", requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class AddSspCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(AddSspCmd.class.getName()); @Inject SspService _service; @Inject @@ -77,7 +75,7 @@ public long getEntityOwnerId() { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { - s_logger.trace("execute"); + logger.trace("execute"); Host host = _service.addSspHost(this); SspResponse response = new SspResponse(); response.setResponseName(getCommandName()); diff --git a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/DeleteSspCmd.java b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/DeleteSspCmd.java index 3faa0922338f..c4d13140546f 100644 --- a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/DeleteSspCmd.java +++ b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/api/commands/DeleteSspCmd.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "deleteStratosphereSsp", responseObject = SuccessResponse.class, description = "Removes stratosphere ssp server", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteSspCmd extends BaseCmd { - private static final Logger s_logger = Logger.getLogger(DeleteSspCmd.class.getName()); @Inject SspService _service; @@ -58,7 +56,7 @@ public long getEntityOwnerId() { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, 
ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { - s_logger.trace("execute"); + logger.trace("execute"); SuccessResponse resp = new SuccessResponse(); resp.setSuccess(_service.deleteSspHost(this)); this.setResponseObject(resp); diff --git a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java index 91d60913103f..dccc1d7ffa90 100644 --- a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java +++ b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/dao/SspUuidDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.network.Network; import com.cloud.utils.db.GenericDaoBase; @@ -30,7 +29,6 @@ public class SspUuidDaoImpl extends GenericDaoBase implements SspUuidDao { - private static final Logger s_logger = Logger.getLogger(SspUuidDaoImpl.class); protected final SearchBuilder native2uuid; protected final SearchBuilder uuid2native; diff --git a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspClient.java b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspClient.java index 30630a35d69b..c60813badf62 100644 --- a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspClient.java +++ b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspClient.java @@ -40,7 +40,8 @@ import org.apache.http.impl.conn.PoolingClientConnectionManager; import org.apache.http.message.BasicNameValuePair; import org.apache.http.params.CoreConnectionPNames; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; 
import com.google.gson.Gson; import com.google.gson.annotations.SerializedName; @@ -49,7 +50,7 @@ * Stratosphere sdn platform api client */ public class SspClient { - private static final Logger s_logger = Logger.getLogger(SspClient.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final HttpClient s_client = new DefaultHttpClient( new PoolingClientConnectionManager()); static { @@ -79,27 +80,27 @@ private String executeMethod(HttpRequestBase req, String path) { req.setURI(new URI(base.getScheme(), base.getUserInfo(), base.getHost(), base.getPort(), path, null, null)); } catch (URISyntaxException e) { - s_logger.error("invalid API URL " + apiUrl + " path " + path, e); + logger.error("invalid API URL " + apiUrl + " path " + path, e); return null; } try { String content = null; try { content = getHttpClient().execute(req, new BasicResponseHandler()); - s_logger.info("ssp api call: " + req); + logger.info("ssp api call: " + req); } catch (HttpResponseException e) { - s_logger.info("ssp api call failed: " + req, e); + logger.info("ssp api call failed: " + req, e); if (e.getStatusCode() == HttpStatus.SC_UNAUTHORIZED && login()) { req.reset(); content = getHttpClient().execute(req, new BasicResponseHandler()); - s_logger.info("ssp api retry call: " + req); + logger.info("ssp api retry call: " + req); } } return content; } catch (ClientProtocolException e) { // includes HttpResponseException - s_logger.error("ssp api call failed: " + req, e); + logger.error("ssp api call failed: " + req, e); } catch (IOException e) { - s_logger.error("ssp api call failed: " + req, e); + logger.error("ssp api call failed: " + req, e); } return null; } @@ -111,7 +112,7 @@ public boolean login() { new BasicNameValuePair("username", username), new BasicNameValuePair("password", password)))); } catch (UnsupportedEncodingException e) { - s_logger.error("invalid username or password", e); + logger.error("invalid username or password", e); return false; } if 
(executeMethod(method, "/ws.v1/login") != null) { diff --git a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspElement.java b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspElement.java index 475c0d4e061a..bfe9de2c837f 100644 --- a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspElement.java +++ b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/element/SspElement.java @@ -29,7 +29,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.commands.AddSspCmd; import org.apache.cloudstack.api.commands.DeleteSspCmd; @@ -87,7 +86,6 @@ * table for that information. */ public class SspElement extends AdapterBase implements ConnectivityProvider, SspManager, SspService, NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(SspElement.class); public static final String s_SSP_NAME = "StratosphereSsp"; private static final Provider s_ssp_provider = new Provider(s_SSP_NAME, false); @@ -180,7 +178,7 @@ public boolean isReady(PhysicalNetworkServiceProvider provider) { if (fetchSspClients(physicalNetwork.getId(), physicalNetwork.getDataCenterId(), false).size() > 0) { return true; } - s_logger.warn("Ssp api endpoint not found. " + physicalNetwork.toString()); + logger.warn("Ssp api endpoint not found. " + physicalNetwork.toString()); return false; } @@ -194,9 +192,9 @@ public boolean canHandle(PhysicalNetwork physicalNetwork) { if (fetchSspClients(physicalNetwork.getId(), physicalNetwork.getDataCenterId(), true).size() > 0) { return true; } - s_logger.warn("enabled Ssp api endpoint not found. " + physicalNetwork.toString()); + logger.warn("enabled Ssp api endpoint not found. 
" + physicalNetwork.toString()); } else { - s_logger.warn("PhysicalNetwork is NULL."); + logger.warn("PhysicalNetwork is NULL."); } return false; } @@ -204,7 +202,7 @@ public boolean canHandle(PhysicalNetwork physicalNetwork) { private boolean canHandle(Network network) { if (canHandle(_physicalNetworkDao.findById(network.getPhysicalNetworkId()))) { if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), Service.Connectivity, getProvider())) { - s_logger.info("SSP is implicitly active for " + network); + logger.info("SSP is implicitly active for " + network); } return true; } @@ -231,7 +229,7 @@ public Host addSspHost(AddSspCmd cmd) { _sspCredentialDao.persist(credential); } else { if (cmd.getUsername() != null || cmd.getPassword() != null) { - s_logger.warn("Tenant credential already configured for zone:" + zoneId); + logger.warn("Tenant credential already configured for zone:" + zoneId); } } @@ -246,7 +244,7 @@ public Host addSspHost(AddSspCmd cmd) { _sspTenantDao.persist(tenant); } else { if (cmd.getTenantUuid() != null) { - s_logger.warn("Tenant uuid already configured for zone:" + zoneId); + logger.warn("Tenant uuid already configured for zone:" + zoneId); } } @@ -266,7 +264,7 @@ public Host addSspHost(AddSspCmd cmd) { _hostDao.loadDetails(host); if ("v1Api".equals(host.getDetail("sspHost"))) { if (normalizedUrl.equals(host.getDetail("url"))) { - s_logger.warn("Ssp host already registered " + normalizedUrl); + logger.warn("Ssp host already registered " + normalizedUrl); return host; } } @@ -289,14 +287,14 @@ public Host addSspHost(AddSspCmd cmd) { @Override public boolean deleteSspHost(DeleteSspCmd cmd) { - s_logger.info("deleteStratosphereSsp"); + logger.info("deleteStratosphereSsp"); return _hostDao.remove(cmd.getHostId()); } @Override public boolean createNetwork(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) { if (_sspUuidDao.findUuidByNetwork(network) != null) { - s_logger.info("Network already 
has ssp TenantNetwork uuid :" + network.toString()); + logger.info("Network already has ssp TenantNetwork uuid :" + network.toString()); return true; } if (!canHandle(network)) { @@ -322,10 +320,10 @@ public boolean createNetwork(Network network, NetworkOffering offering, DeployDe processed = true; } if (processed) { - s_logger.error("Could not allocate an uuid for network " + network.toString()); + logger.error("Could not allocate an uuid for network " + network.toString()); return false; } else { - s_logger.error("Skipping #createNetwork() for " + network.toString()); + logger.error("Skipping #createNetwork() for " + network.toString()); return true; } } @@ -343,10 +341,10 @@ public boolean deleteNetwork(Network network) { } } if (!processed) { - s_logger.error("Ssp api tenant network deletion failed " + network.toString()); + logger.error("Ssp api tenant network deletion failed " + network.toString()); } } else { - s_logger.debug("Silently skipping #deleteNetwork() for " + network.toString()); + logger.debug("Silently skipping #deleteNetwork() for " + network.toString()); } return true; } @@ -356,7 +354,7 @@ public boolean deleteNetwork(Network network) { public boolean createNicEnv(Network network, NicProfile nic, DeployDestination dest, ReservationContext context) { String tenantNetworkUuid = _sspUuidDao.findUuidByNetwork(network); if (tenantNetworkUuid == null) { - s_logger.debug("Skipping #createNicEnv() for nic on " + network.toString()); + logger.debug("Skipping #createNicEnv() for nic on " + network.toString()); return true; } @@ -364,7 +362,7 @@ public boolean createNicEnv(Network network, NicProfile nic, DeployDestination d List tenantPortUuidVos = _sspUuidDao.listUUidVoByNicProfile(nic); for (SspUuidVO tenantPortUuidVo : tenantPortUuidVos) { if (reservationId.equals(tenantPortUuidVo.getReservationId())) { - s_logger.info("Skipping because reservation found " + reservationId); + logger.info("Skipping because reservation found " + reservationId); return 
true; } } @@ -386,7 +384,7 @@ public boolean createNicEnv(Network network, NicProfile nic, DeployDestination d } } if (tenantPortUuid == null) { - s_logger.debug("#createNicEnv() failed for nic on " + network.toString()); + logger.debug("#createNicEnv() failed for nic on " + network.toString()); return false; } @@ -400,14 +398,14 @@ public boolean createNicEnv(Network network, NicProfile nic, DeployDestination d return true; } } - s_logger.error("Updating vif failed " + nic.toString()); + logger.error("Updating vif failed " + nic.toString()); return false; } @Override public boolean deleteNicEnv(Network network, NicProfile nic, ReservationContext context) { if (context == null) { - s_logger.error("ReservationContext was null for " + nic + " " + network); + logger.error("ReservationContext was null for " + nic + " " + network); return false; } String reservationId = context.getReservationId(); @@ -434,7 +432,7 @@ public boolean deleteNicEnv(Network network, NicProfile nic, ReservationContext } } if (!processed) { - s_logger.warn("Ssp api nic detach failed " + nic.toString()); + logger.warn("Ssp api nic detach failed " + nic.toString()); } processed = false; for (SspClient client : fetchSspClients(network.getPhysicalNetworkId(), network.getDataCenterId(), true)) { @@ -445,7 +443,7 @@ public boolean deleteNicEnv(Network network, NicProfile nic, ReservationContext } } if (!processed) { - s_logger.warn("Ssp api tenant port deletion failed " + nic.toString()); + logger.warn("Ssp api tenant port deletion failed " + nic.toString()); } _sspUuidDao.removeUuid(tenantPortUuid); } @@ -467,7 +465,7 @@ public boolean deleteNicEnv(Network network, NicProfile nic, ReservationContext @Override public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.info("implement"); + logger.info("implement"); return 
createNetwork(network, offering, dest, context); } @@ -480,7 +478,7 @@ public boolean implement(Network network, NetworkOffering offering, DeployDestin */ @Override public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("shutdown"); + logger.trace("shutdown"); return deleteNetwork(network); } @@ -494,7 +492,7 @@ public boolean shutdown(Network network, ReservationContext context, boolean cle @Override public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - s_logger.trace("prepare"); + logger.trace("prepare"); return createNicEnv(network, nic, dest, context); } @@ -508,7 +506,7 @@ public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm @Override public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("release"); + logger.trace("release"); return deleteNicEnv(network, nic, context); } @@ -520,7 +518,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm */ @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("destroy"); + logger.trace("destroy"); // nothing to do here. 
return true; } @@ -528,19 +526,19 @@ public boolean destroy(Network network, ReservationContext context) throws Concu @Override public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.trace("shutdownProviderInstances"); + logger.trace("shutdownProviderInstances"); return true; } @Override public boolean canEnableIndividualServices() { - s_logger.trace("canEnableIndividualServices"); + logger.trace("canEnableIndividualServices"); return true; // because there is only Connectivity } @Override public boolean verifyServicesCombination(Set services) { - s_logger.trace("verifyServicesCombination " + services.toString()); + logger.trace("verifyServicesCombination " + services.toString()); return true; } @@ -549,13 +547,13 @@ public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineP try { prepare(network, nic, vm, dest, context); } catch (ConcurrentOperationException e) { - s_logger.error("prepareForMigration failed.", e); + logger.error("prepareForMigration failed.", e); return false; } catch (ResourceUnavailableException e) { - s_logger.error("prepareForMigration failed.", e); + logger.error("prepareForMigration failed.", e); return false; } catch (InsufficientCapacityException e) { - s_logger.error("prepareForMigration failed.", e); + logger.error("prepareForMigration failed.", e); return false; } return true; @@ -566,9 +564,9 @@ public void rollbackMigration(NicProfile nic, Network network, VirtualMachinePro try { release(network, nic, vm, dst); } catch (ConcurrentOperationException e) { - s_logger.error("rollbackMigration failed.", e); + logger.error("rollbackMigration failed.", e); } catch (ResourceUnavailableException e) { - s_logger.error("rollbackMigration failed.", e); + logger.error("rollbackMigration failed.", e); } } @@ -577,9 +575,9 @@ public void commitMigration(NicProfile nic, Network network, 
VirtualMachineProfi try { release(network, nic, vm, src); } catch (ConcurrentOperationException e) { - s_logger.error("commitMigration failed.", e); + logger.error("commitMigration failed.", e); } catch (ResourceUnavailableException e) { - s_logger.error("commitMigration failed.", e); + logger.error("commitMigration failed.", e); } } diff --git a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java index 9ede8cc5a3e6..894c258939e2 100644 --- a/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java +++ b/plugins/network-elements/stratosphere-ssp/src/main/java/org/apache/cloudstack/network/guru/SspGuestNetworkGuru.java @@ -35,7 +35,6 @@ import com.cloud.vm.VirtualMachineProfile; import org.apache.cloudstack.network.element.SspElement; import org.apache.cloudstack.network.element.SspManager; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -43,7 +42,6 @@ * Stratosphere SDN Platform NetworkGuru */ public class SspGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(SspGuestNetworkGuru.class); @Inject SspManager _sspMgr; @@ -59,7 +57,7 @@ public SspGuestNetworkGuru() { @Override protected boolean canHandle(NetworkOffering offering, NetworkType networkType, PhysicalNetwork physicalNetwork) { - s_logger.trace("canHandle"); + logger.trace("canHandle"); String setting = null; if (physicalNetwork != null && physicalNetwork.getIsolationMethods().contains("SSP")) { @@ -70,18 +68,18 @@ protected boolean canHandle(NetworkOffering offering, NetworkType networkType, P } if (setting != null) { if (networkType != NetworkType.Advanced) { - s_logger.info("SSP enebled by " + setting + " but not active because networkType was " + networkType); + 
logger.info("SSP enebled by " + setting + " but not active because networkType was " + networkType); } else if (!isMyTrafficType(offering.getTrafficType())) { - s_logger.info("SSP enabled by " + setting + " but not active because traffic type not Guest"); + logger.info("SSP enabled by " + setting + " but not active because traffic type not Guest"); } else if (offering.getGuestType() != Network.GuestType.Isolated) { - s_logger.info("SSP works for network isolatation."); + logger.info("SSP works for network isolatation."); } else if (!_sspMgr.canHandle(physicalNetwork)) { - s_logger.info("SSP manager not ready"); + logger.info("SSP manager not ready"); } else { return true; } } else { - s_logger.debug("SSP not configured to be active"); + logger.debug("SSP not configured to be active"); } return false; } @@ -96,7 +94,7 @@ protected boolean canHandle(NetworkOffering offering, NetworkType networkType, P @Override public Network implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws InsufficientVirtualNetworkCapacityException { - s_logger.trace("implement " + network.toString()); + logger.trace("implement " + network.toString()); super.implement(network, offering, dest, context); _sspMgr.createNetwork(network, offering, dest, context); return network; @@ -104,7 +102,7 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin @Override public void shutdown(NetworkProfile profile, NetworkOffering offering) { - s_logger.trace("shutdown " + profile.toString()); + logger.trace("shutdown " + profile.toString()); _sspMgr.deleteNetwork(profile); super.shutdown(profile, offering); } @@ -133,10 +131,10 @@ public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineP try { reserve(nic, network, vm, dest, context); } catch (InsufficientVirtualNetworkCapacityException e) { - s_logger.error("prepareForMigration failed", e); + logger.error("prepareForMigration failed", e); return 
false; } catch (InsufficientAddressCapacityException e) { - s_logger.error("prepareForMigration failed", e); + logger.error("prepareForMigration failed", e); return false; } return true; diff --git a/plugins/network-elements/tungsten/pom.xml b/plugins/network-elements/tungsten/pom.xml index 36c1a17d12ba..b0a46b1331a7 100644 --- a/plugins/network-elements/tungsten/pom.xml +++ b/plugins/network-elements/tungsten/pom.xml @@ -43,5 +43,9 @@ juniper-tungsten-api 2.0 + + ch.qos.reload4j + reload4j + diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricNetworkGatewayToLogicalRouterCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricNetworkGatewayToLogicalRouterCmd.java index 54d6bbc0d79a..d3357d4f2db7 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricNetworkGatewayToLogicalRouterCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricNetworkGatewayToLogicalRouterCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricLogicalRouterResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -43,7 +42,6 @@ @APICommand(name = AddTungstenFabricNetworkGatewayToLogicalRouterCmd.APINAME, description = "add Tungsten-Fabric network gateway to logical router", responseObject = TungstenFabricLogicalRouterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddTungstenFabricNetworkGatewayToLogicalRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddTungstenFabricNetworkGatewayToLogicalRouterCmd.class.getName()); public static 
final String APINAME = "addTungstenFabricNetworkGatewayToLogicalRouter"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricPolicyRuleCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricPolicyRuleCmd.java index 194157c3f66b..8c3235ef17e0 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricPolicyRuleCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/AddTungstenFabricPolicyRuleCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricRuleResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -42,7 +41,6 @@ responseObject = TungstenFabricRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AddTungstenFabricPolicyRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(AddTungstenFabricPolicyRuleCmd.class.getName()); public static final String APINAME = "addTungstenFabricPolicyRule"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricPolicyCmd.java index 063e6c09a41f..7327606b1884 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricPolicyCmd.java @@ -33,7 +33,6 @@ import 
org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = TungstenFabricPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ApplyTungstenFabricPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ApplyTungstenFabricPolicyCmd.class.getName()); public static final String APINAME = "applyTungstenFabricPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricTagCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricTagCmd.java index 1bad31869ae1..ee30af45fc00 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricTagCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ApplyTungstenFabricTagCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricTagResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -42,7 +41,6 @@ @APICommand(name = ApplyTungstenFabricTagCmd.APINAME, description = "apply Tungsten-Fabric tag", responseObject = TungstenFabricTagResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ApplyTungstenFabricTagCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(ApplyTungstenFabricTagCmd.class.getName()); public static final String APINAME = "applyTungstenFabricTag"; @Inject diff 
--git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java index 305eb60abb26..19bf0a339094 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ConfigTungstenFabricServiceCmd.java @@ -47,7 +47,6 @@ import org.apache.cloudstack.api.response.PhysicalNetworkResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; -import org.apache.log4j.Logger; import java.util.HashMap; import java.util.List; @@ -58,7 +57,6 @@ @APICommand(name = ConfigTungstenFabricServiceCmd.APINAME, description = "config Tungsten-Fabric service", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ConfigTungstenFabricServiceCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ConfigTungstenFabricServiceCmd.class.getName()); public static final String APINAME = "configTungstenFabricService"; public static final String NETWORKOFFERING = "DefaultTungstenFarbicNetworkOffering"; @@ -139,18 +137,18 @@ private void persistDefaultSystemNetwork() { private void persistNetworkServiceMapAvoidingDuplicates(Network network, NetworkServiceMapVO mapVO) { if (mapVO == null) { - s_logger.error("Expected a network-service-provider mapping entity to be persisted"); + logger.error("Expected a network-service-provider mapping entity to be persisted"); return; } Network.Service service = Network.Service.getService(mapVO.getService()); Network.Provider provider = Network.Provider.getProvider(mapVO.getProvider()); if (service == null || provider == 
null) { - s_logger.error(String.format("Could not obtain the service or the provider " + + logger.error(String.format("Could not obtain the service or the provider " + "from the network-service-provider map with ID = %s", mapVO.getId())); return; } if (networkServiceMapDao.canProviderSupportServiceInNetwork(network.getId(), service, provider)) { - s_logger.debug(String.format("A mapping between the network, service and provider (%s, %s, %s) " + + logger.debug(String.format("A mapping between the network, service and provider (%s, %s, %s) " + "already exists, skipping duplicated entry", network.getId(), service.getName(), provider.getName())); return; diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricAddressGroupCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricAddressGroupCmd.java index edf19df2e5ac..54797d22da35 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricAddressGroupCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricAddressGroupCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricAddressGroupResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = TungstenFabricAddressGroupResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricAddressGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricAddressGroupCmd.class.getName()); public static final String APINAME = "createTungstenFabricAddressGroup"; @Inject diff 
--git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricApplicationPolicySetCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricApplicationPolicySetCmd.java index 4cf39d675dc9..2588d08aa855 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricApplicationPolicySetCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricApplicationPolicySetCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricApplicationPolicySetResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = CreateTungstenFabricApplicationPolicySetCmd.APINAME, description = "create Tungsten-Fabric application policy set", responseObject = TungstenFabricApplicationPolicySetResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricApplicationPolicySetCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricApplicationPolicySetCmd.class.getName()); public static final String APINAME = "createTungstenFabricApplicationPolicySet"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallPolicyCmd.java index d2c93f5dd630..d04baf1de3b8 100644 --- 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallPolicyCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricFirewallPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = TungstenFabricFirewallPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricFirewallPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricFirewallPolicyCmd.class.getName()); public static final String APINAME = "createTungstenFabricFirewallPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallRuleCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallRuleCmd.java index f2cd0684e9ab..ed67f15ff8cf 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallRuleCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricFirewallRuleCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricFirewallRuleResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -42,7 +41,6 @@ responseObject = 
TungstenFabricFirewallRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricFirewallRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricFirewallRuleCmd.class.getName()); public static final String APINAME = "createTungstenFabricFirewallRule"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricLogicalRouterCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricLogicalRouterCmd.java index add6e503e2d3..a9a775a16c3e 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricLogicalRouterCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricLogicalRouterCmd.java @@ -34,14 +34,12 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricLogicalRouterResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = CreateTungstenFabricLogicalRouterCmd.APINAME, description = "create Tungsten-Fabric logical router", responseObject = TungstenFabricLogicalRouterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricLogicalRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricLogicalRouterCmd.class.getName()); public static final String APINAME = "createTungstenFabricLogicalRouter"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricManagementNetworkCmd.java 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricManagementNetworkCmd.java index bb4414eaecf4..831be57a8d21 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricManagementNetworkCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricManagementNetworkCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricManagementNetworkCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricManagementNetworkCmd.class.getName()); public static final String APINAME = "createTungstenFabricManagementNetwork"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPolicyCmd.java index a7251d52b6cc..9151825c1223 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPolicyCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import 
org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = TungstenFabricPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricPolicyCmd.class.getName()); public static final String APINAME = "createTungstenFabricPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricProviderCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricProviderCmd.java index 98cb3f671ef6..27fdc020d446 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricProviderCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricProviderCmd.java @@ -32,14 +32,12 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricProviderResponse; import org.apache.cloudstack.network.tungsten.service.TungstenProviderService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = CreateTungstenFabricProviderCmd.APINAME, description = "Create Tungsten-Fabric provider in cloudstack", responseObject = TungstenFabricProviderResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricProviderCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricProviderCmd.class.getName()); public static final String APINAME = "createTungstenFabricProvider"; @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, required = true diff --git 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPublicNetworkCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPublicNetworkCmd.java index ba1eb90a4b08..059cff2a9230 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPublicNetworkCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricPublicNetworkCmd.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -46,7 +45,6 @@ @APICommand(name = CreateTungstenFabricPublicNetworkCmd.APINAME, description = "create Tungsten-Fabric public network", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricPublicNetworkCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricPublicNetworkCmd.class.getName()); public static final String APINAME = "createTungstenFabricPublicNetwork"; diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricServiceGroupCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricServiceGroupCmd.java index ae3b2bf533b5..f92ccd1b7c79 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricServiceGroupCmd.java +++ 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricServiceGroupCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricServiceGroupResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = CreateTungstenFabricServiceGroupCmd.APINAME, description = "create Tungsten-Fabric service group", responseObject = TungstenFabricServiceGroupResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricServiceGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricServiceGroupCmd.class.getName()); public static final String APINAME = "createTungstenFabricServiceGroup"; diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagCmd.java index b46e5ffef536..dccc947bd977 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricTagResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = CreateTungstenFabricTagCmd.APINAME, description = "create Tungsten-Fabric tag", responseObject = TungstenFabricTagResponse.class, 
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricTagCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricTagCmd.class.getName()); public static final String APINAME = "createTungstenFabricTag"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagTypeCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagTypeCmd.java index 4a6d29d5a0be..699a7efcd7db 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagTypeCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/CreateTungstenFabricTagTypeCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricTagTypeResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = CreateTungstenFabricTagTypeCmd.APINAME, description = "create Tungsten-Fabric tag type", responseObject = TungstenFabricTagTypeResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class CreateTungstenFabricTagTypeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(CreateTungstenFabricTagTypeCmd.class.getName()); public static final String APINAME = "createTungstenFabricTagType"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricAddressGroupCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricAddressGroupCmd.java index 
b1b130a48978..c3dbff2310cc 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricAddressGroupCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricAddressGroupCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = DeleteTungstenFabricAddressGroupCmd.APINAME, description = "delete Tungsten-Fabric address group", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricAddressGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricAddressGroupCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricAddressGroup"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricApplicationPolicySetCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricApplicationPolicySetCmd.java index fe1c95230f3d..34da45253fdf 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricApplicationPolicySetCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricApplicationPolicySetCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import 
javax.inject.Inject; @APICommand(name = DeleteTungstenFabricApplicationPolicySetCmd.APINAME, description = "delete Tungsten-Fabric application policy set", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricApplicationPolicySetCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricApplicationPolicySetCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricApplicationPolicySet"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallPolicyCmd.java index 61d166a71fdd..6a834cd9391b 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallPolicyCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ + "policy", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricFirewallPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricFirewallPolicyCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricFirewallPolicy"; @Inject diff --git 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallRuleCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallRuleCmd.java index 536aad720367..d1daaf323fc7 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallRuleCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricFirewallRuleCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = DeleteTungstenFabricFirewallRuleCmd.APINAME, description = "delete Tungsten-Fabric firewall rule", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricFirewallRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricFirewallRuleCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricFirewallRule"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricLogicalRouterCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricLogicalRouterCmd.java index 953b74857f88..2b0b4c64d554 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricLogicalRouterCmd.java +++ 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricLogicalRouterCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -42,7 +41,6 @@ @APICommand(name = DeleteTungstenFabricLogicalRouterCmd.APINAME, description = "delete Tungsten-Fabric logical router", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricLogicalRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricLogicalRouterCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricLogicalRouter"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricPolicyCmd.java index 4398a1091271..f30249360799 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricPolicyCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = DeleteTungstenFabricPolicyCmd.APINAME, description = "delete Tungsten-Fabric policy", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) 
public class DeleteTungstenFabricPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricPolicyCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricServiceGroupCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricServiceGroupCmd.java index 28be9e59af63..ab3bd7e984bd 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricServiceGroupCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricServiceGroupCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = DeleteTungstenFabricServiceGroupCmd.APINAME, description = "delete Tungsten-Fabric service group", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricServiceGroupCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricServiceGroupCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricServiceGroup"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagCmd.java index afc15022bd64..44b660291a54 100644 --- 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = DeleteTungstenFabricTagCmd.APINAME, description = "delete Tungsten-Fabric tag", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricTagCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricTagCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricTag"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagTypeCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagTypeCmd.java index 418ec5284d47..c9537fc65387 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagTypeCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/DeleteTungstenFabricTagTypeCmd.java @@ -33,14 +33,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = DeleteTungstenFabricTagTypeCmd.APINAME, description = "delete Tungsten-Fabric tag type", responseObject = 
SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class DeleteTungstenFabricTagTypeCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(DeleteTungstenFabricTagTypeCmd.class.getName()); public static final String APINAME = "deleteTungstenFabricTagType"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/GetLoadBalancerSslCertificateCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/GetLoadBalancerSslCertificateCmd.java index 3e93adb35d5e..eb65ae1aff66 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/GetLoadBalancerSslCertificateCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/GetLoadBalancerSslCertificateCmd.java @@ -34,14 +34,12 @@ import org.apache.cloudstack.network.tungsten.api.response.TlsDataResponse; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = GetLoadBalancerSslCertificateCmd.APINAME, description = "get load balancer certificate", responseObject = TlsDataResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class GetLoadBalancerSslCertificateCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(GetLoadBalancerSslCertificateCmd.class.getName()); public static final String APINAME = "getLoadBalancerSslCertificate"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricAddressGroupCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricAddressGroupCmd.java index a96bbc4d324f..297823689d6c 100644 
--- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricAddressGroupCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricAddressGroupCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricAddressGroupResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricAddressGroupResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricAddressGroupCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricAddressGroupCmd.class.getName()); public static final String APINAME = "listTungstenFabricAddressGroup"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricApplictionPolicySetCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricApplictionPolicySetCmd.java index b49bdce0fc2c..85b5528af169 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricApplictionPolicySetCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricApplictionPolicySetCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricApplicationPolicySetResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ 
-47,7 +46,6 @@ + "policy set", responseObject = TungstenFabricApplicationPolicySetResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricApplictionPolicySetCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricApplictionPolicySetCmd.class.getName()); public static final String APINAME = "listTungstenFabricApplicationPolicySet"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallPolicyCmd.java index e63e8cb6122c..44c8f7276e5b 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallPolicyCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricFirewallPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricFirewallPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricFirewallPolicyCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricFirewallPolicyCmd.class.getName()); public static final String APINAME = "listTungstenFabricFirewallPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallRuleCmd.java 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallRuleCmd.java index 800b0b269299..bfc1c101e2b3 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallRuleCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricFirewallRuleCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricFirewallRuleResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricFirewallRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricFirewallRuleCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricFirewallRuleCmd.class.getName()); public static final String APINAME = "listTungstenFabricFirewallRule"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLBHealthMonitorCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLBHealthMonitorCmd.java index 0dfaa18c549f..7ac43cb19095 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLBHealthMonitorCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLBHealthMonitorCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricLBHealthMonitorResponse; 
import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -43,7 +42,6 @@ @APICommand(name = ListTungstenFabricLBHealthMonitorCmd.APINAME, description = "list Tungsten-Fabric LB health monitor", responseObject = TungstenFabricLBHealthMonitorResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricLBHealthMonitorCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricLBHealthMonitorCmd.class.getName()); public static final String APINAME = "listTungstenFabricLBHealthMonitor"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLogicalRouterCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLogicalRouterCmd.java index 4178aa6fd055..e33bd3f47c49 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLogicalRouterCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricLogicalRouterCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricLogicalRouterResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricLogicalRouterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricLogicalRouterCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricLogicalRouterCmd.class.getName()); public static final String APINAME = 
"listTungstenFabricLogicalRouter"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNetworkCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNetworkCmd.java index 907165d349ef..08aa2719f578 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNetworkCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNetworkCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricNetworkResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricNetworkResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricNetworkCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricNetworkCmd.class.getName()); public static final String APINAME = "listTungstenFabricNetwork"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNicCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNicCmd.java index 6f19cb698f0c..b5daf9570f4d 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNicCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricNicCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; 
import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricNicResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -46,7 +45,6 @@ @APICommand(name = ListTungstenFabricNicCmd.APINAME, description = "list Tungsten-Fabric nic", responseObject = TungstenFabricNicResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricNicCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricNicCmd.class.getName()); public static final String APINAME = "listTungstenFabricNic"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyCmd.java index b5edf2d32322..3bfef0cda021 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyCmd.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -48,7 +47,6 @@ @APICommand(name = ListTungstenFabricPolicyCmd.APINAME, description = "list Tungsten-Fabric policy", responseObject = TungstenFabricPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricPolicyCmd extends BaseListCmd { - public static final Logger s_logger = 
Logger.getLogger(ListTungstenFabricPolicyCmd.class.getName()); public static final String APINAME = "listTungstenFabricPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyRuleCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyRuleCmd.java index 0bbb292b4f9f..c4c53f26758a 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyRuleCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricPolicyRuleCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricRuleResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricRuleResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricPolicyRuleCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricPolicyRuleCmd.class.getName()); public static final String APINAME = "listTungstenFabricPolicyRule"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricProvidersCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricProvidersCmd.java index 1e544a6f3388..262e4a9a856d 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricProvidersCmd.java +++ 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricProvidersCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricProviderResponse; import org.apache.cloudstack.network.tungsten.service.TungstenProviderService; -import org.apache.log4j.Logger; import java.util.List; @@ -43,7 +42,6 @@ @APICommand(name = ListTungstenFabricProvidersCmd.APINAME, responseObject = TungstenFabricProviderResponse.class, description = "Lists Tungsten-Fabric providers", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricProvidersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricProvidersCmd.class.getName()); public static final String APINAME = "listTungstenFabricProviders"; ///////////////////////////////////////////////////// diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricServiceGroupCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricServiceGroupCmd.java index 8d65da425503..eb066520abc1 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricServiceGroupCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricServiceGroupCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricServiceGroupResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ responseObject = TungstenFabricServiceGroupResponse.class, 
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricServiceGroupCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricServiceGroupCmd.class.getName()); public static final String APINAME = "listTungstenFabricServiceGroup"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagCmd.java index 657bc431bc4d..bf73cc0c2272 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricTagResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -46,7 +45,6 @@ @APICommand(name = ListTungstenFabricTagCmd.APINAME, responseObject = TungstenFabricTagResponse.class, description = "Lists Tungsten-Fabric tags", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricTagCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricTagCmd.class.getName()); public static final String APINAME = "listTungstenFabricTag"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagTypeCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagTypeCmd.java index bda4ef7a04ab..6ba10f92488e 
100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagTypeCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricTagTypeCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricTagTypeResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,6 @@ description = "Lists " + "Tungsten-Fabric tags", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricTagTypeCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricTagTypeCmd.class.getName()); public static final String APINAME = "listTungstenFabricTagType"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricVmCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricVmCmd.java index 02e19c5c423c..3626d5d8e3b4 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricVmCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/ListTungstenFabricVmCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricVmResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -46,7 +45,6 @@ @APICommand(name = ListTungstenFabricVmCmd.APINAME, description = "list Tungsten-Fabric vm", responseObject 
= TungstenFabricVmResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTungstenFabricVmCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListTungstenFabricVmCmd.class.getName()); public static final String APINAME = "listTungstenFabricVm"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd.java index 74536ca4541f..22d2948e773c 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricLogicalRouterResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -43,7 +42,6 @@ @APICommand(name = RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd.APINAME, description = "remove Tungsten-Fabric network gateway from logical router", responseObject = TungstenFabricLogicalRouterResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveTungstenFabricNetworkGatewayFromLogicalRouterCmd.class.getName()); public static final String APINAME = "removeTungstenFabricNetworkGatewayFromLogicalRouter"; @Inject diff --git 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyCmd.java index 93ed3ba1d278..ed5226461ad8 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = TungstenFabricPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveTungstenFabricPolicyCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveTungstenFabricPolicyCmd.class.getName()); public static final String APINAME = "removeTungstenFabricPolicy"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyRuleCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyRuleCmd.java index 86ec6555a75b..1cb6a78a802a 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyRuleCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricPolicyRuleCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import 
org.apache.cloudstack.network.tungsten.api.response.TungstenFabricPolicyResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -41,7 +40,6 @@ responseObject = TungstenFabricPolicyResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveTungstenFabricPolicyRuleCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveTungstenFabricPolicyRuleCmd.class.getName()); public static final String APINAME = "removeTungstenFabricPolicyRule"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricTagCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricTagCmd.java index 0214eff881c8..ae0de85067cc 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricTagCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/RemoveTungstenFabricTagCmd.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricTagResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import java.util.List; @@ -42,7 +41,6 @@ @APICommand(name = RemoveTungstenFabricTagCmd.APINAME, description = "remove Tungsten-Fabric tag", responseObject = TungstenFabricTagResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class RemoveTungstenFabricTagCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(RemoveTungstenFabricTagCmd.class.getName()); public static final String APINAME = "removeTungstenFabricTag"; @Inject diff --git 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/SynchronizeTungstenFabricDataCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/SynchronizeTungstenFabricDataCmd.java index 458d915956da..0bf5cd46acc2 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/SynchronizeTungstenFabricDataCmd.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/SynchronizeTungstenFabricDataCmd.java @@ -31,14 +31,12 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricProviderResponse; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = SynchronizeTungstenFabricDataCmd.APINAME, description = "Synchronize Tungsten-Fabric data", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class SynchronizeTungstenFabricDataCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(SynchronizeTungstenFabricDataCmd.class.getName()); public static final String APINAME = "synchronizeTungstenFabricData"; @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = TungstenFabricProviderResponse.class, diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/UpdateTungstenFabricLBHealthMonitorCmd.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/UpdateTungstenFabricLBHealthMonitorCmd.java index 9e7cce66e346..c0ffdb925e83 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/UpdateTungstenFabricLBHealthMonitorCmd.java +++ 
b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/api/command/UpdateTungstenFabricLBHealthMonitorCmd.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.network.tungsten.api.response.TungstenFabricLBHealthMonitorResponse; import org.apache.cloudstack.network.tungsten.dao.TungstenFabricLBHealthMonitorVO; import org.apache.cloudstack.network.tungsten.service.TungstenService; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -45,7 +44,6 @@ responseObject = TungstenFabricLBHealthMonitorResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class UpdateTungstenFabricLBHealthMonitorCmd extends BaseAsyncCreateCmd { - public static final Logger s_logger = Logger.getLogger(UpdateTungstenFabricLBHealthMonitorCmd.class.getName()); public static final String APINAME = "updateTungstenFabricLBHealthMonitor"; @Inject diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/resource/TungstenResource.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/resource/TungstenResource.java index 0e14dd41aeb9..471083103253 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/resource/TungstenResource.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/resource/TungstenResource.java @@ -161,7 +161,8 @@ import org.apache.cloudstack.network.tungsten.service.TungstenApi; import org.apache.cloudstack.network.tungsten.service.TungstenVRouterApi; import org.apache.cloudstack.network.tungsten.vrouter.Port; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import javax.naming.ConfigurationException; import java.io.IOException; @@ -172,7 +173,7 @@ public class TungstenResource implements ServerResource { - private static final Logger s_logger = 
Logger.getLogger(TungstenResource.class); + protected Logger logger = LogManager.getLogger(getClass()); private String name; private String guid; @@ -257,7 +258,7 @@ public PingCommand getCurrentStatus(long id) { try { tungstenApi.checkTungstenProviderConnection(); } catch (ServerApiException e) { - s_logger.error("Check Tungsten-Fabric provider connection failed", e); + logger.error("Check Tungsten-Fabric provider connection failed", e); return null; } return new PingCommand(Host.Type.L2Networking, id); @@ -493,7 +494,7 @@ private Answer executeRequestGroup7(Command cmd, int numRetries) { return executeRequest((CreateTungstenDefaultProjectCommand) cmd); } - s_logger.debug("Received unsupported command " + cmd.toString()); + logger.debug("Received unsupported command " + cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } @@ -2302,7 +2303,7 @@ private Answer executeRequest(CreateTungstenDefaultProjectCommand cmd) { } private Answer retry(Command cmd, int numRetries) { - s_logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". Number of retries remaining: " + numRetries); + logger.warn("Retrying " + cmd.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetries); return executeRequestGroup1(cmd, numRetries); } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenApi.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenApi.java index 965ce691b55a..a3989034be91 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenApi.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenApi.java @@ -86,7 +86,8 @@ import org.apache.cloudstack.network.tungsten.model.TungstenLoadBalancerMember; import org.apache.cloudstack.network.tungsten.model.TungstenRule; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.io.IOException; import java.net.HttpURLConnection; @@ -99,8 +100,8 @@ public class TungstenApi { - private static final Logger S_LOGGER = Logger.getLogger(TungstenApi.class); - private static final Status.ErrorHandler errorHandler = S_LOGGER::error; + protected Logger logger = LogManager.getLogger(getClass()); + private final Status.ErrorHandler errorHandler = logger::error; public static final String TUNGSTEN_DEFAULT_DOMAIN = "default-domain"; public static final String TUNGSTEN_DEFAULT_PROJECT = "admin"; @@ -208,7 +209,7 @@ public VirtualMachine createTungstenVirtualMachine(String vmUuid, String vmName) status.ifFailure(errorHandler); return (VirtualMachine) apiConnector.findById(VirtualMachine.class, virtualMachine.getUuid()); } catch (IOException e) { - S_LOGGER.error("Unable to create Tungsten-Fabric vm " + vmUuid, e); + logger.error("Unable to create Tungsten-Fabric vm " + vmUuid, e); return null; } } @@ -224,7 +225,7 @@ public VirtualMachineInterface createTungstenVmInterface(String nicUuid, String virtualMachine = 
(VirtualMachine) apiConnector.findById(VirtualMachine.class, virtualMachineUuid); project = (Project) apiConnector.findById(Project.class, projectUuid); } catch (IOException e) { - S_LOGGER.error("Failed getting the resources needed for virtual machine interface creation from Tungsten-Fabric"); + logger.error("Failed getting the resources needed for virtual machine interface creation from Tungsten-Fabric"); } VirtualMachineInterface virtualMachineInterface = new VirtualMachineInterface(); @@ -248,7 +249,7 @@ public VirtualMachineInterface createTungstenVmInterface(String nicUuid, String return (VirtualMachineInterface) apiConnector.findById(VirtualMachineInterface.class, virtualMachineInterface.getUuid()); } catch (IOException e) { - S_LOGGER.error("Failed creating virtual machine interface in Tungsten-Fabric"); + logger.error("Failed creating virtual machine interface in Tungsten-Fabric"); return null; } } @@ -263,7 +264,7 @@ public InstanceIp createTungstenInstanceIp(String instanceIpName, String ip, Str virtualMachineInterface = (VirtualMachineInterface) apiConnector.findById(VirtualMachineInterface.class, vmInterfaceUuid); } catch (IOException e) { - S_LOGGER.error("Failed getting the resources needed for instance ip creation from Tungsten-Fabric"); + logger.error("Failed getting the resources needed for instance ip creation from Tungsten-Fabric"); return null; } @@ -277,7 +278,7 @@ public InstanceIp createTungstenInstanceIp(String instanceIpName, String ip, Str status.ifFailure(errorHandler); return (InstanceIp) apiConnector.findById(InstanceIp.class, instanceIp.getUuid()); } catch (IOException e) { - S_LOGGER.error("Failed creating instance ip in Tungsten-Fabric"); + logger.error("Failed creating instance ip in Tungsten-Fabric"); return null; } } @@ -292,7 +293,7 @@ public InstanceIp createTungstenInstanceIp(String instanceIpName, String ip, Str virtualMachineInterface = (VirtualMachineInterface) apiConnector.findById(VirtualMachineInterface.class, 
vmInterfaceUuid); } catch (IOException e) { - S_LOGGER.error("Failed getting the resources needed for instance ip creation with subnet from Tungsten-Fabric"); + logger.error("Failed getting the resources needed for instance ip creation with subnet from Tungsten-Fabric"); return null; } @@ -307,7 +308,7 @@ public InstanceIp createTungstenInstanceIp(String instanceIpName, String ip, Str status.ifFailure(errorHandler); return (InstanceIp) apiConnector.findById(InstanceIp.class, instanceIp.getUuid()); } catch (IOException e) { - S_LOGGER.error("Failed creating instance ip in Tungsten-Fabric"); + logger.error("Failed creating instance ip in Tungsten-Fabric"); return null; } } @@ -325,7 +326,7 @@ public boolean deleteTungstenVmInterface(VirtualMachineInterface vmi) { status.ifFailure(errorHandler); return status.isSuccess(); } catch (IOException e) { - S_LOGGER.error("Failed deleting the virtual machine interface from Tungsten-Fabric"); + logger.error("Failed deleting the virtual machine interface from Tungsten-Fabric"); return false; } } @@ -421,7 +422,7 @@ public ApiObjectBase createTungstenGatewayVmi(String name, String projectUuid, S VirtualMachineInterface.class, project, name); if (virtualMachineInterface != null) { - S_LOGGER.error("interface " + name + " is existed"); + logger.error("interface " + name + " is existed"); return null; } @@ -553,7 +554,7 @@ public String getTungstenNatIp(String projectUuid, String logicalRouterUuid) { TimeUnit.SECONDS.sleep(1); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - S_LOGGER.error("can not delay for service instance create"); + logger.error("can not delay for service instance create"); } try { diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java index 578accaa745a..106cf5180c3d 100644 --- 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenElement.java @@ -135,7 +135,6 @@ import org.apache.cloudstack.network.tungsten.model.TungstenLoadBalancerMember; import org.apache.cloudstack.network.tungsten.model.TungstenRule; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import java.util.ArrayList; @@ -154,7 +153,6 @@ public class TungstenElement extends AdapterBase implements StaticNatServiceProvider, IpDeployer, FirewallServiceProvider, LoadBalancingServiceProvider, PortForwardingServiceProvider, ResourceStateAdapter, DnsServiceProvider, Listener, StateListener, NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(TungstenElement.class); private static final String NETWORK = "network"; @@ -273,11 +271,11 @@ private static Map> initCapabil } protected boolean canHandle(Network network, Network.Service service) { - s_logger.debug("Checking if TungstenElement can handle service " + service.getName() + " on network " + logger.debug("Checking if TungstenElement can handle service " + service.getName() + " on network " + network.getDisplayText()); if (!networkModel.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.debug("TungstenElement is not a provider for network " + network.getDisplayText()); + logger.debug("TungstenElement is not a provider for network " + network.getDisplayText()); return false; } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenFabricUtils.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenFabricUtils.java index f3e321298bc3..b94904ca05ab 100644 --- 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenFabricUtils.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenFabricUtils.java @@ -23,7 +23,8 @@ import com.cloud.network.element.TungstenProviderVO; import org.apache.cloudstack.network.tungsten.agent.api.TungstenAnswer; import org.apache.cloudstack.network.tungsten.agent.api.TungstenCommand; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import javax.inject.Inject; @@ -31,7 +32,7 @@ @Component public class TungstenFabricUtils { - private static final Logger s_logger = Logger.getLogger(TungstenFabricUtils.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AgentManager agentMgr; @@ -42,14 +43,14 @@ public TungstenAnswer sendTungstenCommand(TungstenCommand cmd, long zoneId) thro TungstenProviderVO tungstenProviderVO = tungstenProviderDao.findByZoneId(zoneId); if (tungstenProviderVO == null) { - s_logger.error("No Tungsten-Fabric provider have been found!"); + logger.error("No Tungsten-Fabric provider have been found!"); throw new InvalidParameterValueException("Failed to find a Tungsten-Fabric provider"); } Answer answer = agentMgr.easySend(tungstenProviderVO.getHostId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("Tungsten-Fabric API Command failed"); + logger.error("Tungsten-Fabric API Command failed"); throw new InvalidParameterValueException("Failed API call to Tungsten-Fabric Network plugin"); } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java index 12fe160f979e..818d370cca03 100644 --- 
a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenGuestNetworkGuru.java @@ -81,7 +81,6 @@ import org.apache.cloudstack.network.tungsten.agent.api.TungstenAnswer; import org.apache.cloudstack.network.tungsten.agent.api.TungstenCommand; import org.apache.cloudstack.network.tungsten.model.TungstenRule; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -90,8 +89,6 @@ public class TungstenGuestNetworkGuru extends GuestNetworkGuru implements NetworkMigrationResponder { - private static final Logger s_logger = Logger.getLogger(TungstenGuestNetworkGuru.class); - @Inject NetworkDao networkDao; @Inject @@ -153,7 +150,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use DataCenter dc = _dcDao.findById(plan.getDataCenterId()); if (!canHandle(offering, dc.getNetworkType(), physnet)) { - s_logger.debug("Refusing to design this network"); + logger.debug("Refusing to design this network"); return null; } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenServiceImpl.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenServiceImpl.java index ad20a98984f7..cb366959327a 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenServiceImpl.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenServiceImpl.java @@ -215,7 +215,6 @@ import org.apache.cloudstack.network.tungsten.model.TungstenRule; import org.apache.cloudstack.network.tungsten.model.TungstenTag; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import java.io.UnsupportedEncodingException; import 
java.net.URLEncoder; @@ -229,7 +228,6 @@ import javax.inject.Inject; public class TungstenServiceImpl extends ManagerBase implements TungstenService { - private static final Logger s_logger = Logger.getLogger(TungstenServiceImpl.class); private static final String NETWORK = "network"; @@ -327,7 +325,7 @@ private void subscribeSynchonizeEvent() { try { syncTungstenDbWithCloudstackProjectsAndDomains(); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -342,7 +340,7 @@ private void subscribeIpAddressEvent() { createTungstenFloatingIp(zoneId, ipAddress); } } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -357,7 +355,7 @@ private void subscribeIpAddressEvent() { } } } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -377,7 +375,7 @@ private void subscribeNetworkPolicyEvent() { network.getDataCenterId()); } } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -389,7 +387,7 @@ private void subscribeVlanEvent() { final VlanVO vlanVO = (VlanVO) args; addPublicNetworkSubnet(vlanVO); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -399,7 +397,7 @@ private void subscribeVlanEvent() { final VlanVO vlanVO = (VlanVO) args; removePublicNetworkSubnet(vlanVO); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -410,7 +408,7 @@ private void subscribePopEvent() { final HostPodVO pod = (HostPodVO) args; addManagementNetworkSubnet(pod); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -419,7 +417,7 @@ private void subscribePopEvent() { final HostPodVO pod = (HostPodVO) args; removeManagementNetworkSubnet(pod); } catch (final Exception e) { - s_logger.error(e.getMessage()); + 
logger.error(e.getMessage()); } }); } @@ -430,7 +428,7 @@ private void subscribeDomainEvent() { final DomainVO domain = (DomainVO) args; createTungstenDomain(domain); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -439,7 +437,7 @@ private void subscribeDomainEvent() { final DomainVO domain = (DomainVO) args; deleteTungstenDomain(domain); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -450,7 +448,7 @@ private void subscribeProjectEvent() { final Project project = (Project) args; createTungstenProject(project); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -459,7 +457,7 @@ private void subscribeProjectEvent() { final Project project = (Project) args; deleteTungstenProject(project); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -471,7 +469,7 @@ private void subscribeSecurityGroupEvent() { final SecurityGroup securityGroup = (SecurityGroup) args; createTungstenSecurityGroup(securityGroup); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -481,7 +479,7 @@ private void subscribeSecurityGroupEvent() { final SecurityGroup securityGroup = (SecurityGroup) args; deleteTungstenSecurityGroup(securityGroup); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -491,7 +489,7 @@ private void subscribeSecurityGroupEvent() { final List securityRules = (List) args; addTungstenSecurityGroupRule(securityRules); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -501,7 +499,7 @@ private void subscribeSecurityGroupEvent() { final SecurityRule securityRule = (SecurityRule) args; removeTungstenSecurityGroupRule(securityRule); } catch (final Exception e) { - s_logger.error(e.getMessage()); + 
logger.error(e.getMessage()); } }); } @@ -512,7 +510,7 @@ private void subscribeSecondaryNicEvent() { final long id = (long) args; addTungstenNicSecondaryIpAddress(id); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); @@ -521,7 +519,7 @@ private void subscribeSecondaryNicEvent() { final NicSecondaryIpVO nicSecondaryIpVO = (NicSecondaryIpVO) args; removeTungstenNicSecondaryIpAddress(nicSecondaryIpVO); } catch (final Exception e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); } }); } @@ -1205,7 +1203,7 @@ public boolean updateLoadBalancerSsl(Network network, LoadBalancingRule loadBala updateTungstenLoadBalancerListenerCommand, network.getDataCenterId()); return updateTungstenLoadBalancerListenerAnswer.getResult(); } else { - s_logger.error("Tungsten-Fabric ssl require user api key"); + logger.error("Tungsten-Fabric ssl require user api key"); } } return true; diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenVRouterApi.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenVRouterApi.java index 1ede3f9a2cb5..491424eb35c9 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenVRouterApi.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/service/TungstenVRouterApi.java @@ -19,12 +19,13 @@ import org.apache.cloudstack.network.tungsten.vrouter.Port; import org.apache.cloudstack.network.tungsten.vrouter.VRouterApiConnector; import org.apache.cloudstack.network.tungsten.vrouter.VRouterApiConnectorFactory; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.io.IOException; public class TungstenVRouterApi { - private static final Logger s_logger = Logger.getLogger(TungstenVRouterApi.class); + 
protected static Logger LOGGER = LogManager.getLogger(TungstenVRouterApi.class); private TungstenVRouterApi() { } @@ -37,7 +38,7 @@ public static boolean addTungstenVrouterPort(String host, String vrouterPort, Po try { return getvRouterApiConnector(host, vrouterPort).addPort(port); } catch (IOException ex) { - s_logger.error("Fail to add vrouter port : " + ex.getMessage()); + LOGGER.error("Fail to add vrouter port : " + ex.getMessage()); return false; } } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/IntrospectApiConnectorImpl.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/IntrospectApiConnectorImpl.java index 5847b3f53b3c..d1d2fef1c225 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/IntrospectApiConnectorImpl.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/IntrospectApiConnectorImpl.java @@ -21,7 +21,8 @@ import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.w3c.dom.Document; import org.xml.sax.SAXException; @@ -33,7 +34,7 @@ import javax.xml.parsers.ParserConfigurationException; public class IntrospectApiConnectorImpl implements IntrospectApiConnector { - private static final Logger s_logger = Logger.getLogger(IntrospectApiConnectorImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private final String vrouterUrl; public IntrospectApiConnectorImpl(VRouter vRouter) { @@ -46,13 +47,13 @@ public Document getSnhItfReq(String uuid) { CloseableHttpResponse httpResponse = httpClient.execute(request)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed 
to connect host : " + ex.getMessage()); + logger.error("Failed to connect host : " + ex.getMessage()); return null; } catch (ParserConfigurationException ex) { - s_logger.error("Failed to parse xml configuration : " + ex.getMessage()); + logger.error("Failed to parse xml configuration : " + ex.getMessage()); return null; } catch (SAXException ex) { - s_logger.error("Failed to get xml data : " + ex.getMessage()); + logger.error("Failed to get xml data : " + ex.getMessage()); return null; } } diff --git a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/VRouterApiConnectorImpl.java b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/VRouterApiConnectorImpl.java index 6e8d727a9a2e..4344020d013b 100644 --- a/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/VRouterApiConnectorImpl.java +++ b/plugins/network-elements/tungsten/src/main/java/org/apache/cloudstack/network/tungsten/vrouter/VRouterApiConnectorImpl.java @@ -27,13 +27,14 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.io.IOException; import java.util.List; public class VRouterApiConnectorImpl implements VRouterApiConnector { - private static final Logger s_logger = Logger.getLogger(VRouterApiConnectorImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private final String vrouterUrl; public VRouterApiConnectorImpl(VRouter vRouter) { @@ -51,7 +52,7 @@ public boolean addPort(final Port port) throws IOException { CloseableHttpResponse httpResponse = httpClient.execute(httpPost)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed to add vrouter port : " + ex.getMessage()); + logger.error("Failed 
to add vrouter port : " + ex.getMessage()); return false; } } @@ -63,7 +64,7 @@ public boolean deletePort(final String portId) { CloseableHttpResponse httpResponse = httpClient.execute(httpDelete)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed to delete vrouter port : " + ex.getMessage()); + logger.error("Failed to delete vrouter port : " + ex.getMessage()); return false; } } @@ -75,7 +76,7 @@ public boolean enablePort(final String portId) { CloseableHttpResponse httpResponse = httpClient.execute(httpPut)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed to enable vrouter port : " + ex.getMessage()); + logger.error("Failed to enable vrouter port : " + ex.getMessage()); return false; } } @@ -87,7 +88,7 @@ public boolean disablePort(final String portId) { CloseableHttpResponse httpResponse = httpClient.execute(httpPut)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed to disable vrouter port : " + ex.getMessage()); + logger.error("Failed to disable vrouter port : " + ex.getMessage()); return false; } } @@ -103,7 +104,7 @@ public boolean addGateway(List gatewayList) throws IOException { CloseableHttpResponse httpResponse = httpClient.execute(httpPost)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed to add route : " + ex.getMessage()); + logger.error("Failed to add route : " + ex.getMessage()); return false; } } @@ -118,7 +119,7 @@ public boolean deleteGateway(List gatewayList) throws IOException { CloseableHttpResponse httpResponse = httpClient.execute(customHttpDelete)) { return getResponse(httpResponse); } catch (IOException ex) { - s_logger.error("Failed to remove route : " + ex.getMessage()); + logger.error("Failed to remove route : " + ex.getMessage()); return false; } } @@ -131,7 +132,7 @@ private boolean getResponse(final CloseableHttpResponse httpResponse) throws IOE return true; } else { String error 
= jsonObject.get("error").getAsString(); - s_logger.error(error); + logger.error(error); return false; } } diff --git a/plugins/network-elements/tungsten/src/test/java/org/apache/cloudstack/network/tungsten/service/TungstenApiTest.java b/plugins/network-elements/tungsten/src/test/java/org/apache/cloudstack/network/tungsten/service/TungstenApiTest.java index 580bea057d88..030b802aa8bf 100644 --- a/plugins/network-elements/tungsten/src/test/java/org/apache/cloudstack/network/tungsten/service/TungstenApiTest.java +++ b/plugins/network-elements/tungsten/src/test/java/org/apache/cloudstack/network/tungsten/service/TungstenApiTest.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.network.tungsten.service; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -54,7 +56,6 @@ import net.juniper.tungsten.api.types.VirtualNetwork; import org.apache.cloudstack.network.tungsten.model.TungstenLoadBalancerMember; import org.apache.cloudstack.network.tungsten.model.TungstenRule; -import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -71,7 +72,7 @@ @RunWith(MockitoJUnitRunner.class) public class TungstenApiTest { - private static final Logger s_logger = Logger.getLogger(TungstenApiTest.class); + protected Logger logger = LogManager.getLogger(getClass()); private final TungstenApi tungstenApi = new TungstenApi(); private Project project; @@ -94,7 +95,7 @@ public class TungstenApiTest { @Before public void setUp() throws Exception { - s_logger.debug("Create Tungsten-Fabric api connector mock."); + logger.debug("Create Tungsten-Fabric api connector mock."); ApiConnector api = new ApiConnectorMock(null, 0); tungstenApi.setApiConnector(api); @@ -102,7 +103,7 @@ public void setUp() throws Exception { projectUuid = UUID.randomUUID().toString(); 
//create Tungsten-Fabric default domain - s_logger.debug("Create default domain in Tungsten-Fabric."); + logger.debug("Create default domain in Tungsten-Fabric."); Domain domain = new Domain(); domain.setUuid(domainUuid); String defaultDomainName = "default-domain"; @@ -110,7 +111,7 @@ public void setUp() throws Exception { api.create(domain); //create Tungsten-Fabric default project - s_logger.debug("Create default project in Tungsten-Fabric."); + logger.debug("Create default project in Tungsten-Fabric."); Project project = new Project(); project.setUuid(projectUuid); String defaultProjectName = "default-project"; @@ -141,77 +142,77 @@ public void setUp() throws Exception { @Test public void createTungstenNetworkTest() { - s_logger.debug("Creating a virtual network in Tungsten-Fabric."); + logger.debug("Creating a virtual network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, "")); - s_logger.debug("Get Tungsten-Fabric virtual network and check if it's not null."); + logger.debug("Get Tungsten-Fabric virtual network and check if it's not null."); assertNotNull(tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid)); } @Test public void createTungstenVirtualMachineTest() { - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName)); - s_logger.debug("Check if virtual machine was created in Tungsten-Fabric."); + logger.debug("Check if virtual machine was created in Tungsten-Fabric."); assertNotNull(tungstenApi.getTungstenObject(VirtualMachine.class, tungstenVmUuid)); } @Test public void createTungstenVirtualMachineInterfaceTest() { - s_logger.debug("Create fabric virtual network in Tungsten-Fabric."); + 
logger.debug("Create fabric virtual network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(null, "ip-fabric", "ip-fabric", projectUuid, true, false, null, 0, null, true, null, null, null, false, false, "")); - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, true, "")); - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName)); String vmiMacAddress = "02:fc:f3:d6:83:c3"; - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, vmiMacAddress, tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true)); } @Test public void deleteTungstenVirtualMachineInterfaceTest() { - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, "")); - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName)); String vmiMacAddress = "02:fc:f3:d6:83:c3"; - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); 
tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, vmiMacAddress, tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if the virtual machine interface was created in Tungsten-Fabric."); + logger.debug("Check if the virtual machine interface was created in Tungsten-Fabric."); VirtualMachineInterface vmi = (VirtualMachineInterface) tungstenApi.getTungstenObject(VirtualMachineInterface.class, vmiUuid); assertNotNull(vmi); - s_logger.debug("Delete virtual machine interface from Tungsten-Fabric."); + logger.debug("Delete virtual machine interface from Tungsten-Fabric."); assertTrue(tungstenApi.deleteTungstenVmInterface(vmi)); } @Test public void createTungstenLogicalRouterTest() { - s_logger.debug("Create public network in Tungsten-Fabric."); + logger.debug("Create public network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(tungstenPublicNetworkUuid, tungstenPublicNetworkName, tungstenPublicNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, "")); - s_logger.debug("Create logical router in Tungsten-Fabric."); + logger.debug("Create logical router in Tungsten-Fabric."); assertNotNull( tungstenApi.createTungstenLogicalRouter("TungstenLogicalRouter", projectUuid, tungstenPublicNetworkUuid)); } @@ -220,11 +221,11 @@ public void createTungstenLogicalRouterTest() { public void createTungstenSecurityGroupTest() { String projectFqn = TungstenApi.TUNGSTEN_DEFAULT_DOMAIN + ":" + TungstenApi.TUNGSTEN_DEFAULT_PROJECT; - s_logger.debug("Create a security group in Tungsten-Fabric."); + logger.debug("Create a security group in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenSecurityGroup(tungstenSecurityGroupUuid, tungstenSecurityGroupName, "TungstenSecurityGroupDescription", projectFqn)); - s_logger.debug("Check if the security group was created in Tungsten-Fabric."); + logger.debug("Check if the security group was created in 
Tungsten-Fabric."); SecurityGroup securityGroup = (SecurityGroup) tungstenApi.getTungstenObject(SecurityGroup.class, tungstenSecurityGroupUuid); assertNotNull(securityGroup); @@ -234,7 +235,7 @@ public void createTungstenSecurityGroupTest() { public void addTungstenSecurityGroupRuleTest() { String projectFqn = TungstenApi.TUNGSTEN_DEFAULT_DOMAIN + ":" + TungstenApi.TUNGSTEN_DEFAULT_PROJECT; - s_logger.debug("Create a security group in Tungsten-Fabric."); + logger.debug("Create a security group in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenSecurityGroup(tungstenSecurityGroupUuid, tungstenSecurityGroupName, "TungstenSecurityGroupDescription", projectFqn)); @@ -243,7 +244,7 @@ public void addTungstenSecurityGroupRuleTest() { tungstenSecurityGroupUuid); assertNotNull(securityGroup); - s_logger.debug("Add a Tungsten-Fabric security group rule to the security group added earlier"); + logger.debug("Add a Tungsten-Fabric security group rule to the security group added earlier"); boolean result = tungstenApi.addTungstenSecurityGroupRule(tungstenSecurityGroupUuid, tungstenSecurityGroupRuleUuid, "ingress", 80, 90, "10.0.0.0/24", "IPv4", "tcp"); assertTrue(result); @@ -253,7 +254,7 @@ public void addTungstenSecurityGroupRuleTest() { public void removeTungstenSecurityGroupRuleTest() { String projectFqn = TungstenApi.TUNGSTEN_DEFAULT_DOMAIN + ":" + TungstenApi.TUNGSTEN_DEFAULT_PROJECT; - s_logger.debug("Create a security group in Tungsten-Fabric."); + logger.debug("Create a security group in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenSecurityGroup(tungstenSecurityGroupUuid, "TungstenSecurityGroup", "TungstenSecurityGroupDescription", projectFqn)); @@ -262,37 +263,37 @@ public void removeTungstenSecurityGroupRuleTest() { tungstenSecurityGroupUuid); assertNotNull(securityGroup); - s_logger.debug("Add a Tungsten-Fabric security group rule to the security group added earlier"); + logger.debug("Add a Tungsten-Fabric security group rule to the 
security group added earlier"); boolean result1 = tungstenApi.addTungstenSecurityGroupRule(tungstenSecurityGroupUuid, "0a01e4c7-d912-4bd5-9786-5478e3dae7b2", "ingress", 80, 90, "10.0.0.0/24", "IPv4", "tcp"); assertTrue(result1); - s_logger.debug("Add a Tungsten-Fabric security group rule to the security group added earlier"); + logger.debug("Add a Tungsten-Fabric security group rule to the security group added earlier"); boolean result2 = tungstenApi.addTungstenSecurityGroupRule(tungstenSecurityGroupUuid, "fe44b353-21e7-4e6c-af18-1325c5ef886a", "egress", 80, 90, "securitygroup", "IPv4", "tcp"); assertTrue(result2); - s_logger.debug("Delete the Tungsten-Fabric security group rule added earlier"); + logger.debug("Delete the Tungsten-Fabric security group rule added earlier"); assertTrue( tungstenApi.removeTungstenSecurityGroupRule(tungstenSecurityGroupUuid, "0a01e4c7-d912-4bd5-9786-5478e3dae7b2")); } @Test public void createTungstenLoadbalancerTest() { - s_logger.debug("Creating a virtual network in Tungsten-Fabric."); + logger.debug("Creating a virtual network in Tungsten-Fabric."); createTungstenNetworkTest(); - s_logger.debug("Get tungsten virtual network and check if it's not null."); + logger.debug("Get tungsten virtual network and check if it's not null."); assertNotNull(tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid)); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); createTungstenVirtualMachineInterfaceTest(); - s_logger.debug("Create loadbalancer in Tungsten-Fabric"); + logger.debug("Create loadbalancer in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenLoadbalancer(projectUuid, tungstenLoadbalancerName, vmiUuid, tungstenApi.getSubnetUuid(tungstenNetworkUuid), "192.168.2.100")); - s_logger.debug("Check if the loadbalancer was created in Tungsten-Fabric"); + logger.debug("Check if the loadbalancer was created in 
Tungsten-Fabric"); Project project = (Project) tungstenApi.getTungstenObject(Project.class, projectUuid); assertNotNull(tungstenApi.getTungstenObjectByName(Loadbalancer.class, project.getQualifiedName(), tungstenLoadbalancerName)); @@ -300,201 +301,201 @@ public void createTungstenLoadbalancerTest() { @Test public void createTungstenLoadbalancerListenerTest() { - s_logger.debug("Create a loadbalancer in Tungsten-Fabric"); + logger.debug("Create a loadbalancer in Tungsten-Fabric"); createTungstenLoadbalancerTest(); - s_logger.debug("Get loadbalancer from Tungsten-Fabric"); + logger.debug("Get loadbalancer from Tungsten-Fabric"); Project project = (Project) tungstenApi.getTungstenObject(Project.class, projectUuid); Loadbalancer loadbalancer = (Loadbalancer) tungstenApi.getTungstenObjectByName(Loadbalancer.class, project.getQualifiedName(), tungstenLoadbalancerName); assertNotNull(loadbalancer); - s_logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); + logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); LoadbalancerListener loadbalancerListener = (LoadbalancerListener) tungstenApi.createTungstenLoadbalancerListener( projectUuid, loadbalancer.getUuid(), tungstenLoadbalancerListenerName, "tcp", 24); - s_logger.debug("Check if the loadbalancer listener was created in Tungsten-Fabric"); + logger.debug("Check if the loadbalancer listener was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(LoadbalancerListener.class, loadbalancerListener.getUuid())); } @Test public void createTungstenLoadbalancerHealthMonitorTest() { - s_logger.debug("Create a loadbalancer health monitor in Tungsten-Fabric"); + logger.debug("Create a loadbalancer health monitor in Tungsten-Fabric"); LoadbalancerHealthmonitor loadbalancerHealthmonitor = (LoadbalancerHealthmonitor) tungstenApi.createTungstenLoadbalancerHealthMonitor( projectUuid, "LoadbalancerHealthMonitor", "PING", 3, 5, 5, null, null, null); assertNotNull(loadbalancerHealthmonitor); - 
s_logger.debug("Check if the loadbalancer health monitor was created in Tungsten-Fabric"); + logger.debug("Check if the loadbalancer health monitor was created in Tungsten-Fabric"); assertNotNull( tungstenApi.getTungstenObject(LoadbalancerHealthmonitor.class, loadbalancerHealthmonitor.getUuid())); } @Test public void createTungstenLoadbalancerPoolTest() { - s_logger.debug("Create a loadbalancer in Tungsten-Fabric"); + logger.debug("Create a loadbalancer in Tungsten-Fabric"); createTungstenLoadbalancerTest(); - s_logger.debug("Get loadbalancer from Tungsten-Fabric"); + logger.debug("Get loadbalancer from Tungsten-Fabric"); Project project = (Project) tungstenApi.getTungstenObject(Project.class, projectUuid); Loadbalancer loadbalancer = (Loadbalancer) tungstenApi.getTungstenObjectByName(Loadbalancer.class, project.getQualifiedName(), tungstenLoadbalancerName); assertNotNull(loadbalancer); - s_logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); + logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); LoadbalancerListener loadbalancerListener = (LoadbalancerListener) tungstenApi.createTungstenLoadbalancerListener( projectUuid, loadbalancer.getUuid(), tungstenLoadbalancerListenerName, "tcp", 24); assertNotNull(loadbalancerListener); - s_logger.debug("Create a loadbalancer health monitor in Tungsten-Fabric"); + logger.debug("Create a loadbalancer health monitor in Tungsten-Fabric"); LoadbalancerHealthmonitor loadbalancerHealthmonitor = (LoadbalancerHealthmonitor) tungstenApi.createTungstenLoadbalancerHealthMonitor( projectUuid, "LoadbalancerHealthMonitor", "PING", 3, 5, 5, null, null, null); assertNotNull(loadbalancerHealthmonitor); - s_logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); + logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); LoadbalancerPool loadbalancerPool = (LoadbalancerPool) tungstenApi.createTungstenLoadbalancerPool(projectUuid, loadbalancerListener.getUuid(), loadbalancerHealthmonitor.getUuid(), 
tungstenLoadbalancerPoolName, "ROUND_ROBIN", "TCP"); assertNotNull(loadbalancerPool); - s_logger.debug("Check if the loadbalancer pool was created in Tungsten-Fabric"); + logger.debug("Check if the loadbalancer pool was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(LoadbalancerPool.class, loadbalancerPool.getUuid())); } @Test public void createTungstenLoadbalancerMemberTest() { - s_logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); + logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); createTungstenLoadbalancerPoolTest(); - s_logger.debug("Get the loadbalancer pool from Tungsten-Fabric"); + logger.debug("Get the loadbalancer pool from Tungsten-Fabric"); Project project = (Project) tungstenApi.getTungstenObject(Project.class, projectUuid); LoadbalancerPool loadbalancerPool = (LoadbalancerPool) tungstenApi.getTungstenObjectByName( LoadbalancerPool.class, project.getQualifiedName(), tungstenLoadbalancerPoolName); assertNotNull(loadbalancerPool); - s_logger.debug("Create a loadbalancer member in Tungsten-Fabric"); + logger.debug("Create a loadbalancer member in Tungsten-Fabric"); LoadbalancerMember loadbalancerMember = (LoadbalancerMember) tungstenApi.createTungstenLoadbalancerMember( loadbalancerPool.getUuid(), "TungstenLoadbalancerMember", "10.0.0.0", null, 24, 5); assertNotNull(loadbalancerMember); - s_logger.debug("Check if the loadbalancer member was created in Tungsten-Fabric"); + logger.debug("Check if the loadbalancer member was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(LoadbalancerMember.class, loadbalancerMember.getUuid())); } @Test public void createTungstenInstanceIpTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", 
"10.0.0.20", false, false, ""); - s_logger.debug("Create a virtual machine in Tungsten-Fabric."); + logger.debug("Create a virtual machine in Tungsten-Fabric."); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if the instance ip is not exist in Tungsten-Fabric"); + logger.debug("Check if the instance ip is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "TungstenInstanceIp")); - s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); assertNotNull( tungstenApi.createTungstenInstanceIp("TungstenInstanceIp", "192.168.1.100", tungstenNetworkUuid, vmiUuid)); - s_logger.debug("Check if the instance ip was created in Tungsten-Fabric"); + logger.debug("Check if the instance ip was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "TungstenInstanceIp")); } @Test public void createTungstenInstanceIpWithSubnetTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create a virtual machine in Tungsten-Fabric."); + logger.debug("Create a virtual machine in Tungsten-Fabric."); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in 
Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if the instance ip is not exist in Tungsten-Fabric"); + logger.debug("Check if the instance ip is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "TungstenInstanceIp")); - s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); assertNotNull( tungstenApi.createTungstenInstanceIp("TungstenInstanceIp", "192.168.1.100", tungstenNetworkUuid, vmiUuid, tungstenApi.getSubnetUuid(tungstenNetworkUuid))); - s_logger.debug("Check if the instance ip was created in Tungsten-Fabric"); + logger.debug("Check if the instance ip was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "TungstenInstanceIp")); } @Test public void createTungstenFloatingIpPoolTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); VirtualNetwork virtualNetwork = tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Check if the floating ip pool is not exist in Tungsten-Fabric"); + logger.debug("Check if the floating ip pool is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObjectByName(FloatingIpPool.class, virtualNetwork.getQualifiedName(), "TungstenFip")); - s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenFloatingIpPool(tungstenNetworkUuid, "TungstenFip")); - s_logger.debug("Check if the instance ip was created in Tungsten-Fabric"); + logger.debug("Check if the instance ip was 
created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObjectByName(FloatingIpPool.class, virtualNetwork.getQualifiedName(), "TungstenFip")); } @Test public void createTungstenLbVmiTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Check if the lb vmi is not exist in Tungsten-Fabric"); + logger.debug("Check if the lb vmi is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObjectByName(VirtualMachineInterface.class, project.getQualifiedName(), "TungstenLbVmi")); - s_logger.debug("Create lb vmi in Tungsten-Fabric"); + logger.debug("Create lb vmi in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenLbVmi("TungstenLbVmi", projectUuid, tungstenNetworkUuid)); - s_logger.debug("Check if the lb vmi was created in Tungsten-Fabric"); + logger.debug("Check if the lb vmi was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObjectByName(VirtualMachineInterface.class, project.getQualifiedName(), "TungstenLbVmi")); } @Test public void updateTungstenObjectTest() { - s_logger.debug("Create public network in Tungsten-Fabric."); + logger.debug("Create public network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenPublicNetworkName, tungstenPublicNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Creating a logical router in Tungsten-Fabric."); + logger.debug("Creating a logical router in Tungsten-Fabric."); LogicalRouter logicalRouter = (LogicalRouter) tungstenApi.createTungstenLogicalRouter("TungstenLogicalRouter", projectUuid, tungstenNetworkUuid); - s_logger.debug("Creating a vmi in 
Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); VirtualMachineInterface virtualMachineInterface = (VirtualMachineInterface) tungstenApi.createTungstenGatewayVmi( vmiName, projectUuid, tungstenNetworkUuid); - s_logger.debug("Check if the logical router vmi is not exist in Tungsten-Fabric"); + logger.debug("Check if the logical router vmi is not exist in Tungsten-Fabric"); assertNull(logicalRouter.getVirtualMachineInterface()); - s_logger.debug("Update logical router with vmi"); + logger.debug("Update logical router with vmi"); logicalRouter.setVirtualMachineInterface(virtualMachineInterface); tungstenApi.updateTungstenObject(logicalRouter); - s_logger.debug("Check updated logical router have vmi uuid equals created vmi uuid"); + logger.debug("Check updated logical router have vmi uuid equals created vmi uuid"); LogicalRouter updatedlogicalRouter = (LogicalRouter) tungstenApi.getTungstenObjectByName(LogicalRouter.class, project.getQualifiedName(), "TungstenLogicalRouter"); assertEquals(virtualMachineInterface.getUuid(), @@ -503,49 +504,49 @@ public void updateTungstenObjectTest() { @Test public void createTungstenFloatingIpTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); FloatingIpPool floatingIpPool = (FloatingIpPool) tungstenApi.createTungstenFloatingIpPool(tungstenNetworkUuid, "TungstenFip"); - s_logger.debug("Check if the floating ip pool is not exist in Tungsten-Fabric"); + logger.debug("Check if the floating ip pool is not exist in Tungsten-Fabric"); assertNull( tungstenApi.getTungstenObjectByName(FloatingIp.class, 
floatingIpPool.getQualifiedName(), "TungstenFi")); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); assertNotNull( tungstenApi.createTungstenFloatingIp(projectUuid, tungstenNetworkUuid, "TungstenFip", "TungstenFi", "192.168.1.100")); - s_logger.debug("Check if the lb vmi was created in Tungsten-Fabric"); + logger.debug("Check if the lb vmi was created in Tungsten-Fabric"); assertNotNull( tungstenApi.getTungstenObjectByName(FloatingIp.class, floatingIpPool.getQualifiedName(), "TungstenFi")); } @Test public void assignTungstenFloatingIpTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); tungstenApi.createTungstenFloatingIpPool(tungstenNetworkUuid, "TungstenFip"); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); tungstenApi.createTungstenFloatingIp(projectUuid, tungstenNetworkUuid, "TungstenFip", "TungstenFi", "192.168.1.100"); - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if the floating ip was assigned in Tungsten-Fabric"); + logger.debug("Check if the floating ip was assigned in Tungsten-Fabric"); Assert.assertTrue( 
tungstenApi.assignTungstenFloatingIp(tungstenNetworkUuid, vmiUuid, "TungstenFip", "TungstenFi", "192.168.1.100")); @@ -553,59 +554,59 @@ public void assignTungstenFloatingIpTest() { @Test public void releaseTungstenFloatingIpTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); tungstenApi.createTungstenFloatingIpPool(tungstenNetworkUuid, "TungstenFip"); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); tungstenApi.createTungstenFloatingIp(projectUuid, tungstenNetworkUuid, "TungstenFip", "TungstenFi", "192.168.1.100"); - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if the floating ip was assigned in Tungsten-Fabric"); + logger.debug("Check if the floating ip was assigned in Tungsten-Fabric"); tungstenApi.assignTungstenFloatingIp(tungstenNetworkUuid, vmiUuid, "TungstenFip", "TungstenFi", "192.168.1.100"); - s_logger.debug("Check if the floating ip was assigned in Tungsten-Fabric"); + logger.debug("Check if the floating ip was assigned in Tungsten-Fabric"); Assert.assertTrue(tungstenApi.releaseTungstenFloatingIp(tungstenNetworkUuid, "TungstenFip", "TungstenFi")); } @Test public void 
createTungstenNetworkPolicyTest() { - s_logger.debug("Prepare network policy rule 1"); + logger.debug("Prepare network policy rule 1"); List tungstenRuleList1 = new ArrayList<>(); TungstenRule tungstenRule1 = new TungstenRule("005f0dea-0196-11ec-a1ed-b42e99f6e187", "pass", ">", "tcp", null, "192.168.100.0", 24, 80, 80, null, "192.168.200.0", 24, 80, 80); tungstenRuleList1.add(tungstenRule1); - s_logger.debug("Create a network policy in Tungsten-Fabric."); + logger.debug("Create a network policy in Tungsten-Fabric."); assertNotNull(tungstenApi.createOrUpdateTungstenNetworkPolicy("policy1", projectUuid, tungstenRuleList1)); - s_logger.debug("Get created network policy and check if network policy rule has created"); + logger.debug("Get created network policy and check if network policy rule has created"); NetworkPolicy networkPolicy = (NetworkPolicy) tungstenApi.getTungstenObjectByName(NetworkPolicy.class, project.getQualifiedName(), "policy1"); assertEquals("005f0dea-0196-11ec-a1ed-b42e99f6e187", networkPolicy.getEntries().getPolicyRule().get(0).getRuleUuid()); - s_logger.debug("Prepare network policy rule 2"); + logger.debug("Prepare network policy rule 2"); List tungstenRuleList2 = new ArrayList<>(); TungstenRule tungstenRule2 = new TungstenRule("105f0dea-0196-11ec-a1ed-b42e99f6e187", "pass", ">", "tcp", null, "192.168.100.0", 24, 80, 80, null, "192.168.200.0", 24, 80, 80); tungstenRuleList2.add(tungstenRule2); - s_logger.debug("update created network policy in Tungsten-Fabric."); + logger.debug("update created network policy in Tungsten-Fabric."); assertNotNull(tungstenApi.createOrUpdateTungstenNetworkPolicy("policy1", projectUuid, tungstenRuleList2)); - s_logger.debug("Get updated network policy and check if network policy rule has updated"); + logger.debug("Get updated network policy and check if network policy rule has updated"); NetworkPolicy networkPolicy1 = (NetworkPolicy) tungstenApi.getTungstenObjectByName(NetworkPolicy.class, project.getQualifiedName(), 
"policy1"); assertEquals("105f0dea-0196-11ec-a1ed-b42e99f6e187", @@ -614,26 +615,26 @@ public void createTungstenNetworkPolicyTest() { @Test public void applyTungstenNetworkPolicy() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Prepare network policy rule"); + logger.debug("Prepare network policy rule"); List tungstenRuleList = new ArrayList<>(); - s_logger.debug("Create a network policy in Tungsten-Fabric."); + logger.debug("Create a network policy in Tungsten-Fabric."); NetworkPolicy networkPolicy = (NetworkPolicy) tungstenApi.createOrUpdateTungstenNetworkPolicy("policy", projectUuid, tungstenRuleList); - s_logger.debug("Check if network policy was not applied in Tungsten-Fabric."); + logger.debug("Check if network policy was not applied in Tungsten-Fabric."); VirtualNetwork virtualNetwork1 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertNull(virtualNetwork1.getNetworkPolicy()); - s_logger.debug("Apply network policy to network in Tungsten-Fabric."); + logger.debug("Apply network policy to network in Tungsten-Fabric."); assertNotNull(tungstenApi.applyTungstenNetworkPolicy(networkPolicy.getUuid(), tungstenNetworkUuid, 1, 1)); - s_logger.debug("Check if network policy was applied in Tungsten-Fabric."); + logger.debug("Check if network policy was applied in Tungsten-Fabric."); VirtualNetwork virtualNetwork2 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertNotNull(virtualNetwork2.getNetworkPolicy()); @@ -641,145 +642,145 @@ public void applyTungstenNetworkPolicy() { @Test public void getTungstenFabricNetworkTest() { - s_logger.debug("Create fabric virtual 
network in Tungsten-Fabric."); + logger.debug("Create fabric virtual network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(null, "ip-fabric", "ip-fabric", projectUuid, true, false, null, 0, null, true, null, null, null, false, false, "")); - s_logger.debug("Check if fabric network was got in Tungsten-Fabric."); + logger.debug("Check if fabric network was got in Tungsten-Fabric."); assertNotNull(tungstenApi.getTungstenFabricNetwork()); } @Test public void createTungstenDomainTest() { - s_logger.debug("Check if domain was created in Tungsten-Fabric."); + logger.debug("Check if domain was created in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenDomain("domain", "0a01e4c7-d912-4bd5-9786-5478e3dae7b2")); } @Test public void createTungstenProjectTest() { - s_logger.debug("Check if project was created in Tungsten-Fabric."); + logger.debug("Check if project was created in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenProject("project","fe44b353-21e7-4e6c-af18-1325c5ef886a","0a01e4c7-d912-4bd5-9786-5478e3dae7b2", "domain")); } @Test public void deleteTungstenDomainTest() { - s_logger.debug("Create domain in Tungsten-Fabric."); + logger.debug("Create domain in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenDomain("domain", "0a01e4c7-d912-4bd5-9786-5478e3dae7b2")); - s_logger.debug("Check if domain was deleted in Tungsten-Fabric."); + logger.debug("Check if domain was deleted in Tungsten-Fabric."); assertTrue(tungstenApi.deleteTungstenDomain("0a01e4c7-d912-4bd5-9786-5478e3dae7b2")); } @Test public void deleteTungstenProjectTest() { - s_logger.debug("Create project in Tungsten-Fabric."); + logger.debug("Create project in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenProject("project","fe44b353-21e7-4e6c-af18-1325c5ef886a","0a01e4c7-d912-4bd5-9786-5478e3dae7b2", "domain")); - s_logger.debug("Check if project was deleted in Tungsten-Fabric."); + logger.debug("Check if project was deleted in 
Tungsten-Fabric."); assertTrue(tungstenApi.deleteTungstenProject("fe44b353-21e7-4e6c-af18-1325c5ef886a")); } @Test public void getDefaultTungstenDomainTest() throws IOException { - s_logger.debug("Check if default domain was got in Tungsten-Fabric."); + logger.debug("Check if default domain was got in Tungsten-Fabric."); assertNotNull(tungstenApi.getDefaultTungstenDomain()); } @Test public void updateLoadBalancerMemberTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create a vm in Tungsten-Fabric"); + logger.debug("Create a vm in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Create loadbalancer in Tungsten-Fabric"); + logger.debug("Create loadbalancer in Tungsten-Fabric"); Loadbalancer loadbalancer = (Loadbalancer) tungstenApi.createTungstenLoadbalancer(projectUuid, tungstenLoadbalancerName, vmiUuid, tungstenApi.getSubnetUuid(tungstenNetworkUuid), "192.168.2.100"); - s_logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); + logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); LoadbalancerListener loadbalancerListener = (LoadbalancerListener) tungstenApi.createTungstenLoadbalancerListener( projectUuid, loadbalancer.getUuid(), tungstenLoadbalancerListenerName, "tcp", 24); - s_logger.debug("Create a loadbalancer health monitor in Tungsten-Fabric"); + logger.debug("Create a loadbalancer health monitor in 
Tungsten-Fabric"); LoadbalancerHealthmonitor loadbalancerHealthmonitor = (LoadbalancerHealthmonitor) tungstenApi.createTungstenLoadbalancerHealthMonitor( projectUuid, "LoadbalancerHealthMonitor", "PING", 3, 5, 5, null, null, null); - s_logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); + logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); LoadbalancerPool loadbalancerPool = (LoadbalancerPool) tungstenApi.createTungstenLoadbalancerPool(projectUuid, loadbalancerListener.getUuid(), loadbalancerHealthmonitor.getUuid(), tungstenLoadbalancerPoolName, "ROUND_ROBIN", "TCP"); - s_logger.debug("Update loadbalancer member 1 in Tungsten-Fabric"); + logger.debug("Update loadbalancer member 1 in Tungsten-Fabric"); List tungstenLoadBalancerMemberList1 = new ArrayList<>(); tungstenLoadBalancerMemberList1.add(new TungstenLoadBalancerMember("member1", "192.168.100.100", 80, 1)); assertTrue(tungstenApi.updateLoadBalancerMember(projectUuid, tungstenLoadbalancerPoolName, tungstenLoadBalancerMemberList1, tungstenApi.getSubnetUuid(tungstenNetworkUuid))); - s_logger.debug("Check if loadbalancer member 2 was updated in Tungsten-Fabric"); + logger.debug("Check if loadbalancer member 2 was updated in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObjectByName(LoadbalancerMember.class, loadbalancerPool.getQualifiedName(), "member1")); - s_logger.debug("Update loadbalancer member 2 in Tungsten-Fabric"); + logger.debug("Update loadbalancer member 2 in Tungsten-Fabric"); List tungstenLoadBalancerMemberList2 = new ArrayList<>(); tungstenLoadBalancerMemberList2.add(new TungstenLoadBalancerMember("member2", "192.168.100.100", 80, 1)); assertTrue(tungstenApi.updateLoadBalancerMember(projectUuid, tungstenLoadbalancerPoolName, tungstenLoadBalancerMemberList2, tungstenApi.getSubnetUuid(tungstenNetworkUuid))); - s_logger.debug("Check if loadbalancer member 1 was deleted in Tungsten-Fabric"); + logger.debug("Check if loadbalancer member 1 was deleted in Tungsten-Fabric"); 
assertNull(tungstenApi.getTungstenObjectByName(LoadbalancerMember.class, loadbalancerPool.getQualifiedName(), "member1")); - s_logger.debug("Check if loadbalancer member 2 was created in Tungsten-Fabric"); + logger.debug("Check if loadbalancer member 2 was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObjectByName(LoadbalancerMember.class, loadbalancerPool.getQualifiedName(), "member2")); } @Test public void updateLoadBalancerPoolTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Create loadbalancer in Tungsten-Fabric"); + logger.debug("Create loadbalancer in Tungsten-Fabric"); Loadbalancer loadbalancer = (Loadbalancer) tungstenApi.createTungstenLoadbalancer(projectUuid, tungstenLoadbalancerName, vmiUuid, tungstenApi.getSubnetUuid(tungstenNetworkUuid), "192.168.2.100"); - s_logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); + logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); LoadbalancerListener loadbalancerListener = (LoadbalancerListener) tungstenApi.createTungstenLoadbalancerListener( projectUuid, loadbalancer.getUuid(), tungstenLoadbalancerListenerName, "tcp", 24); - s_logger.debug("Create a loadbalancer health monitor in Tungsten-Fabric"); + logger.debug("Create 
a loadbalancer health monitor in Tungsten-Fabric"); LoadbalancerHealthmonitor loadbalancerHealthmonitor = (LoadbalancerHealthmonitor) tungstenApi.createTungstenLoadbalancerHealthMonitor( projectUuid, "LoadbalancerHealthMonitor", "PING", 3, 5, 5, null, null, null); - s_logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); + logger.debug("Create a loadbalancer pool in Tungsten-Fabric"); tungstenApi.createTungstenLoadbalancerPool(projectUuid, loadbalancerListener.getUuid(), loadbalancerHealthmonitor.getUuid(), tungstenLoadbalancerPoolName, "ROUND_ROBIN", "TCP"); - s_logger.debug("Update loadbalancer pool in Tungsten-Fabric"); + logger.debug("Update loadbalancer pool in Tungsten-Fabric"); assertTrue( tungstenApi.updateLoadBalancerPool(projectUuid, tungstenLoadbalancerPoolName, "SOURCE_IP", "APP_COOKIE", "cookie", "UDP", true, "80", "/stats", "admin:abc")); - s_logger.debug("Check if loadbalancer pool was updated in Tungsten-Fabric"); + logger.debug("Check if loadbalancer pool was updated in Tungsten-Fabric"); LoadbalancerPool loadbalancerPool = (LoadbalancerPool) tungstenApi.getTungstenObjectByName( LoadbalancerPool.class, project.getQualifiedName(), tungstenLoadbalancerPoolName); assertEquals("SOURCE_IP", loadbalancerPool.getProperties().getLoadbalancerMethod()); @@ -790,30 +791,30 @@ public void updateLoadBalancerPoolTest() { @Test public void updateLoadBalancerListenerTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + 
logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Create loadbalancer in Tungsten-Fabric"); + logger.debug("Create loadbalancer in Tungsten-Fabric"); Loadbalancer loadbalancer = (Loadbalancer) tungstenApi.createTungstenLoadbalancer(projectUuid, tungstenLoadbalancerName, vmiUuid, tungstenApi.getSubnetUuid(tungstenNetworkUuid), "192.168.2.100"); - s_logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); + logger.debug("Create a loadbalancer listener in Tungsten-Fabric"); tungstenApi.createTungstenLoadbalancerListener(projectUuid, loadbalancer.getUuid(), tungstenLoadbalancerListenerName, "tcp", 24); - s_logger.debug("update loadbalancer listener in Tungsten-Fabric"); + logger.debug("update loadbalancer listener in Tungsten-Fabric"); assertTrue(tungstenApi.updateLoadBalancerListener(projectUuid, tungstenLoadbalancerListenerName, "udp", 25, "http://host:8080/client/getLoadBalancerSslCertificate")); - s_logger.debug("Check if loadbalancer listener was updated in Tungsten-Fabric"); + logger.debug("Check if loadbalancer listener was updated in Tungsten-Fabric"); LoadbalancerListener loadbalancerListener = (LoadbalancerListener) tungstenApi.getTungstenObjectByName( LoadbalancerListener.class, project.getQualifiedName(), tungstenLoadbalancerListenerName); assertEquals("udp", loadbalancerListener.getProperties().getProtocol()); @@ -824,30 +825,30 @@ public void updateLoadBalancerListenerTest() { @Test public void applyTungstenPortForwardingTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - 
s_logger.debug("Create instance ip in Tungsten-Fabric"); + logger.debug("Create instance ip in Tungsten-Fabric"); tungstenApi.createTungstenFloatingIpPool(tungstenNetworkUuid, "TungstenFip"); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); FloatingIp floatingIp = (FloatingIp) tungstenApi.createTungstenFloatingIp(projectUuid, tungstenNetworkUuid, "TungstenFip", "TungstenFi", "192.168.1.100"); - s_logger.debug("Create floating ip in Tungsten-Fabric"); + logger.debug("Create floating ip in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if the port mapping is not exist in Tungsten-Fabric"); + logger.debug("Check if the port mapping is not exist in Tungsten-Fabric"); assertNull(floatingIp.getPortMappings()); assertNull(floatingIp.getVirtualMachineInterface()); assertNull(floatingIp.getPortMappingsEnable()); - s_logger.debug("Check if the port mapping was add in Tungsten-Fabric"); + logger.debug("Check if the port mapping was add in Tungsten-Fabric"); assertTrue( tungstenApi.applyTungstenPortForwarding(true, tungstenNetworkUuid, "TungstenFip", "TungstenFi", vmiUuid, "tcp", 8080, 80)); @@ -857,7 +858,7 @@ public void applyTungstenPortForwardingTest() { assertNotNull(floatingIp.getVirtualMachineInterface()); assertTrue(floatingIp.getPortMappingsEnable()); - s_logger.debug("Check if the port mapping was remove in Tungsten-Fabric"); + logger.debug("Check if the port mapping was remove in Tungsten-Fabric"); assertTrue(tungstenApi.applyTungstenPortForwarding(false, tungstenNetworkUuid, "TungstenFip", "TungstenFi", vmiUuid, "tcp", 8080, 80)); assertEquals(0, 
floatingIp.getPortMappings().getPortMappings().size()); @@ -867,14 +868,14 @@ public void applyTungstenPortForwardingTest() { @Test public void addTungstenNetworkSubnetCommandTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); VirtualNetwork virtualNetwork = tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, null, 0, null, false, null, null, null, false, false, null); - s_logger.debug("Check if network ipam subnet is empty in Tungsten-Fabric"); + logger.debug("Check if network ipam subnet is empty in Tungsten-Fabric"); assertNull(virtualNetwork.getNetworkIpam()); - s_logger.debug("Check if network ipam subnet was added to network in Tungsten-Fabric"); + logger.debug("Check if network ipam subnet was added to network in Tungsten-Fabric"); assertTrue(tungstenApi.addTungstenNetworkSubnetCommand(tungstenNetworkUuid, "10.0.0.0", 24, "10.0.0.1", true, "10.0.0.253", "10.0.0.10", "10.0.0.20", true, "subnetName")); VirtualNetwork virtualNetwork1 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, @@ -912,18 +913,18 @@ public void addTungstenNetworkSubnetCommandTest() { @Test public void removeTungstenNetworkSubnetCommandTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "192.168.100.0", 23, "192.168.100.1", false, null, null, null, false, false, "subnetName1"); - s_logger.debug("Check if network ipam subnet was added to network in Tungsten-Fabric"); + logger.debug("Check if network ipam subnet was added to network in Tungsten-Fabric"); assertTrue(tungstenApi.addTungstenNetworkSubnetCommand(tungstenNetworkUuid, "10.0.0.0", 24, "10.0.0.1", true, "10.0.0.253", "10.0.0.10", "10.0.0.20", true, 
"subnetName2")); VirtualNetwork virtualNetwork1 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertEquals(2, virtualNetwork1.getNetworkIpam().get(0).getAttr().getIpamSubnets().size()); - s_logger.debug("Check if network ipam subnet was removed to network in Tungsten-Fabric"); + logger.debug("Check if network ipam subnet was removed to network in Tungsten-Fabric"); assertTrue(tungstenApi.removeTungstenNetworkSubnetCommand(tungstenNetworkUuid, "subnetName2")); VirtualNetwork virtualNetwork2 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); @@ -932,112 +933,112 @@ public void removeTungstenNetworkSubnetCommandTest() { @Test public void createTungstenTagTypeTest() { - s_logger.debug("Check if tag type is not exist in Tungsten-Fabric"); + logger.debug("Check if tag type is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(TagType.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create tag type in Tungsten-Fabric"); + logger.debug("Create tag type in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenTagType("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype")); - s_logger.debug("Check if tag type was created in Tungsten-Fabric"); + logger.debug("Check if tag type was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(TagType.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); } @Test public void createTungstenTagTest() { - s_logger.debug("Check if tag is not exist in Tungsten-Fabric"); + logger.debug("Check if tag is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(Tag.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype", "tagvalue", "123")); - s_logger.debug("Check if tag was created in 
Tungsten-Fabric"); + logger.debug("Check if tag was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(Tag.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); } @Test public void createTungstenApplicationPolicySetTest() { - s_logger.debug("Check if application policy set is not exist in Tungsten-Fabric"); + logger.debug("Check if application policy set is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(ApplicationPolicySet.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create application policy set in Tungsten-Fabric"); + logger.debug("Create application policy set in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenApplicationPolicySet("005f0dea-0196-11ec-a1ed-b42e99f6e187", "applicationpolicyset")); - s_logger.debug("Check if application policy set was created in Tungsten-Fabric"); + logger.debug("Check if application policy set was created in Tungsten-Fabric"); assertNotNull( tungstenApi.getTungstenObject(ApplicationPolicySet.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); } @Test public void createTungstenFirewallPolicyTest() { - s_logger.debug("Create application policy set in Tungsten-Fabric"); + logger.debug("Create application policy set in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenApplicationPolicySet("f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "applicationpolicyset")); - s_logger.debug("Check if firewall policy is not exist in Tungsten-Fabric"); + logger.debug("Check if firewall policy is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(FirewallPolicy.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create firewall policy in Tungsten-Fabric"); + logger.debug("Create firewall policy in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenFirewallPolicy("005f0dea-0196-11ec-a1ed-b42e99f6e187", "f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "firewallpolicy", 1)); - s_logger.debug("Check if firewall policy was created in 
Tungsten-Fabric"); + logger.debug("Check if firewall policy was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(FirewallPolicy.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); } @Test public void createTungstenFirewallRuleTest() { - s_logger.debug("Create application policy set in Tungsten-Fabric"); + logger.debug("Create application policy set in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenApplicationPolicySet("f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "applicationpolicyset")); - s_logger.debug("Create firewall policy in Tungsten-Fabric"); + logger.debug("Create firewall policy in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenFirewallPolicy("1ab1b179-8c6c-492a-868e-0493f4be175c", "f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "firewallpolicy", 1)); - s_logger.debug("Check if firewall rule is not exist in Tungsten-Fabric"); + logger.debug("Check if firewall rule is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(FirewallRule.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create service group in Tungsten-Fabric"); + logger.debug("Create service group in Tungsten-Fabric"); tungstenApi.createTungstenServiceGroup("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "servicegroup", "tcp", 80, 90); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "tagtype1", "tagvalue1", "123"); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("7d5575eb-d029-467e-8b78-6056a8c94a71", "tagtype2", "tagvalue2", "123"); - s_logger.debug("Create address group in Tungsten-Fabric"); + logger.debug("Create address group in Tungsten-Fabric"); tungstenApi.createTungstenAddressGroup("88729834-3ebd-413a-adf9-40aff73cf638", "addressgroup1", "10.0.0.0", 24); - s_logger.debug("Create address group in Tungsten-Fabric"); + 
logger.debug("Create address group in Tungsten-Fabric"); tungstenApi.createTungstenAddressGroup("9291ae28-56cf-448c-b848-f2334b3c86da", "addressgroup2", "10.0.0.0", 24); - s_logger.debug("Create tag type in Tungsten-Fabric"); + logger.debug("Create tag type in Tungsten-Fabric"); tungstenApi.createTungstenTagType("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", "tagtype"); - s_logger.debug("Create firewall rule in Tungsten-Fabric"); + logger.debug("Create firewall rule in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenFirewallRule("124d0792-e890-4b7e-8fe8-1b7a6d63c66a", "1ab1b179-8c6c-492a-868e-0493f4be175c", "firewallrule", "pass", "baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "88729834-3ebd-413a-adf9-40aff73cf638", null, ">", "7d5575eb-d029-467e-8b78-6056a8c94a71", "9291ae28-56cf-448c-b848-f2334b3c86da", null, "c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", 1)); - s_logger.debug("Check if firewall rule was created in Tungsten-Fabric"); + logger.debug("Check if firewall rule was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(FirewallRule.class, "124d0792-e890-4b7e-8fe8-1b7a6d63c66a")); } @Test public void createTungstenServiceGroupTest() { - s_logger.debug("Check if service group is not exist in Tungsten-Fabric"); + logger.debug("Check if service group is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(ServiceGroup.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create service group in Tungsten-Fabric"); + logger.debug("Create service group in Tungsten-Fabric"); assertNotNull( tungstenApi.createTungstenServiceGroup("005f0dea-0196-11ec-a1ed-b42e99f6e187", "servicegroup", "tcp", 80, 90)); - s_logger.debug("Check if service group was created in Tungsten-Fabric"); + logger.debug("Check if service group was created in Tungsten-Fabric"); ServiceGroup serviceGroup = (ServiceGroup) tungstenApi.getTungstenObject(ServiceGroup.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); 
assertNotNull(serviceGroup); @@ -1050,15 +1051,15 @@ public void createTungstenServiceGroupTest() { @Test public void createTungstenAddressGroupTest() { - s_logger.debug("Check if address group is not exist in Tungsten-Fabric"); + logger.debug("Check if address group is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(AddressGroup.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create address group in Tungsten-Fabric"); + logger.debug("Create address group in Tungsten-Fabric"); assertNotNull( tungstenApi.createTungstenAddressGroup("005f0dea-0196-11ec-a1ed-b42e99f6e187", "addressgroup", "10.0.0.0", 24)); - s_logger.debug("Check if address group was created in Tungsten-Fabric"); + logger.debug("Check if address group was created in Tungsten-Fabric"); AddressGroup addressGroup = (AddressGroup) tungstenApi.getTungstenObject(AddressGroup.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertNotNull(addressGroup); @@ -1068,17 +1069,17 @@ public void createTungstenAddressGroupTest() { @Test public void applyTungstenNetworkTagTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); VirtualNetwork virtualNetwork = tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, null, 0, null, false, null, null, null, false, false, null); - s_logger.debug("Check if tag is not apply to network in Tungsten-Fabric"); + logger.debug("Check if tag is not apply to network in Tungsten-Fabric"); assertNull(virtualNetwork.getTag()); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype", "tagvalue", "123"); - s_logger.debug("Check if tag was applied to network in Tungsten-Fabric"); + logger.debug("Check if tag was applied to network in Tungsten-Fabric"); 
assertTrue(tungstenApi.applyTungstenNetworkTag(List.of(tungstenNetworkUuid), "005f0dea-0196-11ec-a1ed-b42e99f6e187")); VirtualNetwork virtualNetwork1 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, @@ -1088,16 +1089,16 @@ public void applyTungstenNetworkTagTest() { @Test public void applyTungstenVmTagTest() { - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); VirtualMachine virtualMachine = tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Check if tag is not apply to vm in Tungsten-Fabric"); + logger.debug("Check if tag is not apply to vm in Tungsten-Fabric"); assertNull(virtualMachine.getTag()); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype", "tagvalue", "123"); - s_logger.debug("Check if tag was applied to vm in Tungsten-Fabric"); + logger.debug("Check if tag was applied to vm in Tungsten-Fabric"); assertTrue( tungstenApi.applyTungstenVmTag(List.of(tungstenVmUuid), "005f0dea-0196-11ec-a1ed-b42e99f6e187")); VirtualMachine virtualMachine1 = (VirtualMachine) tungstenApi.getTungstenObject(VirtualMachine.class, @@ -1107,24 +1108,24 @@ public void applyTungstenVmTagTest() { @Test public void applyTungstenNicTagTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); 
VirtualMachineInterface virtualMachineInterface = tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if tag is not apply to vmi in Tungsten-Fabric"); + logger.debug("Check if tag is not apply to vmi in Tungsten-Fabric"); assertNull(virtualMachineInterface.getTag()); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype", "tagvalue", "123"); - s_logger.debug("Check if tag was applied to vmi in Tungsten-Fabric"); + logger.debug("Check if tag was applied to vmi in Tungsten-Fabric"); assertTrue(tungstenApi.applyTungstenNicTag(List.of(vmiUuid), "005f0dea-0196-11ec-a1ed-b42e99f6e187")); VirtualMachineInterface virtualMachineInterface1 = (VirtualMachineInterface) tungstenApi.getTungstenObject( VirtualMachineInterface.class, vmiUuid); @@ -1133,18 +1134,18 @@ public void applyTungstenNicTagTest() { @Test public void applyTungstenPolicyTagTest() { - s_logger.debug("Create a network policy in Tungsten-Fabric."); + logger.debug("Create a network policy in Tungsten-Fabric."); List tungstenRuleList1 = new ArrayList<>(); NetworkPolicy networkPolicy = (NetworkPolicy) tungstenApi.createOrUpdateTungstenNetworkPolicy("policy", projectUuid, tungstenRuleList1); - s_logger.debug("Check if tag is not apply to network policy in Tungsten-Fabric"); + logger.debug("Check if tag is not apply to network policy in Tungsten-Fabric"); assertNull(networkPolicy.getTag()); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype", "tagvalue", "123"); - s_logger.debug("Check if tag was applied to network policy in Tungsten-Fabric"); + logger.debug("Check if tag was applied to network policy in Tungsten-Fabric"); 
assertTrue(tungstenApi.applyTungstenPolicyTag(networkPolicy.getUuid(), "005f0dea-0196-11ec-a1ed-b42e99f6e187")); NetworkPolicy networkPolicy1 = (NetworkPolicy) tungstenApi.getTungstenObjectByName(NetworkPolicy.class, project.getQualifiedName(), "policy"); @@ -1153,78 +1154,78 @@ public void applyTungstenPolicyTagTest() { @Test public void removeTungstenTagTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Creating a vmi in Tungsten-Fabric."); + logger.debug("Creating a vmi in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Create a network policy in Tungsten-Fabric."); + logger.debug("Create a network policy in Tungsten-Fabric."); - s_logger.debug("Create a network policy in Tungsten-Fabric."); + logger.debug("Create a network policy in Tungsten-Fabric."); List tungstenRuleList1 = new ArrayList<>(); NetworkPolicy networkPolicy = (NetworkPolicy) tungstenApi.createOrUpdateTungstenNetworkPolicy("policy", projectUuid, tungstenRuleList1); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype", "tagvalue", "123"); - s_logger.debug("Apply tag to network in Tungsten-Fabric"); + logger.debug("Apply tag to network in Tungsten-Fabric"); tungstenApi.applyTungstenNetworkTag(List.of(tungstenNetworkUuid), "005f0dea-0196-11ec-a1ed-b42e99f6e187"); - 
s_logger.debug("Check if tag was applied to network in Tungsten-Fabric"); + logger.debug("Check if tag was applied to network in Tungsten-Fabric"); VirtualNetwork virtualNetwork = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertEquals(1, virtualNetwork.getTag().size()); - s_logger.debug("Apply tag to vm in Tungsten-Fabric"); + logger.debug("Apply tag to vm in Tungsten-Fabric"); tungstenApi.applyTungstenVmTag(List.of(tungstenVmUuid), "005f0dea-0196-11ec-a1ed-b42e99f6e187"); - s_logger.debug("Check if tag was applied to vm in Tungsten-Fabric"); + logger.debug("Check if tag was applied to vm in Tungsten-Fabric"); VirtualMachine virtualMachine = (VirtualMachine) tungstenApi.getTungstenObject(VirtualMachine.class, tungstenVmUuid); assertEquals(1, virtualMachine.getTag().size()); - s_logger.debug("Apply tag to nic in Tungsten-Fabric"); + logger.debug("Apply tag to nic in Tungsten-Fabric"); tungstenApi.applyTungstenNicTag(List.of(vmiUuid), "005f0dea-0196-11ec-a1ed-b42e99f6e187"); - s_logger.debug("Check if tag was applied to nic in Tungsten-Fabric"); + logger.debug("Check if tag was applied to nic in Tungsten-Fabric"); VirtualMachineInterface virtualMachineInterface = (VirtualMachineInterface) tungstenApi.getTungstenObject( VirtualMachineInterface.class, vmiUuid); assertEquals(1, virtualMachineInterface.getTag().size()); - s_logger.debug("Apply tag to policy in Tungsten-Fabric"); + logger.debug("Apply tag to policy in Tungsten-Fabric"); tungstenApi.applyTungstenPolicyTag(networkPolicy.getUuid(), "005f0dea-0196-11ec-a1ed-b42e99f6e187"); - s_logger.debug("Check if tag was applied to policy in Tungsten-Fabric"); + logger.debug("Check if tag was applied to policy in Tungsten-Fabric"); NetworkPolicy networkPolicy1 = (NetworkPolicy) tungstenApi.getTungstenObject(NetworkPolicy.class, networkPolicy.getUuid()); assertEquals(1, networkPolicy1.getTag().size()); - s_logger.debug("remove tag from network, vm, nic, policy in 
Tungsten-Fabric"); + logger.debug("remove tag from network, vm, nic, policy in Tungsten-Fabric"); assertNotNull(tungstenApi.removeTungstenTag(List.of(tungstenNetworkUuid), List.of(tungstenVmUuid), List.of(vmiUuid), networkPolicy.getUuid(), null, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Check if tag was removed from network in Tungsten-Fabric"); + logger.debug("Check if tag was removed from network in Tungsten-Fabric"); VirtualNetwork virtualNetwork1 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertEquals(0, virtualNetwork1.getTag().size()); - s_logger.debug("Check if tag was removed from vm in Tungsten-Fabric"); + logger.debug("Check if tag was removed from vm in Tungsten-Fabric"); VirtualMachine virtualMachine1 = (VirtualMachine) tungstenApi.getTungstenObject(VirtualMachine.class, tungstenVmUuid); assertEquals(0, virtualMachine1.getTag().size()); - s_logger.debug("Check if tag was removed from nic in Tungsten-Fabric"); + logger.debug("Check if tag was removed from nic in Tungsten-Fabric"); VirtualMachineInterface virtualMachineInterface1 = (VirtualMachineInterface) tungstenApi.getTungstenObject( VirtualMachineInterface.class, vmiUuid); assertEquals(0, virtualMachineInterface1.getTag().size()); - s_logger.debug("Check if tag was removed from policy in Tungsten-Fabric"); + logger.debug("Check if tag was removed from policy in Tungsten-Fabric"); NetworkPolicy networkPolicy2 = (NetworkPolicy) tungstenApi.getTungstenObject(NetworkPolicy.class, networkPolicy.getUuid()); assertEquals(0, networkPolicy2.getTag().size()); @@ -1232,29 +1233,29 @@ public void removeTungstenTagTest() { @Test public void removeTungstenPolicyTest() { - s_logger.debug("Create a virtual network in Tungsten-Fabric."); + logger.debug("Create a virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", 
true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Prepare network policy rule"); + logger.debug("Prepare network policy rule"); List tungstenRuleList = new ArrayList<>(); - s_logger.debug("Create a network policy in Tungsten-Fabric."); + logger.debug("Create a network policy in Tungsten-Fabric."); NetworkPolicy networkPolicy = (NetworkPolicy) tungstenApi.createOrUpdateTungstenNetworkPolicy("policy", projectUuid, tungstenRuleList); - s_logger.debug("Apply network policy to network in Tungsten-Fabric."); + logger.debug("Apply network policy to network in Tungsten-Fabric."); tungstenApi.applyTungstenNetworkPolicy(networkPolicy.getUuid(), tungstenNetworkUuid, 1, 1); - s_logger.debug("Check if network policy was applied in Tungsten-Fabric."); + logger.debug("Check if network policy was applied in Tungsten-Fabric."); VirtualNetwork virtualNetwork = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertEquals(1, virtualNetwork.getNetworkPolicy().size()); - s_logger.debug("Apply network policy to network in Tungsten-Fabric."); + logger.debug("Apply network policy to network in Tungsten-Fabric."); tungstenApi.removeTungstenPolicy(tungstenNetworkUuid, networkPolicy.getUuid()); - s_logger.debug("Check if network policy was applied in Tungsten-Fabric."); + logger.debug("Check if network policy was applied in Tungsten-Fabric."); VirtualNetwork virtualNetwork1 = (VirtualNetwork) tungstenApi.getTungstenObject(VirtualNetwork.class, tungstenNetworkUuid); assertEquals(0, virtualNetwork1.getNetworkPolicy().size()); @@ -1262,26 +1263,26 @@ public void removeTungstenPolicyTest() { @Test public void createTungstenPolicyTest() { - s_logger.debug("Check if policy is not exist in Tungsten-Fabric"); + logger.debug("Check if policy is not exist in Tungsten-Fabric"); assertNull(tungstenApi.getTungstenObject(NetworkPolicy.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Create policy in Tungsten-Fabric"); + 
logger.debug("Create policy in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenPolicy("005f0dea-0196-11ec-a1ed-b42e99f6e187", "policy", projectUuid)); - s_logger.debug("Check if policy was created in Tungsten-Fabric"); + logger.debug("Check if policy was created in Tungsten-Fabric"); assertNotNull(tungstenApi.getTungstenObject(NetworkPolicy.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); } @Test public void addTungstenPolicyRuleTest() { - s_logger.debug("Create policy in Tungsten-Fabric"); + logger.debug("Create policy in Tungsten-Fabric"); NetworkPolicy networkPolicy = (NetworkPolicy) tungstenApi.createTungstenPolicy( "005f0dea-0196-11ec-a1ed-b42e99f6e187", "policy", projectUuid); - s_logger.debug("Check if policy was created in Tungsten-Fabric"); + logger.debug("Check if policy was created in Tungsten-Fabric"); assertNull(networkPolicy.getEntries()); - s_logger.debug("Check if policy rule was added in Tungsten-Fabric"); + logger.debug("Check if policy rule was added in Tungsten-Fabric"); assertNotNull(tungstenApi.addTungstenPolicyRule("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", "005f0dea-0196-11ec-a1ed-b42e99f6e187", "pass", "tcp", ">", "network1", "192.168.100.0", 24, 8080, 8081, "network2", "10.0.0.0", 16, 80, 81)); @@ -1338,18 +1339,18 @@ public void addTungstenPolicyRuleTest() { @Test public void listTungstenAddressPolicyTest() { - s_logger.debug("Create policy in Tungsten-Fabric"); + logger.debug("Create policy in Tungsten-Fabric"); ApiObjectBase networkPolicy1 = tungstenApi.createTungstenPolicy("005f0dea-0196-11ec-a1ed-b42e99f6e187", "policy1", projectUuid); - s_logger.debug("Check if network policy was listed in Tungsten-Fabric"); + logger.debug("Check if network policy was listed in Tungsten-Fabric"); List networkPolicyList = tungstenApi.listTungstenAddressPolicy(projectUuid, "policy1"); assertEquals(List.of(networkPolicy1), networkPolicyList); } @Test public void listTungstenPolicyTest() { - s_logger.debug("Create policy in Tungsten-Fabric"); + 
logger.debug("Create policy in Tungsten-Fabric"); ApiObjectBase apiObjectBase1 = tungstenApi.createTungstenPolicy("005f0dea-0196-11ec-a1ed-b42e99f6e187", "policy1", projectUuid); ApiObjectBase apiObjectBase2 = tungstenApi.createTungstenPolicy("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", @@ -1358,12 +1359,12 @@ public void listTungstenPolicyTest() { policyList1.sort(comparator); List policyList2 = List.of(apiObjectBase1); - s_logger.debug("Check if policy was listed all in Tungsten-Fabric"); + logger.debug("Check if policy was listed all in Tungsten-Fabric"); List policyList3 = tungstenApi.listTungstenPolicy(projectUuid, null); policyList3.sort(comparator); assertEquals(policyList1, policyList3); - s_logger.debug("Check if policy was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if policy was listed with uuid in Tungsten-Fabric"); List policyList4 = tungstenApi.listTungstenPolicy(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(policyList2, policyList4); @@ -1371,7 +1372,7 @@ public void listTungstenPolicyTest() { @Test public void listTungstenNetworkTest() { - s_logger.debug("Create network in Tungsten-Fabric"); + logger.debug("Create network in Tungsten-Fabric"); VirtualNetwork virtualNetwork1 = tungstenApi.createTungstenNetwork("005f0dea-0196-11ec-a1ed-b42e99f6e187", "network1", "network1", projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); @@ -1382,12 +1383,12 @@ public void listTungstenNetworkTest() { networkList1.sort(comparator); List networkList2 = List.of(virtualNetwork1); - s_logger.debug("Check if network was listed all in Tungsten-Fabric"); + logger.debug("Check if network was listed all in Tungsten-Fabric"); List networkList3 = tungstenApi.listTungstenNetwork(projectUuid, null); networkList3.sort(comparator); assertEquals(networkList1, networkList3); - s_logger.debug("Check if network policy was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if network 
policy was listed with uuid in Tungsten-Fabric"); List networkList4 = tungstenApi.listTungstenNetwork(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(networkList2, networkList4); @@ -1395,19 +1396,19 @@ public void listTungstenNetworkTest() { @Test public void listTungstenVmTest() { - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); VirtualMachine vm1 = tungstenApi.createTungstenVirtualMachine("005f0dea-0196-11ec-a1ed-b42e99f6e187", "vm1"); VirtualMachine vm2 = tungstenApi.createTungstenVirtualMachine("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "vm2"); List vmList1 = Arrays.asList(vm1, vm2); vmList1.sort(comparator); List vmList2 = List.of(vm1); - s_logger.debug("Check if vm was listed all in Tungsten-Fabric"); + logger.debug("Check if vm was listed all in Tungsten-Fabric"); List vmList3 = tungstenApi.listTungstenVm(projectUuid, null); vmList3.sort(comparator); assertEquals(vmList1, vmList3); - s_logger.debug("Check if policy was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if policy was listed with uuid in Tungsten-Fabric"); List vmList4 = tungstenApi.listTungstenVm(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(vmList2, vmList4); @@ -1415,17 +1416,17 @@ public void listTungstenVmTest() { @Test public void listTungstenNicTest() { - s_logger.debug("Create network in Tungsten-Fabric"); + logger.debug("Create network in Tungsten-Fabric"); tungstenApi.createTungstenNetwork("005f0dea-0196-11ec-a1ed-b42e99f6e187", "network1", "network1", projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); tungstenApi.createTungstenNetwork("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "network2", "network2", projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create vm in Tungsten-Fabric"); + logger.debug("Create vm in Tungsten-Fabric"); 
tungstenApi.createTungstenVirtualMachine("7d5575eb-d029-467e-8b78-6056a8c94a71", "vm1"); tungstenApi.createTungstenVirtualMachine("88729834-3ebd-413a-adf9-40aff73cf638", "vm2"); - s_logger.debug("Creating vmi in Tungsten-Fabric."); + logger.debug("Creating vmi in Tungsten-Fabric."); VirtualMachineInterface vmi1 = tungstenApi.createTungstenVmInterface("9291ae28-56cf-448c-b848-f2334b3c86da", "vmi1", "02:fc:f3:d6:83:c3", "005f0dea-0196-11ec-a1ed-b42e99f6e187", "7d5575eb-d029-467e-8b78-6056a8c94a71", projectUuid, "10.0.0.1", true); @@ -1436,12 +1437,12 @@ public void listTungstenNicTest() { vmiList1.sort(comparator); List vmiList2 = List.of(vmi1); - s_logger.debug("Check if vmi was listed all in Tungsten-Fabric"); + logger.debug("Check if vmi was listed all in Tungsten-Fabric"); List vmiList3 = tungstenApi.listTungstenNic(projectUuid, null); vmiList3.sort(comparator); assertEquals(vmiList1, vmiList3); - s_logger.debug("Check if vmi was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if vmi was listed with uuid in Tungsten-Fabric"); List vmList4 = tungstenApi.listTungstenNic(projectUuid, "9291ae28-56cf-448c-b848-f2334b3c86da"); assertEquals(vmiList2, vmList4); @@ -1449,7 +1450,7 @@ public void listTungstenNicTest() { @Test public void listTungstenTagTest() { - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); ApiObjectBase apiObjectBase1 = tungstenApi.createTungstenTag("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype1", "tagvalue1", "123"); ApiObjectBase apiObjectBase2 = tungstenApi.createTungstenTag("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "tagtype2", @@ -1479,7 +1480,7 @@ public void listTungstenTagTest() { listTag4.sort(comparator); listTag5.sort(comparator); - s_logger.debug("Create network and apply tag in Tungsten-Fabric"); + logger.debug("Create network and apply tag in Tungsten-Fabric"); tungstenApi.createTungstenNetwork("9291ae28-56cf-448c-b848-f2334b3c86da", "network1", "network1", projectUuid, true, 
false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); tungstenApi.applyTungstenNetworkTag(List.of("9291ae28-56cf-448c-b848-f2334b3c86da"), @@ -1487,14 +1488,14 @@ public void listTungstenTagTest() { tungstenApi.applyTungstenNetworkTag(List.of("9291ae28-56cf-448c-b848-f2334b3c86da"), "6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe"); - s_logger.debug("Create vm and apply tag in Tungsten-Fabric"); + logger.debug("Create vm and apply tag in Tungsten-Fabric"); tungstenApi.createTungstenVirtualMachine("124d0792-e890-4b7e-8fe8-1b7a6d63c66a", "vm1"); tungstenApi.applyTungstenVmTag(List.of("124d0792-e890-4b7e-8fe8-1b7a6d63c66a"), "7d5575eb-d029-467e-8b78-6056a8c94a71"); tungstenApi.applyTungstenVmTag(List.of("124d0792-e890-4b7e-8fe8-1b7a6d63c66a"), "88729834-3ebd-413a-adf9-40aff73cf638"); - s_logger.debug("Creating vmi and apply tag in Tungsten-Fabric."); + logger.debug("Creating vmi and apply tag in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", "vmi1", "02:fc:f3:d6:83:c3", "9291ae28-56cf-448c-b848-f2334b3c86da", "124d0792-e890-4b7e-8fe8-1b7a6d63c66a", projectUuid, "10.0.0.1", true); tungstenApi.applyTungstenNicTag(List.of("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d"), @@ -1502,45 +1503,45 @@ public void listTungstenTagTest() { tungstenApi.applyTungstenNicTag(List.of("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d"), "7b062909-ba9d-4cf3-bbd3-7db93cf6b4fe"); - s_logger.debug("Creating policy and apply tag in Tungsten-Fabric."); + logger.debug("Creating policy and apply tag in Tungsten-Fabric."); tungstenApi.createTungstenPolicy("205f0dea-0196-11ec-a1ed-b42e99f6e187", "policy", projectUuid); tungstenApi.applyTungstenPolicyTag("205f0dea-0196-11ec-a1ed-b42e99f6e187", "8d5575eb-d029-467e-8b78-6056a8c94a71"); tungstenApi.applyTungstenPolicyTag("205f0dea-0196-11ec-a1ed-b42e99f6e187", "98729834-3ebd-413a-adf9-40aff73cf638"); - s_logger.debug("Check if tag was listed with network in Tungsten-Fabric"); + 
logger.debug("Check if tag was listed with network in Tungsten-Fabric"); List listTag6 = tungstenApi.listTungstenTag("9291ae28-56cf-448c-b848-f2334b3c86da", null, null, null, null, null); listTag6.sort(comparator); assertEquals(listTag1, listTag6); - s_logger.debug("Check if tag was listed with vm in Tungsten-Fabric"); + logger.debug("Check if tag was listed with vm in Tungsten-Fabric"); List listTag7 = tungstenApi.listTungstenTag(null, "124d0792-e890-4b7e-8fe8-1b7a6d63c66a", null, null, null , null); listTag7.sort(comparator); assertEquals(listTag2, listTag7); - s_logger.debug("Check if tag was listed with nic in Tungsten-Fabric"); + logger.debug("Check if tag was listed with nic in Tungsten-Fabric"); List listTag8 = tungstenApi.listTungstenTag(null, null, "c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", null, null, null); listTag8.sort(comparator); assertEquals(listTag3, listTag8); - s_logger.debug("Check if tag was listed with policy in Tungsten-Fabric"); + logger.debug("Check if tag was listed with policy in Tungsten-Fabric"); List listTag9 = tungstenApi.listTungstenTag(null, null, null, "205f0dea-0196-11ec-a1ed-b42e99f6e187", null, null); listTag9.sort(comparator); assertEquals(listTag4, listTag9); - s_logger.debug("Check if tag was listed all in Tungsten-Fabric"); + logger.debug("Check if tag was listed all in Tungsten-Fabric"); List listTag10 = tungstenApi.listTungstenTag(null, null, null, null, null, null); listTag10.sort(comparator); assertEquals(listTag5, listTag10); - s_logger.debug("Check if tag was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if tag was listed with uuid in Tungsten-Fabric"); List listTag11 = tungstenApi.listTungstenTag(null, null, null, null, null, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); listTag11.sort(comparator); @@ -1549,19 +1550,19 @@ public void listTungstenTagTest() { @Test public void listTungstenTagTypeTest() { - s_logger.debug("Create tag type in Tungsten-Fabric"); + logger.debug("Create tag type in Tungsten-Fabric"); 
ApiObjectBase tagType1 = tungstenApi.createTungstenTagType("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype1"); ApiObjectBase tagType2 = tungstenApi.createTungstenTagType("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "tagtype2"); List tagTypeList1 = Arrays.asList(tagType1, tagType2); tagTypeList1.sort(comparator); List tagTypeList2 = List.of(tagType1); - s_logger.debug("Check if tag type was listed all in Tungsten-Fabric"); + logger.debug("Check if tag type was listed all in Tungsten-Fabric"); List tagTypeList3 = tungstenApi.listTungstenTagType(null); tagTypeList3.sort(comparator); assertEquals(tagTypeList1, tagTypeList3); - s_logger.debug("Check if tag type was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if tag type was listed with uuid in Tungsten-Fabric"); List tagTypeList4 = tungstenApi.listTungstenTagType( "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(tagTypeList2, tagTypeList4); @@ -1569,11 +1570,11 @@ public void listTungstenTagTypeTest() { @Test public void listTungstenNetworkPolicyTest() { - s_logger.debug("Create network in Tungsten-Fabric"); + logger.debug("Create network in Tungsten-Fabric"); tungstenApi.createTungstenNetwork("005f0dea-0196-11ec-a1ed-b42e99f6e187", "network1", "network1", projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create policy in Tungsten-Fabric"); + logger.debug("Create policy in Tungsten-Fabric"); ApiObjectBase apiObjectBase1 = tungstenApi.createTungstenPolicy("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "policy1", projectUuid); ApiObjectBase apiObjectBase2 = tungstenApi.createTungstenPolicy("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", @@ -1582,18 +1583,18 @@ public void listTungstenNetworkPolicyTest() { List policyList2 = List.of(apiObjectBase1); policyList1.sort(comparator); - s_logger.debug("Apply network policy to network in Tungsten-Fabric."); + logger.debug("Apply network policy to network in Tungsten-Fabric."); 
tungstenApi.applyTungstenNetworkPolicy("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "005f0dea-0196-11ec-a1ed-b42e99f6e187", 1, 1); tungstenApi.applyTungstenNetworkPolicy("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "005f0dea-0196-11ec-a1ed-b42e99f6e187", 1, 2); - s_logger.debug("Check if network policy was listed all in Tungsten-Fabric"); + logger.debug("Check if network policy was listed all in Tungsten-Fabric"); List policyList3 = tungstenApi.listTungstenNetworkPolicy( "005f0dea-0196-11ec-a1ed-b42e99f6e187", null); assertEquals(policyList1, policyList3); - s_logger.debug("Check if network policy was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if network policy was listed with uuid in Tungsten-Fabric"); List policyList4 = tungstenApi.listTungstenNetworkPolicy( "005f0dea-0196-11ec-a1ed-b42e99f6e187", "6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe"); assertEquals(policyList2, policyList4); @@ -1601,7 +1602,7 @@ public void listTungstenNetworkPolicyTest() { @Test public void listTungstenApplicationPolicySetTest() { - s_logger.debug("Create application policy set in Tungsten-Fabric"); + logger.debug("Create application policy set in Tungsten-Fabric"); ApiObjectBase applicationPolicySet1 = tungstenApi.createTungstenApplicationPolicySet( "005f0dea-0196-11ec-a1ed-b42e99f6e187", "aps1"); ApiObjectBase applicationPolicySet2 = tungstenApi.createTungstenApplicationPolicySet( @@ -1610,12 +1611,12 @@ public void listTungstenApplicationPolicySetTest() { apsList1.sort(comparator); List apsList2 = List.of(applicationPolicySet1); - s_logger.debug("Check if application policy set was listed all in Tungsten-Fabric"); + logger.debug("Check if application policy set was listed all in Tungsten-Fabric"); List apsList3 = tungstenApi.listTungstenApplicationPolicySet(null); apsList3.sort(comparator); assertEquals(apsList1, apsList3); - s_logger.debug("Check if application policy set was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if application policy set was listed with 
uuid in Tungsten-Fabric"); List apsList4 = tungstenApi.listTungstenApplicationPolicySet( "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(apsList2, apsList4); @@ -1623,13 +1624,13 @@ public void listTungstenApplicationPolicySetTest() { @Test public void listTungstenFirewallPolicyTest() { - s_logger.debug("Create application policy set in Tungsten-Fabric"); + logger.debug("Create application policy set in Tungsten-Fabric"); tungstenApi.createTungstenApplicationPolicySet("f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "aps1"); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("7d5575eb-d029-467e-8b78-6056a8c94a71", "tagtype1", "tagvalue1", "123"); - s_logger.debug("Create firewall policy in Tungsten-Fabric"); + logger.debug("Create firewall policy in Tungsten-Fabric"); ApiObjectBase fwPolicy1 = tungstenApi.createTungstenFirewallPolicy("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "firewallpolicy1", 1); ApiObjectBase fwPolicy2 = tungstenApi.createTungstenFirewallPolicy("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", @@ -1638,13 +1639,13 @@ public void listTungstenFirewallPolicyTest() { fwPolicyList1.sort(comparator); List fwPolicyList2 = List.of(fwPolicy1); - s_logger.debug("Check if firewall policy set was listed all with application policy set in Tungsten-Fabric"); + logger.debug("Check if firewall policy set was listed all with application policy set in Tungsten-Fabric"); List fwPolicyList3 = tungstenApi.listTungstenFirewallPolicy( "f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", null); fwPolicyList3.sort(comparator); assertEquals(fwPolicyList1, fwPolicyList3); - s_logger.debug( + logger.debug( "Check if firewall policy set was listed with uuid and application policy set in Tungsten-Fabric"); List fwPolicyList4 = tungstenApi.listTungstenFirewallPolicy( "f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "baf714fa-80a1-454f-9c32-c4d4a6f5c5a4"); @@ -1653,32 +1654,32 @@ public void 
listTungstenFirewallPolicyTest() { @Test public void listTungstenFirewallRuleTest() { - s_logger.debug("Create application policy set in Tungsten-Fabric"); + logger.debug("Create application policy set in Tungsten-Fabric"); tungstenApi.createTungstenApplicationPolicySet("f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "aps"); - s_logger.debug("Create firewall policy in Tungsten-Fabric"); + logger.debug("Create firewall policy in Tungsten-Fabric"); tungstenApi.createTungstenFirewallPolicy("005f0dea-0196-11ec-a1ed-b42e99f6e187", "f5ba12c8-d4c5-4c20-a57d-67a9b6fca652", "firewallpolicy", 1); - s_logger.debug("Create service group in Tungsten-Fabric"); + logger.debug("Create service group in Tungsten-Fabric"); tungstenApi.createTungstenServiceGroup("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "servicegroup1", "tcp", 80, 90); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "tagtype1", "tagvalue1", "123"); - s_logger.debug("Create tag in Tungsten-Fabric"); + logger.debug("Create tag in Tungsten-Fabric"); tungstenApi.createTungstenTag("7d5575eb-d029-467e-8b78-6056a8c94a71", "tagtype2", "tagvalue2", "123"); - s_logger.debug("Create address group in Tungsten-Fabric"); + logger.debug("Create address group in Tungsten-Fabric"); tungstenApi.createTungstenAddressGroup("88729834-3ebd-413a-adf9-40aff73cf638", "addressgroup1", "10.0.0.0", 24); - s_logger.debug("Create address group in Tungsten-Fabric"); + logger.debug("Create address group in Tungsten-Fabric"); tungstenApi.createTungstenAddressGroup("9291ae28-56cf-448c-b848-f2334b3c86da", "addressgroup2", "10.0.0.0", 24); - s_logger.debug("Create tag type in Tungsten-Fabric"); + logger.debug("Create tag type in Tungsten-Fabric"); tungstenApi.createTungstenTagType("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", "tagtype1"); - s_logger.debug("Create firewall rule in Tungsten-Fabric"); + logger.debug("Create firewall rule in 
Tungsten-Fabric"); ApiObjectBase firewallRule1 = tungstenApi.createTungstenFirewallRule("124d0792-e890-4b7e-8fe8-1b7a6d63c66a", "005f0dea-0196-11ec-a1ed-b42e99f6e187", "firewallrule1", "pass", "baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "88729834-3ebd-413a-adf9-40aff73cf638", null, ">", @@ -1694,13 +1695,13 @@ public void listTungstenFirewallRuleTest() { fwRuleList1.sort(comparator); List fwRuleList2 = List.of(firewallRule1); - s_logger.debug("Check if firewall rule set was listed all with firewall policy in Tungsten-Fabric"); + logger.debug("Check if firewall rule set was listed all with firewall policy in Tungsten-Fabric"); List fwRuleList3 = tungstenApi.listTungstenFirewallRule( "005f0dea-0196-11ec-a1ed-b42e99f6e187", null); fwRuleList3.sort(comparator); assertEquals(fwRuleList1, fwRuleList3); - s_logger.debug("Check if firewall rule set was listed with uuid and firewall policy in Tungsten-Fabric"); + logger.debug("Check if firewall rule set was listed with uuid and firewall policy in Tungsten-Fabric"); List fwRuleList4 = tungstenApi.listTungstenFirewallRule( "005f0dea-0196-11ec-a1ed-b42e99f6e187", "124d0792-e890-4b7e-8fe8-1b7a6d63c66a"); assertEquals(fwRuleList2, fwRuleList4); @@ -1708,7 +1709,7 @@ public void listTungstenFirewallRuleTest() { @Test public void listTungstenServiceGroupTest() { - s_logger.debug("Create service group in Tungsten-Fabric"); + logger.debug("Create service group in Tungsten-Fabric"); ApiObjectBase serviceGroup1 = tungstenApi.createTungstenServiceGroup("005f0dea-0196-11ec-a1ed-b42e99f6e187", "serviceGroup1", "tcp", 80, 80); ApiObjectBase serviceGroup2 = tungstenApi.createTungstenServiceGroup("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", @@ -1717,12 +1718,12 @@ public void listTungstenServiceGroupTest() { serviceGroupList1.sort(comparator); List serviceGroupList2 = List.of(serviceGroup1); - s_logger.debug("Check if service group was listed all in Tungsten-Fabric"); + logger.debug("Check if service group was 
listed all in Tungsten-Fabric"); List serviceGroupList3 = tungstenApi.listTungstenServiceGroup(null); serviceGroupList3.sort(comparator); assertEquals(serviceGroupList1, serviceGroupList3); - s_logger.debug("Check if tag type was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if tag type was listed with uuid in Tungsten-Fabric"); List serviceGroupList4 = tungstenApi.listTungstenServiceGroup( "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(serviceGroupList2, serviceGroupList4); @@ -1730,7 +1731,7 @@ public void listTungstenServiceGroupTest() { @Test public void listTungstenAddressGroupTest() { - s_logger.debug("Create address group in Tungsten-Fabric"); + logger.debug("Create address group in Tungsten-Fabric"); ApiObjectBase addressGroup1 = tungstenApi.createTungstenAddressGroup("005f0dea-0196-11ec-a1ed-b42e99f6e187", "addressGroup1", "10.0.0.0", 24); ApiObjectBase addressGroup2 = tungstenApi.createTungstenAddressGroup("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", @@ -1739,12 +1740,12 @@ public void listTungstenAddressGroupTest() { addressGroupList1.sort(comparator); List addressGroupList2 = List.of(addressGroup1); - s_logger.debug("Check if service group was listed all in Tungsten-Fabric"); + logger.debug("Check if service group was listed all in Tungsten-Fabric"); List addressGroupList3 = tungstenApi.listTungstenAddressGroup(null); addressGroupList3.sort(comparator); assertEquals(addressGroupList1, addressGroupList3); - s_logger.debug("Check if service group was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if service group was listed with uuid in Tungsten-Fabric"); List addressGroupList4 = tungstenApi.listTungstenAddressGroup( "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(addressGroupList2, addressGroupList4); @@ -1752,20 +1753,20 @@ public void listTungstenAddressGroupTest() { @Test public void removeTungstenNetworkPolicyRuleTest() { - s_logger.debug("Create policy in Tungsten-Fabric"); + logger.debug("Create policy in 
Tungsten-Fabric"); tungstenApi.createTungstenPolicy("005f0dea-0196-11ec-a1ed-b42e99f6e187", "policy", projectUuid); - s_logger.debug("Add policy rule in Tungsten-Fabric"); + logger.debug("Add policy rule in Tungsten-Fabric"); tungstenApi.addTungstenPolicyRule("c1680d93-2614-4f99-a8c5-d4f11b3dfc9d", "005f0dea-0196-11ec-a1ed-b42e99f6e187", "pass", "tcp", ">", "network1", "192.168.100.0", 24, 8080, 8081, "network2", "10.0.0.0", 16, 80, 81); - s_logger.debug("Check if policy rule was add to network policy in Tungsten-Fabric"); + logger.debug("Check if policy rule was add to network policy in Tungsten-Fabric"); NetworkPolicy networkPolicy1 = (NetworkPolicy) tungstenApi.getTungstenObject(NetworkPolicy.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(1, networkPolicy1.getEntries().getPolicyRule().size()); - s_logger.debug("Check if policy rule was remove from network policy in Tungsten-Fabric"); + logger.debug("Check if policy rule was remove from network policy in Tungsten-Fabric"); assertNotNull(tungstenApi.removeTungstenNetworkPolicyRule("005f0dea-0196-11ec-a1ed-b42e99f6e187", "c1680d93-2614-4f99-a8c5-d4f11b3dfc9d")); NetworkPolicy networkPolicy2 = (NetworkPolicy) tungstenApi.getTungstenObject(NetworkPolicy.class, @@ -1781,10 +1782,10 @@ public void updateTungstenVrouterConfig() { @Test public void deleteTungstenObjectTest() { - s_logger.debug("Create tag type in Tungsten-Fabric"); + logger.debug("Create tag type in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenTagType("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype")); - s_logger.debug("Check if tag type was deleted in Tungsten-Fabric"); + logger.debug("Check if tag type was deleted in Tungsten-Fabric"); ApiObjectBase apiObjectBase = tungstenApi.getTungstenObject(TagType.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertTrue(tungstenApi.deleteTungstenObject(apiObjectBase)); @@ -1793,17 +1794,17 @@ public void deleteTungstenObjectTest() { @Test public void 
deleteTungstenObjectWithUuidTest() { - s_logger.debug("Create tag type in Tungsten-Fabric"); + logger.debug("Create tag type in Tungsten-Fabric"); assertNotNull(tungstenApi.createTungstenTagType("005f0dea-0196-11ec-a1ed-b42e99f6e187", "tagtype")); - s_logger.debug("Check if tag type was deleted in Tungsten-Fabric"); + logger.debug("Check if tag type was deleted in Tungsten-Fabric"); assertTrue(tungstenApi.deleteTungstenObject(TagType.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); assertNull(tungstenApi.getTungstenObject(TagType.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); } @Test public void getTungstenListObjectTest() { - s_logger.debug("Create network in Tungsten-Fabric"); + logger.debug("Create network in Tungsten-Fabric"); VirtualNetwork network1 = tungstenApi.createTungstenNetwork("005f0dea-0196-11ec-a1ed-b42e99f6e187", "network1", "network1", projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); @@ -1814,12 +1815,12 @@ public void getTungstenListObjectTest() { list1.sort(comparator); List list2 = List.of(network1); - s_logger.debug("Check if network was listed all in Tungsten-Fabric"); + logger.debug("Check if network was listed all in Tungsten-Fabric"); List list3 = tungstenApi.getTungstenListObject(VirtualNetwork.class, project, null); list3.sort(comparator); assertEquals(list1, list3); - s_logger.debug("Check if network was listed with uuid in Tungsten-Fabric"); + logger.debug("Check if network was listed with uuid in Tungsten-Fabric"); List list4 = tungstenApi.getTungstenListObject(VirtualNetwork.class, null, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(list2, list4); @@ -1829,33 +1830,33 @@ public void getTungstenListObjectTest() { public void addInstanceToSecurityGroupTest() { String projectFqn = TungstenApi.TUNGSTEN_DEFAULT_DOMAIN + ":" + TungstenApi.TUNGSTEN_DEFAULT_PROJECT; - s_logger.debug("Create a security group in Tungsten-Fabric."); + logger.debug("Create a security group in 
Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenSecurityGroup(tungstenSecurityGroupUuid, tungstenSecurityGroupName, "TungstenSecurityGroupDescription", projectFqn)); - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, "")); - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); assertNotNull(tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName)); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); assertNotNull( tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true)); - s_logger.debug("Check if instance have no security group in Tungsten-Fabric."); + logger.debug("Check if instance have no security group in Tungsten-Fabric."); VirtualMachineInterface virtualMachineInterface1 = (VirtualMachineInterface) tungstenApi.getTungstenObject( VirtualMachineInterface.class, vmiUuid); assertNull(virtualMachineInterface1.getSecurityGroup()); assertFalse(virtualMachineInterface1.getPortSecurityEnabled()); - s_logger.debug("Add instance to security group in Tungsten-Fabric."); + logger.debug("Add instance to security group in Tungsten-Fabric."); tungstenApi.addInstanceToSecurityGroup(vmiUuid, List.of(tungstenSecurityGroupUuid)); - s_logger.debug("Check if instance was added to security group in Tungsten-Fabric."); + logger.debug("Check if instance was added to security group in Tungsten-Fabric."); VirtualMachineInterface virtualMachineInterface2 = (VirtualMachineInterface) tungstenApi.getTungstenObject( 
VirtualMachineInterface.class, vmiUuid); assertEquals(1, virtualMachineInterface2.getSecurityGroup().size()); @@ -1867,33 +1868,33 @@ public void addInstanceToSecurityGroupTest() { public void removeInstanceFromSecurityGroupTest() { String projectFqn = TungstenApi.TUNGSTEN_DEFAULT_DOMAIN + ":" + TungstenApi.TUNGSTEN_DEFAULT_PROJECT; - s_logger.debug("Create a security group in Tungsten-Fabric."); + logger.debug("Create a security group in Tungsten-Fabric."); tungstenApi.createTungstenSecurityGroup(tungstenSecurityGroupUuid, tungstenSecurityGroupName, "TungstenSecurityGroupDescription", projectFqn); - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Add instance to security group in Tungsten-Fabric."); + logger.debug("Add instance to security group in Tungsten-Fabric."); tungstenApi.addInstanceToSecurityGroup(vmiUuid, List.of(tungstenSecurityGroupUuid)); - s_logger.debug("Check if instance was added to security group in Tungsten-Fabric."); + logger.debug("Check if instance was added to security group in Tungsten-Fabric."); VirtualMachineInterface virtualMachineInterface1 = (VirtualMachineInterface) tungstenApi.getTungstenObject( VirtualMachineInterface.class, vmiUuid); assertEquals(1, 
virtualMachineInterface1.getSecurityGroup().size()); - s_logger.debug("Remove instance from security group in Tungsten-Fabric."); + logger.debug("Remove instance from security group in Tungsten-Fabric."); assertTrue(tungstenApi.removeInstanceFromSecurityGroup(vmiUuid, List.of(tungstenSecurityGroupUuid))); - s_logger.debug("Check if instance was removed from security group in Tungsten-Fabric."); + logger.debug("Check if instance was removed from security group in Tungsten-Fabric."); VirtualMachineInterface virtualMachineInterface2 = (VirtualMachineInterface) tungstenApi.getTungstenObject( VirtualMachineInterface.class, vmiUuid); assertEquals(0, virtualMachineInterface2.getSecurityGroup().size()); @@ -1902,21 +1903,21 @@ public void removeInstanceFromSecurityGroupTest() { @Test public void addSecondaryIpAddressTest() { - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if secondary ip address was not exist in Tungsten-Fabric."); + logger.debug("Check if secondary ip address was not exist in Tungsten-Fabric."); assertNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "secondaryip")); - s_logger.debug("Check if secondary ip address was added to nic in Tungsten-Fabric."); 
+ logger.debug("Check if secondary ip address was added to nic in Tungsten-Fabric."); assertTrue(tungstenApi.addSecondaryIpAddress(tungstenNetworkUuid, vmiUuid, "secondaryip1", "10.0.0.100")); InstanceIp instanceIp2 = (InstanceIp) tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "secondaryip1"); @@ -1925,7 +1926,7 @@ public void addSecondaryIpAddressTest() { assertEquals(vmiUuid, instanceIp2.getVirtualMachineInterface().get(0).getUuid()); assertTrue(instanceIp2.getSecondary()); - s_logger.debug("Check if secondary ip address with ip v6 was added to nic in Tungsten-Fabric."); + logger.debug("Check if secondary ip address with ip v6 was added to nic in Tungsten-Fabric."); assertTrue(tungstenApi.addSecondaryIpAddress(tungstenNetworkUuid, vmiUuid, "secondaryip2", "fd00::100")); InstanceIp instanceIp3 = (InstanceIp) tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "secondaryip2"); @@ -1935,32 +1936,32 @@ public void addSecondaryIpAddressTest() { @Test public void removeSecondaryIpAddressTest() { - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create virtual machine in Tungsten-Fabric."); + logger.debug("Create virtual machine in Tungsten-Fabric."); tungstenApi.createTungstenVirtualMachine(tungstenVmUuid, tungstenVmName); - s_logger.debug("Create virtual machine interface in Tungsten-Fabric."); + logger.debug("Create virtual machine interface in Tungsten-Fabric."); tungstenApi.createTungstenVmInterface(vmiUuid, vmiName, "02:fc:f3:d6:83:c3", tungstenNetworkUuid, tungstenVmUuid, projectUuid, "10.0.0.1", true); - s_logger.debug("Check if secondary ip address was added to nic in Tungsten-Fabric."); + logger.debug("Check if secondary ip address was added 
to nic in Tungsten-Fabric."); assertTrue(tungstenApi.addSecondaryIpAddress(tungstenNetworkUuid, vmiUuid, "secondaryip", "10.0.0.100")); assertNotNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "secondaryip")); - s_logger.debug("Check if secondary ip address was removed from nic in Tungsten-Fabric."); + logger.debug("Check if secondary ip address was removed from nic in Tungsten-Fabric."); assertTrue(tungstenApi.removeSecondaryIpAddress("secondaryip")); assertNull(tungstenApi.getTungstenObjectByName(InstanceIp.class, null, "secondaryip")); } @Test public void createRoutingLogicalRouterTest() { - s_logger.debug("Check if logical router was not exist in Tungsten-Fabric."); + logger.debug("Check if logical router was not exist in Tungsten-Fabric."); assertNull(tungstenApi.getTungstenObject(LogicalRouter.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); - s_logger.debug("Check if logical router was created in Tungsten-Fabric."); + logger.debug("Check if logical router was created in Tungsten-Fabric."); assertNotNull(tungstenApi.createRoutingLogicalRouter(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "TungstenLogicalRouter")); assertNotNull(tungstenApi.getTungstenObject(LogicalRouter.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); @@ -1968,7 +1969,7 @@ public void createRoutingLogicalRouterTest() { @Test public void listRoutingLogicalRouterTest() { - s_logger.debug("Create logical router in Tungsten-Fabric."); + logger.debug("Create logical router in Tungsten-Fabric."); ApiObjectBase apiObjectBase1 = tungstenApi.createRoutingLogicalRouter(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "logicalRouter1"); ApiObjectBase apiObjectBase2 = tungstenApi.createRoutingLogicalRouter(projectUuid, @@ -1977,7 +1978,7 @@ public void listRoutingLogicalRouterTest() { list1.sort(comparator); List list2 = List.of(apiObjectBase1); - s_logger.debug("Check if logical router was listed all in Tungsten-Fabric."); + logger.debug("Check if logical router was listed 
all in Tungsten-Fabric."); List list3 = tungstenApi.listRoutingLogicalRouter(null); list3.sort(comparator); assertEquals(list1, list3); @@ -1987,19 +1988,19 @@ public void listRoutingLogicalRouterTest() { @Test public void addNetworkGatewayToLogicalRouterTest() { - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, false, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); - s_logger.debug("Create logical router in Tungsten-Fabric."); + logger.debug("Create logical router in Tungsten-Fabric."); tungstenApi.createRoutingLogicalRouter(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "logicalRouter1"); - s_logger.debug("Check if logical router have no network gateway in Tungsten-Fabric."); + logger.debug("Check if logical router have no network gateway in Tungsten-Fabric."); LogicalRouter logicalRouter1 = (LogicalRouter) tungstenApi.getTungstenObject(LogicalRouter.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertNull(logicalRouter1.getVirtualMachineInterface()); - s_logger.debug("Check if network gateway was added to logical router in Tungsten-Fabric."); + logger.debug("Check if network gateway was added to logical router in Tungsten-Fabric."); assertNotNull( tungstenApi.addNetworkGatewayToLogicalRouter(tungstenNetworkUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "192.168.100.100")); @@ -2010,14 +2011,14 @@ public void addNetworkGatewayToLogicalRouterTest() { @Test public void removeNetworkGatewayFromLogicalRouterTest() { - s_logger.debug("Create virtual network in Tungsten-Fabric."); + logger.debug("Create virtual network in Tungsten-Fabric."); tungstenApi.createTungstenNetwork(tungstenNetworkUuid, tungstenNetworkName, tungstenNetworkName, projectUuid, false, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, 
false, ""); - s_logger.debug("Create logical router in Tungsten-Fabric."); + logger.debug("Create logical router in Tungsten-Fabric."); tungstenApi.createRoutingLogicalRouter(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "logicalRouter1"); - s_logger.debug("Check if network gateway was added to logical router in Tungsten-Fabric."); + logger.debug("Check if network gateway was added to logical router in Tungsten-Fabric."); assertNotNull( tungstenApi.addNetworkGatewayToLogicalRouter(tungstenNetworkUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "192.168.100.100")); @@ -2025,7 +2026,7 @@ public void removeNetworkGatewayFromLogicalRouterTest() { "005f0dea-0196-11ec-a1ed-b42e99f6e187"); assertEquals(1, logicalRouter1.getVirtualMachineInterface().size()); - s_logger.debug("Check if network gateway was removed from logical router in Tungsten-Fabric."); + logger.debug("Check if network gateway was removed from logical router in Tungsten-Fabric."); assertNotNull(tungstenApi.removeNetworkGatewayFromLogicalRouter(tungstenNetworkUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187")); LogicalRouter logicalRouter2 = (LogicalRouter) tungstenApi.getTungstenObject(LogicalRouter.class, @@ -2035,7 +2036,7 @@ public void removeNetworkGatewayFromLogicalRouterTest() { @Test public void listConnectedNetworkFromLogicalRouterTest() { - s_logger.debug("Create network in Tungsten-Fabric"); + logger.debug("Create network in Tungsten-Fabric"); VirtualNetwork virtualNetwork1 = tungstenApi.createTungstenNetwork("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "network1", "network1", projectUuid, true, false, "10.0.0.0", 24, "10.0.0.1", true, null, "10.0.0.10", "10.0.0.20", false, false, ""); @@ -2045,16 +2046,16 @@ public void listConnectedNetworkFromLogicalRouterTest() { List list1 = Arrays.asList(virtualNetwork1, virtualNetwork2); list1.sort(comparator); - s_logger.debug("Create logical router in Tungsten-Fabric."); + logger.debug("Create logical router in Tungsten-Fabric."); 
tungstenApi.createRoutingLogicalRouter(projectUuid, "005f0dea-0196-11ec-a1ed-b42e99f6e187", "logicalRouter"); - s_logger.debug("Add network gateway to logical router in Tungsten-Fabric."); + logger.debug("Add network gateway to logical router in Tungsten-Fabric."); tungstenApi.addNetworkGatewayToLogicalRouter("6b062909-ba9d-4cf3-bbd3-7db93cf6b4fe", "005f0dea-0196-11ec-a1ed-b42e99f6e187", "192.168.100.100"); tungstenApi.addNetworkGatewayToLogicalRouter("baf714fa-80a1-454f-9c32-c4d4a6f5c5a4", "005f0dea-0196-11ec-a1ed-b42e99f6e187", "192.168.100.101"); - s_logger.debug("Check if connected network in logical router was listed in Tungsten-Fabric."); + logger.debug("Check if connected network in logical router was listed in Tungsten-Fabric."); LogicalRouter logicalRouter = (LogicalRouter) tungstenApi.getTungstenObject(LogicalRouter.class, "005f0dea-0196-11ec-a1ed-b42e99f6e187"); List list2 = tungstenApi.listConnectedNetworkFromLogicalRouter(logicalRouter); diff --git a/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java b/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java index ce6baaa15e90..1618c9710f87 100644 --- a/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java +++ b/plugins/network-elements/vxlan/src/main/java/com/cloud/network/guru/VxlanGuestNetworkGuru.java @@ -19,7 +19,6 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; @@ -47,7 +46,6 @@ @Component public class VxlanGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(VxlanGuestNetworkGuru.class); public VxlanGuestNetworkGuru() { super(); @@ -62,7 +60,7 @@ protected boolean canHandle(NetworkOffering offering, final NetworkType networkT 
isMyIsolationMethod(physicalNetwork)) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " or " + GuestType.L2 + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " or " + GuestType.L2 + " in zone of type " + NetworkType.Advanced); return false; } } @@ -151,7 +149,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat public void shutdown(NetworkProfile profile, NetworkOffering offering) { NetworkVO networkObject = _networkDao.findById(profile.getId()); if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vxlan || networkObject.getBroadcastUri() == null) { - s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); return; } diff --git a/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolOutOfBandManagementDriver.java b/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolOutOfBandManagementDriver.java index 2c42554afccf..2fe747594105 100644 --- a/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolOutOfBandManagementDriver.java +++ b/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolOutOfBandManagementDriver.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverCommand; import org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverPowerCommand; import org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverResponse; -import org.apache.log4j.Logger; import 
org.joda.time.Duration; import java.util.Arrays; @@ -39,7 +38,6 @@ import org.apache.commons.lang3.StringUtils; public final class IpmitoolOutOfBandManagementDriver extends AdapterBase implements OutOfBandManagementDriver, Configurable { - public static final Logger LOG = Logger.getLogger(IpmitoolOutOfBandManagementDriver.class); private static volatile boolean isDriverEnabled = false; private static boolean isIpmiToolBinAvailable = false; @@ -70,14 +68,14 @@ private String getIpmiUserId(ImmutableMap op if (!output.isSuccess()) { String oneLineCommand = StringUtils.join(ipmiToolCommands, " "); String message = String.format("Failed to find IPMI user [%s] to change password. Command [%s], error [%s].", username, oneLineCommand, output.getError()); - LOG.debug(message); + logger.debug(message); throw new CloudRuntimeException(message); } final String userId = IPMITOOL.findIpmiUser(output.getResult(), username); if (StringUtils.isEmpty(userId)) { String message = String.format("No IPMI user ID found for the username [%s].", username); - LOG.debug(message); + logger.debug(message); throw new CloudRuntimeException(message); } return userId; @@ -88,7 +86,7 @@ public OutOfBandManagementDriverResponse execute(final OutOfBandManagementDriver initDriver(); if (!isIpmiToolBinAvailable) { String message = "Aborting operation due to ipmitool binary not available for execution."; - LOG.debug(message); + logger.debug(message); return new OutOfBandManagementDriverResponse(null, message, false); } } @@ -96,7 +94,7 @@ public OutOfBandManagementDriverResponse execute(final OutOfBandManagementDriver OutOfBandManagementDriverResponse response = new OutOfBandManagementDriverResponse(null, "Unsupported Command", false); if (!isDriverEnabled) { String message = "Driver not enabled or shutdown."; - LOG.debug(message); + logger.debug(message); response.setError(message); return response; } @@ -108,7 +106,7 @@ public OutOfBandManagementDriverResponse execute(final OutOfBandManagementDriver 
if (response != null && !response.isSuccess() && response.getError().contains("RAKP 2 HMAC is invalid")) { String message = String.format("Setting authFailure as 'true' due to [%s].", response.getError()); - LOG.debug(message); + logger.debug(message); response.setAuthFailure(true); } return response; @@ -126,12 +124,12 @@ private OutOfBandManagementDriverResponse execute(final OutOfBandManagementDrive String result = response.getResult().trim(); if (response.isSuccess()) { - LOG.debug(String.format("The command [%s] was successful and got the result [%s].", oneLineCommand, result)); + logger.debug(String.format("The command [%s] was successful and got the result [%s].", oneLineCommand, result)); if (cmd.getPowerOperation().equals(OutOfBandManagement.PowerOperation.STATUS)) { response.setPowerState(IPMITOOL.parsePowerState(result)); } } else { - LOG.debug(String.format("The command [%s] failed and got the result [%s]. Error: [%s].", oneLineCommand, result, response.getError())); + logger.debug(String.format("The command [%s] failed and got the result [%s]. 
Error: [%s].", oneLineCommand, result, response.getError())); } return response; } @@ -150,10 +148,10 @@ private void initDriver() { final OutOfBandManagementDriverResponse output = IPMITOOL.executeCommands(Arrays.asList(IpmiToolPath.value(), "-V")); if (output.isSuccess() && output.getResult().startsWith("ipmitool version")) { isIpmiToolBinAvailable = true; - LOG.debug(String.format("OutOfBandManagementDriver ipmitool initialized [%s].", output.getResult())); + logger.debug(String.format("OutOfBandManagementDriver ipmitool initialized [%s].", output.getResult())); } else { isIpmiToolBinAvailable = false; - LOG.error(String.format("OutOfBandManagementDriver ipmitool failed initialization with error [%s]; standard output [%s].", output.getError(), output.getResult())); + logger.error(String.format("OutOfBandManagementDriver ipmitool failed initialization with error [%s]; standard output [%s].", output.getError(), output.getResult())); } } diff --git a/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolWrapper.java b/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolWrapper.java index 6fe98c0925c6..86ec615e5523 100644 --- a/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolWrapper.java +++ b/plugins/outofbandmanagement-drivers/ipmitool/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/ipmitool/IpmitoolWrapper.java @@ -25,7 +25,8 @@ import org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverResponse; import org.apache.cloudstack.utils.process.ProcessResult; import org.apache.cloudstack.utils.process.ProcessRunner; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.joda.time.Duration; import java.util.ArrayList; @@ -33,7 +34,7 @@ import 
java.util.concurrent.ExecutorService; public final class IpmitoolWrapper { - public static final Logger LOG = Logger.getLogger(IpmitoolWrapper.class); + protected Logger logger = LogManager.getLogger(getClass()); private final ProcessRunner RUNNER; @@ -155,7 +156,7 @@ public OutOfBandManagementDriverResponse executeCommands(final List comm public OutOfBandManagementDriverResponse executeCommands(final List commands, final Duration timeOut) { final ProcessResult result = RUNNER.executeCommands(commands, timeOut); - if (LOG.isTraceEnabled()) { + if (logger.isTraceEnabled()) { List cleanedCommands = new ArrayList(); int maskNextCommand = 0; for (String command : commands) { @@ -171,7 +172,7 @@ public OutOfBandManagementDriverResponse executeCommands(final List comm } cleanedCommands.add(command); } - LOG.trace("Executed ipmitool process with commands: " + StringUtils.join(cleanedCommands, ", ") + + logger.trace("Executed ipmitool process with commands: " + StringUtils.join(cleanedCommands, ", ") + "\nIpmitool execution standard output: " + result.getStdOutput() + "\nIpmitool execution error output: " + result.getStdError()); } diff --git a/plugins/outofbandmanagement-drivers/nested-cloudstack/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/nestedcloudstack/NestedCloudStackOutOfBandManagementDriver.java b/plugins/outofbandmanagement-drivers/nested-cloudstack/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/nestedcloudstack/NestedCloudStackOutOfBandManagementDriver.java index fcf2caa6b96a..7f73e1dda88b 100644 --- a/plugins/outofbandmanagement-drivers/nested-cloudstack/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/nestedcloudstack/NestedCloudStackOutOfBandManagementDriver.java +++ b/plugins/outofbandmanagement-drivers/nested-cloudstack/src/main/java/org/apache/cloudstack/outofbandmanagement/driver/nestedcloudstack/NestedCloudStackOutOfBandManagementDriver.java @@ -31,14 +31,12 @@ import 
org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverCommand; import org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverPowerCommand; import org.apache.cloudstack.outofbandmanagement.driver.OutOfBandManagementDriverResponse; -import org.apache.log4j.Logger; import java.io.IOException; import java.util.List; import java.util.Map; public final class NestedCloudStackOutOfBandManagementDriver extends AdapterBase implements OutOfBandManagementDriver { - private static final Logger LOG = Logger.getLogger(NestedCloudStackOutOfBandManagementDriver.class); public OutOfBandManagementDriverResponse execute(final OutOfBandManagementDriverCommand cmd) { OutOfBandManagementDriverResponse response = new OutOfBandManagementDriverResponse(null, "Unsupported Command", false); @@ -79,7 +77,7 @@ protected OutOfBandManagement.PowerState getNestedVMPowerState(final String json } } } catch (IOException e) { - LOG.warn("Exception caught while de-serializing and reading state of the nested-cloudstack VM from the response: " + jsonResponse + ", with exception:", e); + logger.warn("Exception caught while de-serializing and reading state of the nested-cloudstack VM from the response: " + jsonResponse + ", with exception:", e); } return OutOfBandManagement.PowerState.Unknown; } @@ -130,7 +128,7 @@ private OutOfBandManagementDriverResponse execute(final OutOfBandManagementDrive try { apiResponse = client.executeRequest(apacheCloudStackRequest); } catch (final ApacheCloudStackClientRequestRuntimeException e) { - LOG.error("Nested CloudStack oobm plugin failed due to API error: ", e); + logger.error("Nested CloudStack oobm plugin failed due to API error: ", e); final OutOfBandManagementDriverResponse failedResponse = new OutOfBandManagementDriverResponse(e.getResponse(), "HTTP error code: " + e.getStatusCode(), false); if (e.getStatusCode() == 401) { failedResponse.setAuthFailure(true); diff --git 
a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/CancelShutdownCmd.java b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/CancelShutdownCmd.java index fe6204fd0cc3..aa90d7fcbdc9 100644 --- a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/CancelShutdownCmd.java +++ b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/CancelShutdownCmd.java @@ -19,7 +19,6 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; -import org.apache.log4j.Logger; import com.cloud.user.Account; @@ -35,7 +34,6 @@ public class CancelShutdownCmd extends BaseShutdownActionCmd { - public static final Logger LOG = Logger.getLogger(CancelShutdownCmd.class); public static final String APINAME = "cancelShutdown"; @Override diff --git a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/PrepareForShutdownCmd.java b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/PrepareForShutdownCmd.java index 01ea1797a105..c86d28560470 100644 --- a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/PrepareForShutdownCmd.java +++ b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/PrepareForShutdownCmd.java @@ -20,7 +20,6 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; -import org.apache.log4j.Logger; import com.cloud.user.Account; @@ -34,7 +33,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class PrepareForShutdownCmd extends BaseShutdownActionCmd { - public static final Logger LOG = Logger.getLogger(PrepareForShutdownCmd.class); public static final String APINAME = "prepareForShutdown"; @Override diff --git a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java index 1e6b3e1a2d5e..de4db9c04284 100644 --- 
a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java +++ b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.ManagementServerResponse; import org.apache.cloudstack.api.response.ReadyForShutdownResponse; import org.apache.cloudstack.shutdown.ShutdownManager; -import org.apache.log4j.Logger; import com.cloud.user.Account; @APICommand(name = ReadyForShutdownCmd.APINAME, @@ -35,7 +34,6 @@ responseObject = ReadyForShutdownResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ReadyForShutdownCmd extends BaseCmd { - public static final Logger LOG = Logger.getLogger(ReadyForShutdownCmd.class); public static final String APINAME = "readyForShutdown"; @Inject diff --git a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/TriggerShutdownCmd.java b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/TriggerShutdownCmd.java index 3abde0b1f3b1..b4ef7c1f67a6 100644 --- a/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/TriggerShutdownCmd.java +++ b/plugins/shutdown/src/main/java/org/apache/cloudstack/api/command/TriggerShutdownCmd.java @@ -19,7 +19,6 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseCmd; -import org.apache.log4j.Logger; import com.cloud.user.Account; @@ -33,7 +32,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}) public class TriggerShutdownCmd extends BaseShutdownActionCmd { - public static final Logger LOG = Logger.getLogger(TriggerShutdownCmd.class); public static final String APINAME = "triggerShutdown"; ///////////////////////////////////////////////////// diff --git a/plugins/shutdown/src/main/java/org/apache/cloudstack/shutdown/ShutdownManagerImpl.java b/plugins/shutdown/src/main/java/org/apache/cloudstack/shutdown/ShutdownManagerImpl.java index 
b8f5fb57155d..955390ec0978 100644 --- a/plugins/shutdown/src/main/java/org/apache/cloudstack/shutdown/ShutdownManagerImpl.java +++ b/plugins/shutdown/src/main/java/org/apache/cloudstack/shutdown/ShutdownManagerImpl.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.shutdown.command.PrepareForShutdownManagementServerHostCommand; import org.apache.cloudstack.shutdown.command.TriggerShutdownManagementServerHostCommand; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import com.cloud.agent.api.Command; import com.cloud.cluster.ClusterManager; @@ -49,8 +48,6 @@ import com.google.gson.Gson; public class ShutdownManagerImpl extends ManagerBase implements ShutdownManager, PluggableService{ - - private static Logger logger = Logger.getLogger(ShutdownManagerImpl.class); Gson gson; @Inject diff --git a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java index 9787e618f0fc..78a588a2cf9f 100644 --- a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java +++ b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -32,7 +31,6 @@ import com.cloud.vm.VirtualMachineProfile; public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(RandomStoragePoolAllocator.class); @Override public List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) { @@ -45,21 +43,21 @@ public List select(DiskProfile dskCh, 
VirtualMachineProfile vmProfi Long clusterId = plan.getClusterId(); if (podId == null) { - s_logger.debug("RandomStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage."); + logger.debug("RandomStoragePoolAllocator is returning null since the pod ID is null. This may be a zone wide storage."); return null; } - s_logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s].", dcId, podId, clusterId)); + logger.debug(String.format("Looking for pools in dc [%s], pod [%s] and cluster [%s].", dcId, podId, clusterId)); List pools = storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER); if (pools.size() == 0) { - s_logger.debug(String.format("RandomStoragePoolAllocator found no storage pools available for allocation in dc [%s], pod [%s] and cluster [%s]. Returning an empty list.", + logger.debug(String.format("RandomStoragePoolAllocator found no storage pools available for allocation in dc [%s], pod [%s] and cluster [%s]. 
Returning an empty list.", dcId, podId, clusterId)); return suitablePools; } Collections.shuffle(pools); - s_logger.debug(String.format("RandomStoragePoolAllocator has [%s] pools to check for allocation [%s].", pools.size(), pools)); + logger.debug(String.format("RandomStoragePoolAllocator has [%s] pools to check for allocation [%s].", pools.size(), pools)); for (StoragePoolVO pool : pools) { if (suitablePools.size() == returnUpTo) { @@ -68,7 +66,7 @@ public List select(DiskProfile dskCh, VirtualMachineProfile vmProfi StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { - s_logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool)); + logger.trace(String.format("Found suitable local storage pool [%s], adding to list.", pool)); suitablePools.add(pol); } } diff --git a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java index 71fa2e91bcb0..3b5c47c5029d 100644 --- a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java +++ b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackImageStoreDriverImpl.java @@ -25,7 +25,6 @@ import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand; import com.cloud.host.dao.HostDao; import com.cloud.storage.Upload; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -45,7 +44,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class CloudStackImageStoreDriverImpl extends NfsImageStoreDriverImpl { - private static final Logger s_logger = 
Logger.getLogger(CloudStackImageStoreDriverImpl.class); @Inject ConfigurationDao _configDao; @@ -81,14 +79,14 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF Answer ans = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); ans = new Answer(cmd, false, errMsg); } else { ans = ep.sendMessage(cmd); } if (ans == null || !ans.getResult()) { String errorString = "Unable to create a link for entity at " + installPath + " on ssvm, " + ans.getDetails(); - s_logger.error(errorString); + logger.error(errorString); throw new CloudRuntimeException(errorString); } // Construct actual URL locally now that the symlink exists at SSVM @@ -106,7 +104,7 @@ private String generateCopyUrl(String ipAddress, String uuid) { _sslCopy = Boolean.parseBoolean(sslCfg); } if(_sslCopy && (_ssvmUrlDomain == null || _ssvmUrlDomain.isEmpty())){ - s_logger.warn("Empty secondary storage url domain, ignoring SSL"); + logger.warn("Empty secondary storage url domain, ignoring SSL"); _sslCopy = false; } if (_sslCopy) { @@ -132,14 +130,14 @@ public void deleteEntityExtractUrl(DataStore store, String installPath, String d Answer ans = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); ans = new Answer(cmd, false, errMsg); } else { ans = ep.sendMessage(cmd); } if (ans == null || !ans.getResult()) { String errorString = "Unable to delete the url " + downloadUrl + " for path " + installPath + " on ssvm, " + ans.getDetails(); - s_logger.error(errorString); + logger.error(errorString); throw new CloudRuntimeException(errorString); } diff --git a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java 
b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java index 0e53191fb8ef..fca542a181fb 100644 --- a/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java @@ -26,7 +26,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.StringUtils; @@ -51,7 +52,7 @@ public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle { - private static final Logger s_logger = Logger.getLogger(CloudStackImageStoreLifeCycleImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected ResourceManager _resourceMgr; @Inject @@ -94,7 +95,7 @@ public DataStore initialize(Map dsInfos) { } else { logString = StringUtils.cleanString(url); } - s_logger.info("Trying to add a new data store at " + logString + " to data center " + dcId); + logger.info("Trying to add a new data store at " + logString + " to data center " + dcId); URI uri = null; try { diff --git a/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java b/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java index 3c2bc953d979..9b2f3ddd1001 100644 --- a/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java +++ b/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import 
org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -42,7 +41,6 @@ import com.cloud.utils.storage.S3.S3Utils; public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl { - private static final Logger s_logger = Logger.getLogger(S3ImageStoreDriverImpl.class); @Inject ImageStoreDetailsDao _imageStoreDetailsDao; @@ -88,8 +86,8 @@ public String createEntityExtractUrl(DataStore store, String key, ImageFormat fo */ S3TO s3 = (S3TO)getStoreTO(store); - if(s_logger.isDebugEnabled()) { - s_logger.debug("Generating pre-signed s3 entity extraction URL for object: " + key); + if(logger.isDebugEnabled()) { + logger.debug("Generating pre-signed s3 entity extraction URL for object: " + key); } Date expiration = new Date(); long milliSeconds = expiration.getTime(); @@ -103,7 +101,7 @@ public String createEntityExtractUrl(DataStore store, String key, ImageFormat fo URL s3url = S3Utils.generatePresignedUrl(s3, s3.getBucketName(), key, expiration); - s_logger.info("Pre-Signed URL = " + s3url.toString()); + logger.info("Pre-Signed URL = " + s3url.toString()); return s3url.toString(); } diff --git a/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java b/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java index 062fb70ae63f..5e5069af3fc2 100644 --- a/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/s3/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java @@ -22,7 +22,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; @@ -44,7 +45,7 @@ public class S3ImageStoreLifeCycleImpl 
implements ImageStoreLifeCycle { - private static final Logger s_logger = Logger.getLogger(S3ImageStoreLifeCycleImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected ResourceManager _resourceMgr; @Inject @@ -78,7 +79,7 @@ public DataStore initialize(Map dsInfos) { DataStoreRole role = (DataStoreRole)dsInfos.get("role"); Map details = (Map)dsInfos.get("details"); - s_logger.info("Trying to add a S3 store with endpoint: " + details.get(ApiConstants.S3_END_POINT)); + logger.info("Trying to add a S3 store with endpoint: " + details.get(ApiConstants.S3_END_POINT)); Map imageStoreParameters = new HashMap(); imageStoreParameters.put("name", name); diff --git a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java index 7e1486214bcf..c3a82c421896 100644 --- a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java +++ b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/driver/SwiftImageStoreDriverImpl.java @@ -27,7 +27,6 @@ import com.cloud.configuration.Config; import com.cloud.utils.SwiftUtil; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -53,7 +52,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl { - private static final Logger s_logger = Logger.getLogger(SwiftImageStoreDriverImpl.class); @Inject ImageStoreDetailsDao _imageStoreDetailsDao; @@ -80,7 +78,7 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF if (!result) { String errMsg = "Unable to set Temp-Key: " + 
tempKey; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -91,7 +89,7 @@ public String createEntityExtractUrl(DataStore store, String installPath, ImageF URL swiftUrl = SwiftUtil.generateTempUrl(swiftTO, containerName, objectName, tempKey, urlExpirationInterval); if (swiftUrl != null) { - s_logger.debug("Swift temp-url: " + swiftUrl.toString()); + logger.debug("Swift temp-url: " + swiftUrl.toString()); return swiftUrl.toString(); } @@ -110,7 +108,7 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal EndPoint ep = _epSelector.select(data); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } diff --git a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java index f70eb3fa3e3f..a568270bf2a7 100644 --- a/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/swift/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java @@ -21,7 +21,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -41,7 +42,7 @@ public class SwiftImageStoreLifeCycleImpl implements ImageStoreLifeCycle { - private static final Logger s_logger = Logger.getLogger(SwiftImageStoreLifeCycleImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected ResourceManager _resourceMgr; 
@Inject @@ -66,7 +67,7 @@ public DataStore initialize(Map dsInfos) { Map details = (Map)dsInfos.get("details"); - s_logger.info("Trying to add a swift store at " + url + " in data center " + dcId); + logger.info("Trying to add a swift store at " + url + " in data center " + dcId); // just need to insert an entry in DB Map imageStoreParameters = new HashMap(); diff --git a/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/driver/MinIOObjectStoreDriverImpl.java b/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/driver/MinIOObjectStoreDriverImpl.java index b85383a65e83..7effcb78314b 100644 --- a/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/driver/MinIOObjectStoreDriverImpl.java +++ b/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/driver/MinIOObjectStoreDriverImpl.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.storage.object.BucketObject; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.amazonaws.services.s3.model.AccessControlList; import com.amazonaws.services.s3.model.BucketPolicy; @@ -66,7 +65,6 @@ import io.minio.messages.VersioningConfiguration; public class MinIOObjectStoreDriverImpl extends BaseObjectStoreDriverImpl { - private static final Logger s_logger = Logger.getLogger(MinIOObjectStoreDriverImpl.class); protected static final String ACS_PREFIX = "acs"; @Inject @@ -268,7 +266,7 @@ protected void updateAccountCredentials(final long accountId, final String acces updateNeeded = true; } if (StringUtils.isAllBlank(secretKey, details.get(MINIO_SECRET_KEY))) { - s_logger.error(String.format("Failed to retrieve secret key for MinIO user: %s from store and account details", accessKey)); + logger.error(String.format("Failed to retrieve secret key for MinIO user: %s from store and account details", accessKey)); } if 
(StringUtils.isNotBlank(secretKey) && (!checkIfNotPresent || StringUtils.isBlank(details.get(MINIO_SECRET_KEY)))) { details.put(MINIO_SECRET_KEY, secretKey); @@ -289,23 +287,23 @@ public boolean createUser(long accountId, long storeId) { try { UserInfo userInfo = minioAdminClient.getUserInfo(accessKey); if(userInfo != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Skipping user creation as the user already exists in MinIO store: %s", accessKey)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Skipping user creation as the user already exists in MinIO store: %s", accessKey)); } updateAccountCredentials(accountId, accessKey, userInfo.secretKey(), true); return true; } } catch (NoSuchAlgorithmException | IOException | InvalidKeyException e) { - s_logger.error(String.format("Error encountered while retrieving user: %s for existing MinIO store user check", accessKey), e); + logger.error(String.format("Error encountered while retrieving user: %s for existing MinIO store user check", accessKey), e); return false; } catch (RuntimeException e) { // MinIO lib may throw RuntimeException with code: XMinioAdminNoSuchUser - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Ignoring error encountered while retrieving user: %s for existing MinIO store user check", accessKey)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Ignoring error encountered while retrieving user: %s for existing MinIO store user check", accessKey)); } - s_logger.trace("Exception during MinIO user check", e); + logger.trace("Exception during MinIO user check", e); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("MinIO store user does not exist. Creating user: %s", accessKey)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("MinIO store user does not exist. 
Creating user: %s", accessKey)); } KeyGenerator generator = null; try { diff --git a/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java b/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java index fb7d1a652fcb..9d620b32b544 100644 --- a/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java +++ b/plugins/storage/object/minio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/MinIOObjectStoreLifeCycleImpl.java @@ -28,7 +28,8 @@ import org.apache.cloudstack.storage.object.datastore.ObjectStoreHelper; import org.apache.cloudstack.storage.object.datastore.ObjectStoreProviderManager; import org.apache.cloudstack.storage.object.store.lifecycle.ObjectStoreLifeCycle; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import javax.inject.Inject; import java.util.HashMap; @@ -36,7 +37,7 @@ public class MinIOObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { - private static final Logger s_logger = Logger.getLogger(MinIOObjectStoreLifeCycleImpl.class); + protected Logger logger = LogManager.getLogger(MinIOObjectStoreLifeCycleImpl.class); @Inject ObjectStoreHelper objectStoreHelper; @@ -78,9 +79,9 @@ public DataStore initialize(Map dsInfos) { try { // Test connection by listing buckets minioClient.listBuckets(); - s_logger.debug("Successfully connected to MinIO EndPoint: "+url); + logger.debug("Successfully connected to MinIO EndPoint: "+url); } catch (Exception e) { - s_logger.debug("Error while initializing MinIO Object Store: "+e.getMessage()); + logger.debug("Error while initializing MinIO Object Store: "+e.getMessage()); throw new RuntimeException("Error while initializing MinIO Object Store. 
Invalid credentials or URL"); } diff --git a/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorObjectStoreDriverImpl.java b/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorObjectStoreDriverImpl.java index 5f25a6061a77..b6912483caa6 100644 --- a/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorObjectStoreDriverImpl.java +++ b/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/driver/SimulatorObjectStoreDriverImpl.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; import org.apache.cloudstack.storage.object.BaseObjectStoreDriverImpl; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -37,7 +36,6 @@ import java.util.Map; public class SimulatorObjectStoreDriverImpl extends BaseObjectStoreDriverImpl { - private static final Logger s_logger = Logger.getLogger(SimulatorObjectStoreDriverImpl.class); @Inject ObjectStoreDao _storeDao; diff --git a/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java b/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java index 34e928ced30d..6ceed041e8df 100644 --- a/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java +++ b/plugins/storage/object/simulator/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SimulatorObjectStoreLifeCycleImpl.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.storage.object.datastore.ObjectStoreHelper; import org.apache.cloudstack.storage.object.datastore.ObjectStoreProviderManager; import 
org.apache.cloudstack.storage.object.store.lifecycle.ObjectStoreLifeCycle; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.HashMap; @@ -37,8 +36,6 @@ import java.util.Map; public class SimulatorObjectStoreLifeCycleImpl implements ObjectStoreLifeCycle { - - private static final Logger s_logger = Logger.getLogger(SimulatorObjectStoreLifeCycleImpl.class); @Inject protected ResourceManager _resourceMgr; @Inject diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index d908d48c7dad..87dd67f72af4 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -18,7 +18,6 @@ import java.util.Map; import javax.inject.Inject; -import org.apache.log4j.Logger; import java.util.HashMap; import java.util.List; @@ -94,10 +93,12 @@ import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDriverImpl { - static final Logger s_logger = Logger.getLogger(AdaptiveDataStoreDriverImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private String providerName = null; @@ -159,7 +160,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { CreateCmdResult result = null; try { - s_logger.info("Volume creation starting for data store [" + dataStore.getName() + + logger.info("Volume creation starting for data store [" + dataStore.getName() + "] and data 
object [" + dataObject.getUuid() + "] of type [" + dataObject.getType() + "]"); // quota size of the cloudbyte volume will be increased with the given @@ -192,7 +193,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { volume = api.getVolume(context, dataIn); if (volume != null) { - s_logger.info("Template volume already exists [" + dataObject.getUuid() + "]"); + logger.info("Template volume already exists [" + dataObject.getUuid() + "]"); } } @@ -210,7 +211,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, throw e; } } - s_logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]"); + logger.info("New volume created on remote storage for [" + dataObject.getUuid() + "]"); } // set these from the discovered or created volume before proceeding @@ -225,9 +226,9 @@ public void createAsync(DataStore dataStore, DataObject dataObject, result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); result.setSuccess(true); - s_logger.info("Volume creation complete for [" + dataObject.getUuid() + "]"); + logger.info("Volume creation complete for [" + dataObject.getUuid() + "]"); } catch (Throwable e) { - s_logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e); + logger.error("Volume creation failed for dataObject [" + dataObject.getUuid() + "]: " + e.toString(), e); result = new CreateCmdResult(null, new Answer(null)); result.setResult(e.toString()); result.setSuccess(false); @@ -241,7 +242,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, @Override public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { - s_logger.debug("Delete volume started"); + logger.debug("Delete volume started"); CommandResult result = new CommandResult(); try { StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId()); @@ -257,7 
+258,7 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, result.setResult("Successfully deleted volume"); result.setSuccess(true); } catch (Throwable e) { - s_logger.error("Result to volume delete failed with exception", e); + logger.error("Result to volume delete failed with exception", e); result.setResult(e.toString()); } finally { if (callback != null) @@ -270,7 +271,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, AsyncCompletionCallback callback) { CopyCommandResult result = null; try { - s_logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]"); + logger.info("Copying volume " + srcdata.getUuid() + " to " + destdata.getUuid() + "]"); if (!canCopy(srcdata, destdata)) { throw new CloudRuntimeException( @@ -282,7 +283,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, Map details = _storagePoolDao.getDetails(storagePool.getId()); ProviderAdapter api = getAPI(storagePool, details); - s_logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid()); + logger.info("Copy volume " + srcdata.getUuid() + " to " + destdata.getUuid()); ProviderVolume outVolume; ProviderAdapterContext context = newManagedVolumeContext(destdata); @@ -298,7 +299,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, // if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size // we won't, however, shrink a volume if its smaller. 
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) { - s_logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize()); + logger.info("Resizing volume " + destdata.getUuid() + " to requested target volume size of " + destdata.getSize()); api.resize(context, destIn, destdata.getSize()); } @@ -313,7 +314,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, } persistVolumeData(storagePool, details, destdata, outVolume, connectionId); - s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); + logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); VolumeObjectTO voto = new VolumeObjectTO(); voto.setPath(finalPath); @@ -321,7 +322,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, result = new CopyCommandResult(finalPath, new CopyCmdAnswer(voto)); result.setSuccess(true); } catch (Throwable e) { - s_logger.error("Result to volume copy failed with exception", e); + logger.error("Result to volume copy failed with exception", e); result = new CopyCommandResult(null, null); result.setSuccess(false); result.setResult(e.toString()); @@ -340,20 +341,20 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, @Override public boolean canCopy(DataObject srcData, DataObject destData) { - s_logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":" + logger.debug("canCopy: Checking srcData [" + srcData.getUuid() + ":" + srcData.getType() + ":" + srcData.getDataStore().getId() + " AND destData [" + destData.getUuid() + ":" + destData.getType() + ":" + destData.getDataStore().getId() + "]"); try { if (!isSameProvider(srcData)) { - s_logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!"); + logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!"); return false; 
} if (!isSameProvider(destData)) { - s_logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!"); + logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!"); return false; } - s_logger.debug( + logger.debug( "canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists"); StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId()); Map details = _storagePoolDao.getDetails(srcData.getDataStore().getId()); @@ -381,14 +382,14 @@ public boolean canCopy(DataObject srcData, DataObject destData) { } } } catch (Throwable e) { - s_logger.warn("Problem checking if we canCopy", e); + logger.warn("Problem checking if we canCopy", e); return false; } } @Override public void resize(DataObject data, AsyncCompletionCallback callback) { - s_logger.debug("Resize volume started"); + logger.debug("Resize volume started"); CreateCmdResult result = null; try { @@ -417,12 +418,12 @@ public void resize(DataObject data, AsyncCompletionCallback cal ProviderAdapterContext context = newManagedVolumeContext(data); ProviderAdapterDataObject dataIn = newManagedDataObject(data, poolVO); - if (s_logger.isDebugEnabled()) s_logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize); + if (logger.isDebugEnabled()) logger.debug("Calling provider API to resize volume " + data.getUuid() + " to " + resizeParameter.newSize); api.resize(context, dataIn, resizeParameter.newSize); if (vol.isAttachedVM()) { if (VirtualMachine.State.Running.equals(vol.getAttachedVM().getState())) { - if (s_logger.isDebugEnabled()) s_logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize); + if (logger.isDebugEnabled()) logger.debug("Notify currently attached VM of volume resize for " + data.getUuid() + " to " + resizeParameter.newSize); 
_volumeService.resizeVolumeOnHypervisor(vol.getId(), resizeParameter.newSize, vol.getAttachedVM().getHostId(), vol.getAttachedVM().getInstanceName()); } } @@ -430,7 +431,7 @@ public void resize(DataObject data, AsyncCompletionCallback cal result = new CreateCmdResult(data.getUuid(), new Answer(null)); result.setSuccess(true); } catch (Throwable e) { - s_logger.error("Resize volume failed, please contact cloud support.", e); + logger.error("Resize volume failed, please contact cloud support.", e); result = new CreateCmdResult(null, new Answer(null)); result.setResult(e.toString()); result.setSuccess(false); @@ -445,7 +446,7 @@ public void resize(DataObject data, AsyncCompletionCallback cal @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { - s_logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " + + logger.info("handleQualityOfServiceVolumeMigration: " + volumeInfo.getUuid() + " " + volumeInfo.getPath() + ": " + qualityOfServiceState.toString()); } @@ -475,7 +476,7 @@ public ChapInfo getChapInfo(DataObject dataObject) { public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { CreateCmdResult result = null; try { - s_logger.debug("taking volume snapshot"); + logger.debug("taking volume snapshot"); SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO(); VolumeInfo baseVolume = snapshot.getBaseVolume(); @@ -526,7 +527,7 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback dsInfos) { password = userInfo.split(":")[1]; } - s_logger.info("Registering block storage provider with user=" + username); + logger.info("Registering block storage provider with user=" + username); if (clusterId != null) { @@ -153,7 +154,7 @@ public DataStore initialize(Map dsInfos) { throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage."); } - 
s_logger.info("Registering with clusterid=" + clusterId + " which is confirmed to be a KVM host"); + logger.info("Registering with clusterid=" + clusterId + " which is confirmed to be a KVM host"); } else if (podId != null) { throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage."); @@ -175,7 +176,7 @@ public DataStore initialize(Map dsInfos) { } } - s_logger.info("Validated no other pool exists with this name: " + dsName); + logger.info("Validated no other pool exists with this name: " + dsName); try { PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); @@ -239,10 +240,10 @@ public DataStore initialize(Map dsInfos) { parameters.setCapacityBytes(stats.getCapacityInBytes()); } - s_logger.info("Persisting [" + dsName + "] storage pool metadata to database"); + logger.info("Persisting [" + dsName + "] storage pool metadata to database"); return _dataStoreHelper.createPrimaryDataStore(parameters); } catch (Throwable e) { - s_logger.error("Problem persisting storage pool", e); + logger.error("Problem persisting storage pool", e); throw new CloudRuntimeException(e); } } @@ -266,7 +267,7 @@ private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) { */ @Override public boolean attachCluster(DataStore store, ClusterScope scope) { - s_logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool [" + store.getName() + "] to cluster [" + scope.getScopeId() + "]"); _dataStoreHelper.attachCluster(store); StoragePoolVO dataStoreVO = _storagePoolDao.findById(store.getId()); @@ -282,23 +283,23 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { if (dataStoreVO.isManaged()) { //boolean success = false; for (HostVO h : allHosts) { - s_logger.debug("adding host " + h.getName() + " to storage pool " + store.getName()); + logger.debug("adding host " + h.getName() + " to 
storage pool " + store.getName()); } } - s_logger.debug("In createPool Adding the pool to each of the hosts"); + logger.debug("In createPool Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { _storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); poolHosts.add(h); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); + logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); _primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -308,14 +309,14 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { @Override public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - s_logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool [" + store.getName() + "] to host [" + scope.getScopeId() + "]"); _dataStoreHelper.attachHost(store, scope, existingInfo); return true; } @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - s_logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]"); + logger.info("Attaching storage pool [" + dataStore.getName() + "] to zone [" + scope.getScopeId() + "]"); List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); List poolHosts = new ArrayList(); for (HostVO host : hosts) { @@ -323,11 +324,11 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h 
_storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); poolHosts.add(host); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + dataStore + " in this zone."); + logger.warn("No host can access storage pool " + dataStore + " in this zone."); _primaryDataStoreDao.expunge(dataStore.getId()); throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); } @@ -340,7 +341,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h */ @Override public boolean maintain(DataStore store) { - s_logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode"); + logger.info("Placing storage pool [" + store.getName() + "] in maintainence mode"); if (_storagePoolAutomation.maintain(store)) { return _dataStoreHelper.maintain(store); } else { @@ -353,7 +354,7 @@ public boolean maintain(DataStore store) { */ @Override public boolean cancelMaintain(DataStore store) { - s_logger.info("Canceling storage pool maintainence for [" + store.getName() + "]"); + logger.info("Canceling storage pool maintainence for [" + store.getName() + "]"); if (_dataStoreHelper.cancelMaintain(store)) { return _storagePoolAutomation.cancelMaintain(store); } else { @@ -366,7 +367,7 @@ public boolean cancelMaintain(DataStore store) { */ @Override public boolean deleteDataStore(DataStore store) { - s_logger.info("Delete datastore called for [" + store.getName() + "]"); + logger.info("Delete datastore called for [" + store.getName() + "]"); return _dataStoreHelper.deletePrimaryDataStore(store); } @@ -375,7 +376,7 @@ public boolean deleteDataStore(DataStore store) { */ @Override public boolean migrateToObjectStore(DataStore store) { - s_logger.info("Migrate datastore called for 
[" + store.getName() + "]. This is not currently implemented for this provider at this time"); + logger.info("Migrate datastore called for [" + store.getName() + "]. This is not currently implemented for this provider at this time"); return false; } @@ -392,7 +393,7 @@ public void updateStoragePool(StoragePool storagePool, Map detai */ @Override public void enableStoragePool(DataStore store) { - s_logger.info("Enabling storage pool [" + store.getName() + "]"); + logger.info("Enabling storage pool [" + store.getName() + "]"); _dataStoreHelper.enable(store); } @@ -401,7 +402,7 @@ public void enableStoragePool(DataStore store) { */ @Override public void disableStoragePool(DataStore store) { - s_logger.info("Disabling storage pool [" + store.getName() + "]"); + logger.info("Disabling storage pool [" + store.getName() + "]"); _dataStoreHelper.disable(store); } } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java index ee5caa7178ef..f15f93498b6d 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java @@ -21,13 +21,14 @@ import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapter; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; -import org.apache.log4j.Logger; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class AdaptivePrimaryDatastoreAdapterFactoryMap { - private final Logger logger = 
Logger.getLogger(ProviderAdapter.class); + protected Logger logger = LogManager.getLogger(getClass()); private Map factoryMap = new HashMap(); private Map apiMap = new HashMap(); diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java index 200844702b28..ddb7b5b0444d 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreProviderImpl.java @@ -24,7 +24,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; @@ -35,7 +36,7 @@ @Component public abstract class AdaptivePrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider { - static final Logger s_logger = Logger.getLogger(AdaptivePrimaryDatastoreProviderImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); AdaptiveDataStoreDriverImpl driver; @@ -46,7 +47,7 @@ public abstract class AdaptivePrimaryDatastoreProviderImpl implements PrimaryDat DataStoreLifeCycle lifecycle; AdaptivePrimaryDatastoreProviderImpl(ProviderAdapterFactory f) { - s_logger.info("Creating " + f.getProviderName()); + logger.info("Creating " + f.getProviderName()); factoryMap.register(f); } @@ -57,7 +58,7 @@ public DataStoreLifeCycle getDataStoreLifeCycle() { @Override 
public boolean configure(Map params) { - s_logger.info("Configuring " + getName()); + logger.info("Configuring " + getName()); driver = new AdaptiveDataStoreDriverImpl(factoryMap); driver.setProviderName(getName()); lifecycle = ComponentContext.inject(new AdaptiveDataStoreLifeCycleImpl(factoryMap)); diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java index 68dd4a15c62a..831db2446a54 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java @@ -19,14 +19,15 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; -import org.apache.log4j.Logger; import com.cloud.exception.StorageConflictException; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.dao.StoragePoolHostDao; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class AdaptivePrimaryHostListener implements HypervisorHostListener { - static final Logger s_logger = Logger.getLogger(AdaptivePrimaryHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject StoragePoolHostDao storagePoolHostDao; @@ -37,19 +38,19 @@ public AdaptivePrimaryHostListener(AdaptivePrimaryDatastoreAdapterFactoryMap fac @Override public boolean hostAboutToBeRemoved(long hostId) { - s_logger.debug("hostAboutToBeRemoved called"); + logger.debug("hostAboutToBeRemoved called"); return true; } @Override public boolean hostAdded(long hostId) { - s_logger.debug("hostAdded called"); + logger.debug("hostAdded called"); return true; } @Override public boolean 
hostConnect(long hostId, long poolId) throws StorageConflictException { - s_logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]"); + logger.debug("hostConnect called for hostid [" + hostId + "], poolId [" + poolId + "]"); StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); if (storagePoolHost == null) { storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); @@ -60,7 +61,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep @Override public boolean hostDisconnected(long hostId, long poolId) { - s_logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]"); + logger.debug("hostDisconnected called for hostid [" + hostId + "], poolId [" + poolId + "]"); StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId); if (storagePoolHost != null) { @@ -71,13 +72,13 @@ public boolean hostDisconnected(long hostId, long poolId) { @Override public boolean hostEnabled(long hostId) { - s_logger.debug("hostEnabled called"); + logger.debug("hostEnabled called"); return true; } @Override public boolean hostRemoved(long hostId, long clusterId) { - s_logger.debug("hostRemoved called"); + logger.debug("hostRemoved called"); return true; } } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java index f9e614692338..60359dd2c266 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java @@ -24,7 +24,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import 
org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -70,7 +69,6 @@ */ public class ElastistorPrimaryDataStoreDriver extends CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { - private static final Logger s_logger = Logger.getLogger(ElastistorPrimaryDataStoreDriver.class); @Inject AccountManager _accountMgr; @@ -154,7 +152,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet try { esvolume = ElastistorUtil.createElastistorVolume(volumeName, dataStoreVO.getUuid(), quotaSize, Iops, protocoltype, volumeName); } catch (Throwable e) { - s_logger.error(e.toString(), e); + logger.error(e.toString(), e); result.setResult(e.toString()); callback.complete(result); throw new CloudRuntimeException(e.getMessage()); @@ -191,10 +189,10 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); _storagePoolDao.update(storagePoolId, storagePool); - s_logger.info("Elastistor volume creation complete."); + logger.info("Elastistor volume creation complete."); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; - s_logger.error(errMsg); + logger.error(errMsg); } result.setResult(errMsg); @@ -276,7 +274,7 @@ public boolean canCopy(DataObject srcData, DataObject destData) { @Override public void resize(DataObject data, AsyncCompletionCallback callback) { - s_logger.debug("Resize elastistor volume started"); + logger.debug("Resize elastistor volume started"); Boolean status = false; VolumeObject vol = (VolumeObject) data; StoragePool pool = (StoragePool) data.getDataStore(); @@ -297,7 +295,7 @@ public void resize(DataObject data, AsyncCompletionCallback cal status = ElastistorUtil.updateElastistorVolumeSize(vol.getUuid(), resizeParameter.newSize); } catch (Throwable e) { - s_logger.error("Resize elastistor volume failed, please contact elastistor admin.", e); + logger.error("Resize elastistor volume failed, please contact elastistor admin.", e); result.setResult(e.toString()); callback.complete(result); } @@ -370,7 +368,7 @@ public ChapInfo getChapInfo(DataObject dataObject) { public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { CreateCmdResult result = null; try { - s_logger.info("taking elastistor volume snapshot"); + logger.info("taking elastistor volume snapshot"); SnapshotObjectTO snapshotTO = (SnapshotObjectTO)snapshot.getTO(); String volumeid = snapshotTO.getVolume().getUuid(); @@ -379,10 +377,10 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback dsInfos) { if (details.get("essubnet") != null) ElastistorUtil.setElastistorSubnet(details.get("essubnet")); - s_logger.info("Elastistor details was set successfully."); + logger.info("Elastistor details was set successfully."); if (capacityBytes == null || capacityBytes <= 0) { throw new 
IllegalArgumentException("'capacityBytes' must be present and greater than 0."); @@ -167,7 +168,7 @@ public DataStore initialize(Map dsInfos) { if (domainName == null) { domainName = "ROOT"; - s_logger.debug("setting the domain to ROOT"); + logger.debug("setting the domain to ROOT"); } // elastistor does not allow same name and ip pools. @@ -220,7 +221,7 @@ public DataStore initialize(Map dsInfos) { private Tsm createElastistorTSM(String storagePoolName, String storageIp, Long capacityBytes, Long capacityIops, String domainName) { - s_logger.info("Creation of elastistor TSM started."); + logger.info("Creation of elastistor TSM started."); Tsm tsm; String elastistorAccountId; @@ -231,11 +232,11 @@ private Tsm createElastistorTSM(String storagePoolName, String storageIp, Long c // create the tsm for the given account id tsm = ElastistorUtil.createElastistorTsm(storagePoolName, storageIp, capacityBytes, capacityIops, elastistorAccountId); } catch (Throwable e) { - s_logger.error("Failed to create TSM in elastistor.", e); + logger.error("Failed to create TSM in elastistor.", e); throw new CloudRuntimeException("Failed to create TSM in elastistor. 
" + e.getMessage()); } - s_logger.info("Creation of elastistor TSM completed successfully."); + logger.info("Creation of elastistor TSM completed successfully."); return tsm; } @@ -245,7 +246,7 @@ private PrimaryDataStoreParameters createElastistorVolume(PrimaryDataStoreParame try { - s_logger.info("Creation of elastistor volume started."); + logger.info("Creation of elastistor volume started."); FileSystem volume = ElastistorUtil.createElastistorVolume(storagePoolName, tsm.getUuid(), capacityBytes, capacityIops, protocoltype, mountpoint); @@ -253,11 +254,11 @@ private PrimaryDataStoreParameters createElastistorVolume(PrimaryDataStoreParame String accesspath = "/" + volume.getIqn() + "/0"; parameters.setPath(accesspath); } - s_logger.info("Creation of elastistor volume completed successfully."); + logger.info("Creation of elastistor volume completed successfully."); return parameters; } catch (Throwable e) { - s_logger.error("Failed to create volume in elastistor.", e); + logger.error("Failed to create volume in elastistor.", e); throw new CloudRuntimeException("Failed to create volume in elastistor. 
" + e.getMessage()); } @@ -377,18 +378,18 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } } - s_logger.debug("In createPool Adding the pool to each of the hosts"); + logger.debug("In createPool Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); poolHosts.add(h); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); + logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -398,12 +399,12 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } private boolean createStoragePool(long hostId, StoragePool pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " + hostId); + logger.debug("creating pool " + pool.getName() + " on host " + hostId); if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD && pool.getPoolType() != StoragePoolType.CLVM) { - s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); + logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); return false; } 
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); @@ -415,10 +416,10 @@ private boolean createStoragePool(long hostId, StoragePool pool) { String msg = ""; if (answer != null) { msg = "Can not create storage pool through host " + hostId + " due to " + answer.getDetails(); - s_logger.warn(msg); + logger.warn(msg); } else { msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); + logger.warn(msg); } throw new CloudRuntimeException(msg); } @@ -433,18 +434,18 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); - s_logger.debug("In createPool. Attaching the pool to each of the hosts."); + logger.debug("In createPool. Attaching the pool to each of the hosts."); List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); poolHosts.add(host); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + dataStore + " in this zone."); + logger.warn("No host can access storage pool " + dataStore + " in this zone."); primaryDataStoreDao.expunge(dataStore.getId()); throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); } @@ -504,7 +505,7 @@ public boolean deleteDataStore(DataStore store) { } } else { if (answer != null) { - s_logger.error("Failed to delete storage pool: " + answer.getResult()); + logger.error("Failed to delete storage pool: " + answer.getResult()); } } } @@ -527,9 +528,9 
@@ private void deleteElastistorVolume(StoragePool pool, boolean managed) { } if (status == true) { - s_logger.info("deletion of elastistor primary storage complete"); + logger.info("deletion of elastistor primary storage complete"); } else { - s_logger.error("deletion of elastistor volume failed"); + logger.error("deletion of elastistor volume failed"); } } @@ -567,7 +568,7 @@ public void updateStoragePool(StoragePool storagePool, Map detai // update the cloudstack db _storagePoolDao.updateCapacityBytes(storagePool.getId(), Long.parseLong(capacityBytes)); - s_logger.info("elastistor TSM storage successfully updated"); + logger.info("elastistor TSM storage successfully updated"); }else{ throw new CloudRuntimeException("Failed to update the storage of Elastistor TSM" + updateTsmStorageCmdResponse.toString()); } @@ -588,7 +589,7 @@ public void updateStoragePool(StoragePool storagePool, Map detai // update the cloudstack db _storagePoolDao.updateCapacityIops(storagePool.getId(), capacity); - s_logger.info("elastistor TSM IOPS successfully updated"); + logger.info("elastistor TSM IOPS successfully updated"); }else{ throw new CloudRuntimeException("Failed to update the IOPS of Elastistor TSM" + updateTsmCmdResponse.toString()); diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java index 971449806d5b..d2307111a816 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorHostListener.java @@ -43,7 +43,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -60,7 +61,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ElastistorHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AgentManager agentMgr; @Inject @@ -117,7 +118,7 @@ public boolean hostConnect(long hostId, long poolId) { assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId; - s_logger.info("Connection established between " + pool + " host + " + hostId); + logger.info("Connection established between " + pool + " and host " + hostId); return true; } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorPrimaryDataStoreProvider.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorPrimaryDataStoreProvider.java index 55326b488308..a6b1848da559 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorPrimaryDataStoreProvider.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/provider/ElastistorPrimaryDataStoreProvider.java @@ -25,7 +25,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; @@ -53,7 +54,7 @@ @Component public class 
ElastistorPrimaryDataStoreProvider implements PrimaryDataStoreProvider { - private static final Logger s_logger = Logger.getLogger(DefaultHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); // these classes will be injected by spring private ElastistorPrimaryDataStoreLifeCycle lifecycle; @@ -97,7 +98,7 @@ public HypervisorHostListener getHostListener() { @Override public boolean configure(Map params) { - s_logger.info("Will configure elastistor's lifecycle, driver, listener & global configurations."); + logger.info("Will configure elastistor's lifecycle, driver, listener & global configurations."); lifecycle = ComponentContext.inject(ElastistorPrimaryDataStoreLifeCycle.class); driver = ComponentContext.inject(ElastistorPrimaryDataStoreDriver.class); @@ -109,7 +110,7 @@ public boolean configure(Map params) { // set the injected configuration object in elastistor util class too!!! ElastistorUtil.setConfigurationDao(configurationDao); - s_logger.info("Successfully configured elastistor's lifecycle, driver, listener & global configurations."); + logger.info("Successfully configured elastistor's lifecycle, driver, listener & global configurations."); return true; } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java index 2f2ad259d6cb..570ac377c208 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorUtil.java @@ -28,7 +28,8 @@ import org.apache.cloudstack.utils.security.SSLUtils; import org.apache.cloudstack.utils.security.SecureSSLSocketFactory; import org.apache.http.auth.InvalidCredentialsException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; 
+import org.apache.logging.log4j.LogManager; import org.glassfish.jersey.client.ClientConfig; import org.glassfish.jersey.client.ClientResponse; @@ -55,7 +56,7 @@ public class ElastistorUtil { - private static final Logger s_logger = Logger.getLogger(ElastistorUtil.class); + protected static Logger LOGGER = LogManager.getLogger(ElastistorUtil.class); private static ConfigurationDao configurationDao; @@ -542,7 +543,7 @@ public static String updateElastistorNfsVolume(String volumeid) throws Throwable UpdateControllerResponse controllerResponse = (UpdateControllerResponse) getElastistorRestClient().executeCommand(controllerCmd); if (controllerResponse.getController().getUuid() != null) { - s_logger.info("updated nfs service to ALL"); + LOGGER.info("updated nfs service to ALL"); return nfsServiceResponse.getNfsService().getDatasetid(); } else { throw new CloudRuntimeException("Updating Nfs Volume Failed"); @@ -617,7 +618,7 @@ public static boolean deleteElastistorTsm(String tsmid, boolean managed) throws if (!managed) { - s_logger.info("elastistor pool is NOT a managed storage , hence deleting the volume then tsm"); + LOGGER.info("elastistor pool is NOT a managed storage , hence deleting the volume then tsm"); String esvolumeid = null; ListTsmsResponse listTsmsResponse = listTsm(tsmid); @@ -633,9 +634,9 @@ public static boolean deleteElastistorTsm(String tsmid, boolean managed) throws int jobstatus = queryAsyncJobResult(jobid); if (jobstatus == 1) { - s_logger.info("elastistor volume successfully deleted"); + LOGGER.info("elastistor volume successfully deleted"); } else { - s_logger.info("now farce deleting the volume"); + LOGGER.info("now force deleting the volume"); while (jobstatus != 1) { DeleteVolumeResponse deleteVolumeResponse1 = deleteVolume(esvolumeid, "true"); @@ -645,17 +646,17 @@ public static boolean deleteElastistorTsm(String tsmid, boolean managed) throws jobstatus = queryAsyncJobResult(jobid1); } } - s_logger.info("elastistor volume successfully 
deleted"); + LOGGER.info("elastistor volume successfully deleted"); } } } else { - s_logger.info("no volume present in on the given tsm"); + LOGGER.info("no volume present in on the given tsm"); } } } - s_logger.info("now trying to delete elastistor tsm"); + LOGGER.info("now trying to delete elastistor tsm"); if (tsmid != null) { DeleteTsmCmd deleteTsmCmd = new DeleteTsmCmd(); @@ -666,22 +667,22 @@ public static boolean deleteElastistorTsm(String tsmid, boolean managed) throws String jobstatus = deleteTsmResponse.getJobStatus(); if (jobstatus.equalsIgnoreCase("true")) { - s_logger.info("deletion of elastistor tsm successful"); + LOGGER.info("deletion of elastistor tsm successful"); return true; } else { - s_logger.info("failed to delete elastistor tsm"); + LOGGER.info("failed to delete elastistor tsm"); return false; } } else { - s_logger.info("elastistor tsm id not present"); + LOGGER.info("elastistor tsm id not present"); } } - s_logger.info("tsm id is null"); + LOGGER.info("tsm id is null"); return false; /* - * else { s_logger.error("no volume is present in the tsm"); } } else { - * s_logger.error( + * else { LOGGER.error("no volume is present in the tsm"); } } else { + * LOGGER.error( * "List tsm failed, no tsm present in the eastistor for the given IP " * ); return false; } return false; */ @@ -700,10 +701,10 @@ public static boolean deleteElastistorVolume(String esvolumeid) throws Throwable int jobstatus = queryAsyncJobResult(jobid); if (jobstatus == 1) { - s_logger.info("elastistor volume successfully deleted"); + LOGGER.info("elastistor volume successfully deleted"); return true; } else { - s_logger.info("now force deleting the volume"); + LOGGER.info("now force deleting the volume"); while (jobstatus != 1) { DeleteVolumeResponse deleteVolumeResponse1 = deleteVolume(esvolumeid, "true"); @@ -713,15 +714,15 @@ public static boolean deleteElastistorVolume(String esvolumeid) throws Throwable jobstatus = queryAsyncJobResult(jobid1); } } - 
s_logger.info("elastistor volume successfully deleted"); + LOGGER.info("elastistor volume successfully deleted"); return true; } } else { - s_logger.info("the given volume is not present on elastistor, datasetrespone is NULL"); + LOGGER.info("the given volume is not present on elastistor, dataset response is NULL"); return false; } } else { - s_logger.info("the given volume is not present on elastistor"); + LOGGER.info("the given volume is not present on elastistor"); return false; } @@ -2498,7 +2499,7 @@ public static UpdateTsmStorageCmdResponse updateElastistorTsmStorage(String capa }else{ quotasize = String.valueOf(quotasize) + "G"; } - s_logger.info("elastistor tsm storage is updating to " + quotasize); + LOGGER.info("elastistor tsm storage is updating to " + quotasize); UpdateTsmStorageCmd updateTsmStorageCmd = new UpdateTsmStorageCmd(); updateTsmStorageCmd.putCommandParameter("id", uuid); @@ -2565,7 +2566,7 @@ public String getsize(){ // update the TSM IOPS public static UpdateTsmCmdResponse updateElastistorTsmIOPS(String capacityIOPs,String uuid) throws Throwable{ - s_logger.info("elastistor tsm IOPS is updating to " + capacityIOPs); + LOGGER.info("elastistor tsm IOPS is updating to " + capacityIOPs); UpdateTsmCmd updateTsmCmd = new UpdateTsmCmd(); String throughput = String.valueOf(Long.parseLong(capacityIOPs)*4); diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java index 709c1fe42658..83f7356eb440 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ElastistorVolumeApiServiceImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import 
javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ListResponse; @@ -47,7 +46,6 @@ @Component public class ElastistorVolumeApiServiceImpl extends ManagerBase implements ElastistorVolumeApiService { - private static final Logger s_logger = Logger.getLogger(ElastistorVolumeApiServiceImpl.class); @Inject protected VolumeDao _volsDao; @@ -74,7 +72,7 @@ public List> getCommands() { cmdList.add(ListElastistorPoolCmd.class); cmdList.add(ListElastistorInterfaceCmd.class); - s_logger.info("Commands were registered successfully with elastistor volume api service. [cmdcount:" + cmdList.size() + "]"); + logger.info("Commands were registered successfully with elastistor volume api service. [cmdcount:" + cmdList.size() + "]"); return cmdList; } @@ -125,7 +123,7 @@ public ListResponse listElastistorVolume(ListElast return response; } catch (Throwable e) { - s_logger.error("Unable to list elastistor volume.", e); + logger.error("Unable to list elastistor volume.", e); throw new CloudRuntimeException("Unable to list elastistor volume. " + e.getMessage()); } } @@ -165,7 +163,7 @@ public ListResponse listElastistorPools(ListElastist return response; } catch (Throwable e) { - s_logger.error("Unable to list elastistor pools.", e); + logger.error("Unable to list elastistor pools.", e); throw new CloudRuntimeException("Unable to list elastistor pools. " + e.getMessage()); } @@ -199,7 +197,7 @@ public ListResponse listElastistorInterfaces(Li return response; } catch (Throwable e) { - s_logger.error("Unable to list elastistor interfaces.", e); + logger.error("Unable to list elastistor interfaces.", e); throw new CloudRuntimeException("Unable to list elastistor interfaces. 
" + e.getMessage()); } diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorInterfaceCmd.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorInterfaceCmd.java index a100f439be86..67062d23c964 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorInterfaceCmd.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorInterfaceCmd.java @@ -25,11 +25,9 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; @APICommand(name = "listElastistorInterface", description = "Lists the network Interfaces of elastistor", responseObject = ListElastistorVolumeResponse.class) public class ListElastistorInterfaceCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListElastistorInterfaceCmd.class.getName()); private static final String s_name = "listElastistorInterfaceResponse"; @Inject diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorPoolCmd.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorPoolCmd.java index d3701b784bdc..32b1fbb64802 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorPoolCmd.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorPoolCmd.java @@ -27,12 +27,10 @@ import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; @APICommand(name = "listElastistorPool", description = "Lists the 
pools of elastistor", responseObject = ListElastistorPoolResponse.class) public class ListElastistorPoolCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListElastistorPoolCmd.class.getName()); private static final String s_name = "listElastistorPoolResponse"; @Inject diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorVolumeCmd.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorVolumeCmd.java index d2b89e388d29..4c55f8c32096 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorVolumeCmd.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/util/ListElastistorVolumeCmd.java @@ -26,11 +26,9 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.log4j.Logger; @APICommand(name = "listElastistorVolume", description = "Lists the volumes of elastistor", responseObject = ListElastistorVolumeResponse.class) public class ListElastistorVolumeCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(ListElastistorVolumeCmd.class.getName()); private static final String s_name = "listElastistorVolumeResponse"; @Inject diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index ae4b699ea7bf..9b3f0531de15 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -47,7 
+47,8 @@ import org.apache.cloudstack.storage.datastore.util.DateraUtil; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -87,7 +88,7 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver { - private static final Logger s_logger = Logger.getLogger(DateraPrimaryDataStoreDriver.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int s_lockTimeInSeconds = 300; private static final int s_lowestHypervisorSnapshotReserve = 10; @@ -166,7 +167,7 @@ public DateraObject.AppInstance getDateraAppInstance(DateraObject.DateraConnecti try { appInstance = DateraUtil.getAppInstance(conn, appInstanceName); } catch (DateraObject.DateraError dateraError) { - s_logger.warn("Error getting appInstance " + appInstanceName, dateraError); + logger.warn("Error getting appInstance " + appInstanceName, dateraError); throw new CloudRuntimeException(dateraError.getMessage()); } @@ -192,7 +193,7 @@ public DateraObject.AppInstance getDateraAppInstance(DateraObject.DateraConnecti @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - s_logger.debug("grantAccess() called"); + logger.debug("grantAccess() called"); Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'"); Preconditions.checkArgument(host != null, "'host' should not be 'null'"); @@ -214,7 +215,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); if (!lock.lock(s_lockTimeInSeconds)) { - s_logger.debug("Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid()); + 
logger.debug("Couldn't lock the DB (in grantAccess) on the following string: " + cluster.getUuid()); } try { @@ -225,18 +226,18 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore List hosts = _hostDao.findByClusterId(clusterId); if (!DateraUtil.hostsSupport_iScsi(hosts)) { - s_logger.debug("hostsSupport_iScsi() :Host does NOT support iscsci"); + logger.debug("hostsSupport_iScsi() :Host does NOT support iscsci"); return false; } // We don't have the initiator group, create one String initiatorGroupName = DateraUtil.INITIATOR_GROUP_PREFIX + "-" + cluster.getUuid(); - s_logger.debug("Will use initiator group " + String.valueOf(initiatorGroupName)); + logger.debug("Will use initiator group " + String.valueOf(initiatorGroupName)); initiatorGroup = DateraUtil.getInitiatorGroup(conn, initiatorGroupName); if (initiatorGroup == null) { - s_logger.debug("create initiator group " + String.valueOf(initiatorGroupName)); + logger.debug("create initiator group " + String.valueOf(initiatorGroupName)); initiatorGroup = DateraUtil.createInitiatorGroup(conn, initiatorGroupName); // Save it to the DB ClusterDetailsVO clusterDetail = new ClusterDetailsVO(clusterId, initiatorGroupKey, initiatorGroupName); @@ -265,17 +266,17 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore Preconditions.checkArgument(isInitiatorGroupAssignedToAppInstance(conn, initiatorGroup, appInstance), "Initgroup is not assigned to appinstance"); // FIXME: Sleep anyways - s_logger.debug("sleep " + String.valueOf(DateraUtil.POLL_TIMEOUT_MS) + " msec for ACL to be applied"); + logger.debug("sleep " + String.valueOf(DateraUtil.POLL_TIMEOUT_MS) + " msec for ACL to be applied"); Thread.sleep(DateraUtil.POLL_TIMEOUT_MS); // ms - s_logger.debug( + logger.debug( "Initiator group " + String.valueOf(initiatorGroupName) + " is assigned to " + appInstanceName); } return true; } catch (DateraObject.DateraError | UnsupportedEncodingException | InterruptedException 
dateraError) { - s_logger.warn(dateraError.getMessage(), dateraError); + logger.warn(dateraError.getMessage(), dateraError); throw new CloudRuntimeException("Unable to grant access to volume " + dateraError.getMessage()); } finally { lock.unlock(); @@ -301,13 +302,13 @@ private void addClusterHostsToInitiatorGroup(DateraObject.DateraConnection conn, initiatorName = DateraUtil.INITIATOR_PREFIX + "-" + host.getUuid(); initiator = DateraUtil.createInitiator(conn, initiatorName, iqn); - s_logger.debug("Initiator " + initiatorName + " with " + iqn + "added "); + logger.debug("Initiator " + initiatorName + " with " + iqn + "added "); } Preconditions.checkNotNull(initiator); if (!DateraUtil.isInitiatorPresentInGroup(initiator, initiatorGroup)) { - s_logger.debug("Add " + initiatorName + " to " + initiatorGroupName); + logger.debug("Add " + initiatorName + " to " + initiatorGroupName); DateraUtil.addInitiatorToGroup(conn, initiator.getPath(), initiatorGroupName); } } @@ -349,7 +350,7 @@ private boolean isInitiatorGroupAssignedToAppInstance(DateraObject.DateraConnect */ @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { - s_logger.debug("revokeAccess() called"); + logger.debug("revokeAccess() called"); Preconditions.checkArgument(dataObject != null, "'dataObject' should not be 'null'"); Preconditions.checkArgument(host != null, "'host' should not be 'null'"); @@ -364,7 +365,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid()); if (!lock.lock(s_lockTimeInSeconds)) { - s_logger.debug("Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid()); + logger.debug("Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid()); } try { @@ -388,7 +389,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } catch (DateraObject.DateraError | UnsupportedEncodingException 
| InterruptedException dateraError) { String errMesg = "Error revoking access for Volume : " + dataObject.getId(); - s_logger.warn(errMesg, dateraError); + logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); } finally { lock.unlock(); @@ -461,7 +462,7 @@ private String getAppInstanceName(DataObject dataObject) { name.add(dataObject.getUuid()); // 6db58e3f-14c4-45ac-95e9-60e3a00ce7d0 VolumeVO volumeVo = _volumeDao.findById(dataObject.getId()); - s_logger.debug("volumeName : " + volumeName); + logger.debug("volumeName : " + volumeName); break; case SNAPSHOT: @@ -541,7 +542,7 @@ private String getIpPool(long storagePoolId) { if (storagePoolDetail != null) { ipPool = storagePoolDetail.getValue(); } - s_logger.debug("ipPool: " + ipPool); + logger.debug("ipPool: " + ipPool); return ipPool; } @@ -588,7 +589,7 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { } } catch (DateraObject.DateraError dateraError) { String errMesg = "Error getting used bytes for storage pool : " + storagePool.getId(); - s_logger.warn(errMesg, dateraError); + logger.warn(errMesg, dateraError); throw new CloudRuntimeException(errMesg); } } @@ -623,7 +624,7 @@ private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) { usedSpaceBytes += templatePoolRef.getTemplateSize(); } } - s_logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpaceBytes)); + logger.debug("usedSpaceBytes: " + toHumanReadableSize(usedSpaceBytes)); return usedSpaceBytes; } @@ -664,7 +665,7 @@ public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO hypervisorSnapshotReserve = Math.max(hypervisorSnapshotReserve, s_lowestHypervisorSnapshotReserve); volumeSize += volumeSize * (hypervisorSnapshotReserve / 100f); } - s_logger.debug("Volume size: " + toHumanReadableSize(volumeSize)); + logger.debug("Volume size: " + toHumanReadableSize(volumeSize)); break; case TEMPLATE: @@ -677,7 +678,7 @@ public long 
getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataO } else { volumeSize = (long) (templateSize + templateSize * (s_lowestHypervisorSnapshotReserve / 100f)); } - s_logger.debug("Template volume size:" + toHumanReadableSize(volumeSize)); + logger.debug("Template volume size:" + toHumanReadableSize(volumeSize)); break; } @@ -723,7 +724,7 @@ private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { } catch (UnsupportedEncodingException | DateraObject.DateraError e) { String errMesg = "Error deleting app instance for Volume : " + volumeInfo.getId(); - s_logger.warn(errMesg, e); + logger.warn(errMesg, e); throw new CloudRuntimeException(errMesg); } } @@ -750,7 +751,7 @@ private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { */ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { - s_logger.debug("createVolume() called"); + logger.debug("createVolume() called"); Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); @@ -763,20 +764,20 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot"); long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate"); - s_logger.debug("csTemplateId is " + String.valueOf(csTemplateId)); + logger.debug("csTemplateId is " + String.valueOf(csTemplateId)); try { if (csSnapshotId > 0) { // creating volume from snapshot. The snapshot could either be a native snapshot // or another volume. - s_logger.debug("Creating volume from snapshot "); + logger.debug("Creating volume from snapshot "); appInstance = createDateraClone(conn, csSnapshotId, volumeInfo, storagePoolId, DataObjectType.SNAPSHOT); } else if (csTemplateId > 0) { // create volume from template. 
Invoked when creating new ROOT volume - s_logger.debug("Creating volume from template "); + logger.debug("Creating volume from template "); appInstance = createDateraClone(conn, csTemplateId, volumeInfo, storagePoolId, DataObjectType.TEMPLATE); String appInstanceName = appInstance.getName(); @@ -805,18 +806,18 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { } else { // Just create a standard volume - s_logger.debug("Creating a standard volume "); + logger.debug("Creating a standard volume "); appInstance = createDateraVolume(conn, volumeInfo, storagePoolId); } } catch (UnsupportedEncodingException | DateraObject.DateraError e) { String errMesg = "Unable to create Volume Error: " + e.getMessage(); - s_logger.warn(errMesg); + logger.warn(errMesg); throw new CloudRuntimeException(errMesg, e); } if (appInstance == null) { String errMesg = "appInstance returned null"; - s_logger.warn(errMesg); + logger.warn(errMesg); throw new CloudRuntimeException(errMesg); } @@ -825,8 +826,8 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { String iqnPath = DateraUtil.generateIqnPath(iqn); VolumeVO volumeVo = _volumeDao.findById(volumeInfo.getId()); - s_logger.debug("volume ID : " + volumeInfo.getId()); - s_logger.debug("volume uuid : " + volumeInfo.getUuid()); + logger.debug("volume ID : " + volumeInfo.getId()); + logger.debug("volume uuid : " + volumeInfo.getUuid()); volumeVo.set_iScsiName(iqnPath); volumeVo.setFolder(appInstance.getName()); @@ -862,7 +863,7 @@ private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { private DateraObject.AppInstance createDateraVolume(DateraObject.DateraConnection conn, VolumeInfo volumeInfo, long storagePoolId) throws UnsupportedEncodingException, DateraObject.DateraError { - s_logger.debug("createDateraVolume() called"); + logger.debug("createDateraVolume() called"); DateraObject.AppInstance appInstance = null; try { @@ -895,8 +896,8 @@ private DateraObject.AppInstance 
createDateraVolume(DateraObject.DateraConnectio replicas, volumePlacement, ipPool); } } catch (Exception ex) { - s_logger.debug("createDateraVolume() failed"); - s_logger.error(ex); + logger.debug("createDateraVolume() failed"); + logger.error(ex); } return appInstance; } @@ -918,7 +919,7 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection VolumeInfo volumeInfo, long storagePoolId, DataObjectType dataType) throws UnsupportedEncodingException, DateraObject.DateraError { - s_logger.debug("createDateraClone() called"); + logger.debug("createDateraClone() called"); String clonedAppInstanceName = getAppInstanceName(volumeInfo); String baseAppInstanceName = null; @@ -930,7 +931,7 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection // Clone volume from a snapshot if (snapshotDetails != null && snapshotDetails.getValue() != null) { - s_logger.debug("Clone volume from a snapshot"); + logger.debug("Clone volume from a snapshot"); appInstance = DateraUtil.cloneAppInstanceFromSnapshot(conn, clonedAppInstanceName, snapshotDetails.getValue(), ipPool); @@ -951,14 +952,14 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection } else { // Clone volume from an appInstance - s_logger.debug("Clone volume from an appInstance"); + logger.debug("Clone volume from an appInstance"); snapshotDetails = snapshotDetailsDao.findDetail(dataObjectId, DateraUtil.VOLUME_ID); baseAppInstanceName = snapshotDetails.getValue(); } } else if (dataType == DataObjectType.TEMPLATE) { - s_logger.debug("Clone volume from a template"); + logger.debug("Clone volume from a template"); VMTemplateStoragePoolVO templatePoolRef = tmpltPoolDao.findByPoolTemplate(storagePoolId, dataObjectId, null); @@ -996,7 +997,7 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection throw new CloudRuntimeException("Unable to create an app instance from snapshot or template " + volumeInfo.getId() + " type " + 
dataType); } - s_logger.debug("Datera - Cloned " + baseAppInstanceName + " to " + clonedAppInstanceName); + logger.debug("Datera - Cloned " + baseAppInstanceName + " to " + clonedAppInstanceName); return appInstance; } @@ -1013,7 +1014,7 @@ private DateraObject.AppInstance createDateraClone(DateraObject.DateraConnection * @param storagePoolId primary store ID */ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { - s_logger.debug("createTempVolume() from snapshot called"); + logger.debug("createTempVolume() from snapshot called"); String ipPool = getIpPool(storagePoolId); long csSnapshotId = snapshotInfo.getId(); @@ -1043,14 +1044,14 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { DateraUtil.pollAppInstanceAvailable(conn, clonedAppInstanceName); } catch (DateraObject.DateraError | UnsupportedEncodingException e) { String errMesg = "Unable to create temp volume " + csSnapshotId + "Error:" + e.getMessage(); - s_logger.error(errMesg, e); + logger.error(errMesg, e); throw new CloudRuntimeException(errMesg, e); } if (clonedAppInstance == null) { throw new CloudRuntimeException("Unable to clone volume for snapshot " + snapshotName); } - s_logger.debug("Temp app_instance " + clonedAppInstanceName + " created"); + logger.debug("Temp app_instance " + clonedAppInstanceName + " created"); addTempVolumeToDb(csSnapshotId, clonedAppInstanceName); handleSnapshotDetails(csSnapshotId, DiskTO.IQN, DateraUtil.generateIqnPath(clonedAppInstance.getIqn())); @@ -1059,7 +1060,7 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { snapshotDetails = snapshotDetailsDao.findDetail(csSnapshotId, DateraUtil.VOLUME_ID); try { - s_logger.debug("Deleting temp app_instance " + snapshotDetails.getValue()); + logger.debug("Deleting temp app_instance " + snapshotDetails.getValue()); DateraUtil.deleteAppInstance(conn, snapshotDetails.getValue()); } catch (UnsupportedEncodingException | DateraObject.DateraError 
dateraError) { String errMesg = "Error deleting temp volume " + dateraError.getMessage(); @@ -1085,7 +1086,7 @@ private void createTempVolume(SnapshotInfo snapshotInfo, long storagePoolId) { * @return IQN of the template volume */ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) { - s_logger.debug("createTemplateVolume() as cache template called"); + logger.debug("createTemplateVolume() as cache template called"); verifySufficientBytesForStoragePool(templateInfo, storagePoolId); @@ -1098,7 +1099,7 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId long templateSizeBytes = getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePoolDao.findById(storagePoolId)); - s_logger.debug("cached VM template sizeBytes: " + toHumanReadableSize(templateSizeBytes)); + logger.debug("cached VM template sizeBytes: " + toHumanReadableSize(templateSizeBytes)); int templateSizeGib = DateraUtil.bytesToGib(templateSizeBytes); @@ -1108,7 +1109,7 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId String volumePlacement = getVolPlacement(storagePoolId); String ipPool = getIpPool(storagePoolId); - s_logger.debug("cached VM template app_instance: " + appInstanceName + " ipPool: " + ipPool + " sizeGib: " + String.valueOf(templateSizeGib)); + logger.debug("cached VM template app_instance: " + appInstanceName + " ipPool: " + ipPool + " sizeGib: " + String.valueOf(templateSizeGib)); DateraObject.AppInstance appInstance = DateraUtil.createAppInstance(conn, appInstanceName, templateSizeGib, templateIops, replicaCount, volumePlacement, ipPool); @@ -1140,10 +1141,10 @@ public String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId } catch (UnsupportedEncodingException | DateraObject.DateraError dateraError) { if (DateraObject.DateraErrorTypes.ConflictError.equals(dateraError)) { String errMesg = "template app Instance " + appInstanceName + " exists"; - 
s_logger.debug(errMesg, dateraError); + logger.debug(errMesg, dateraError); } else { String errMesg = "Unable to create template app Instance " + dateraError.getMessage(); - s_logger.error(errMesg, dateraError); + logger.error(errMesg, dateraError); throw new CloudRuntimeException(errMesg, dateraError); } } @@ -1166,22 +1167,22 @@ public void createAsync(DataStore dataStore, DataObject dataObject, try { if (dataObject.getType() == DataObjectType.VOLUME) { - s_logger.debug("createAsync - creating volume"); + logger.debug("createAsync - creating volume"); iqn = createVolume((VolumeInfo) dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { - s_logger.debug("createAsync - creating snapshot"); + logger.debug("createAsync - creating snapshot"); createTempVolume((SnapshotInfo) dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - s_logger.debug("createAsync - creating template"); + logger.debug("createAsync - creating template"); iqn = createTemplateVolume((TemplateInfo) dataObject, dataStore.getId()); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; - s_logger.error(errMsg); + logger.error(errMsg); } } catch (Exception ex) { errMsg = ex.getMessage(); - s_logger.error(errMsg); + logger.error(errMsg); if (callback == null) { throw ex; @@ -1228,13 +1229,13 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, try { if (dataObject.getType() == DataObjectType.VOLUME) { - s_logger.debug("deleteAsync - deleting volume"); + logger.debug("deleteAsync - deleting volume"); deleteVolume((VolumeInfo) dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { - s_logger.debug("deleteAsync - deleting snapshot"); + logger.debug("deleteAsync - deleting snapshot"); deleteSnapshot((SnapshotInfo) dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - 
s_logger.debug("deleteAsync - deleting template"); + logger.debug("deleteAsync - deleting template"); deleteTemplate((TemplateInfo) dataObject, dataStore.getId()); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; @@ -1242,7 +1243,7 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, } catch (Exception ex) { errMsg = ex.getMessage(); - s_logger.error(errMsg); + logger.error(errMsg); } CommandResult result = new CommandResult(); @@ -1280,7 +1281,7 @@ public boolean canCopy(DataObject srcData, DataObject destData) { */ @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - s_logger.debug("takeSnapshot() called"); + logger.debug("takeSnapshot() called"); CreateCmdResult result; @@ -1305,7 +1306,7 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback dsInfos) { // uuid = DateraUtil.PROVIDER_NAME + "_" + cluster.getUuid() + "_" + storageVip // + "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement; uuid = DateraUtil.PROVIDER_NAME + "_" + clusterUuid + "_" + randomString; - s_logger.debug("Datera - Setting Datera cluster-wide primary storage uuid to " + uuid); + logger.debug("Datera - Setting Datera cluster-wide primary storage uuid to " + uuid); parameters.setPodId(podId); parameters.setClusterId(clusterId); @@ -152,7 +153,7 @@ public DataStore initialize(Map dsInfos) { // "_" + clusterAdminUsername + "_" + numReplicas + "_" + volPlacement; uuid = DateraUtil.PROVIDER_NAME + "_" + zoneUuid + "_" + randomString; - s_logger.debug("Datera - Setting Datera zone-wide primary storage uuid to " + uuid); + logger.debug("Datera - Setting Datera zone-wide primary storage uuid to " + uuid); } if (capacityBytes == null || capacityBytes <= 0) { throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0."); @@ -164,9 +165,9 @@ public DataStore initialize(Map dsInfos) { if (domainName == null) { domainName = 
"ROOT"; - s_logger.debug("setting the domain to ROOT"); + logger.debug("setting the domain to ROOT"); } - s_logger.debug("Datera - domainName: " + domainName); + logger.debug("Datera - domainName: " + domainName); parameters.setHost(storageVip); parameters.setPort(storagePort); @@ -203,7 +204,7 @@ public DataStore initialize(Map dsInfos) { lClusterDefaultMinIops = Long.parseLong(clusterDefaultMinIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MIN_IOPS + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex); } @@ -214,7 +215,7 @@ public DataStore initialize(Map dsInfos) { lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + logger.warn("Cannot parse the setting of " + DateraUtil.CLUSTER_DEFAULT_MAX_IOPS + ", using default value: " + lClusterDefaultMaxIops + ". 
Exception: " + ex); } @@ -267,12 +268,12 @@ public boolean attachCluster(DataStore datastore, ClusterScope scope) { poolHosts.add(host); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); + logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); storagePoolDao.expunge(primaryDataStoreInfo.getId()); @@ -307,7 +308,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h try { _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java index 99d0758a96a1..89ac2a9a21c3 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/provider/DateraHostListener.java @@ -33,7 +33,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.DateraObject; import org.apache.cloudstack.storage.datastore.util.DateraUtil; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import 
com.cloud.agent.api.Answer; @@ -60,7 +61,7 @@ import com.cloud.vm.dao.VMInstanceDao; public class DateraHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(DateraHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private AgentManager _agentMgr; @Inject private AlertManager _alertMgr; @@ -85,7 +86,7 @@ public boolean hostConnect(long hostId, long storagePoolId) { HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); + logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); return false; } StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); @@ -142,7 +143,7 @@ public boolean hostRemoved(long hostId, long clusterId) { if (!lock.lock(s_lockTimeInSeconds)) { String errMsg = "Couldn't lock the DB on the following string: " + clusterVO.getUuid(); - s_logger.debug(errMsg); + logger.debug(errMsg); throw new CloudRuntimeException(errMsg); } @@ -169,7 +170,7 @@ public boolean hostRemoved(long hostId, long clusterId) { } } catch (DateraObject.DateraError | UnsupportedEncodingException e) { - s_logger.warn("Error while removing host from initiator groups ", e); + logger.warn("Error while removing host from initiator groups ", e); } finally { lock.unlock(); lock.releaseRef(); @@ -307,7 +308,7 @@ private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StorageP assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; - s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); } private List> getTargets(long clusterId, long storagePoolId) { diff --git 
a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java index a1084bf9a40d..6aeedd275464 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/util/DateraUtil.java @@ -41,7 +41,8 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.io.IOException; import java.io.UnsupportedEncodingException; @@ -57,7 +58,7 @@ public class DateraUtil { - private static final Logger s_logger = Logger.getLogger(DateraUtil.class); + protected static Logger LOGGER = LogManager.getLogger(DateraUtil.class); private static final String API_VERSION = "v2"; public static final String PROVIDER_NAME = "Datera"; @@ -296,7 +297,7 @@ public static DateraObject.AppInstance cloneAppInstanceFromVolume(DateraObject.D public static DateraObject.AppInstance cloneAppInstanceFromVolume(DateraObject.DateraConnection conn, String name, String srcCloneName, String ipPool) throws UnsupportedEncodingException, DateraObject.DateraError { - s_logger.debug("cloneAppInstanceFromVolume() called"); + LOGGER.debug("cloneAppInstanceFromVolume() called"); DateraObject.AppInstance srcAppInstance = getAppInstance(conn, srcCloneName); if (srcAppInstance == null) { @@ -1002,7 +1003,7 @@ public static String extractIqn(String iqnPath) { final String tokens[] = iqnPath.split("/"); if (tokens.length != 3) { final String msg = "Wrong iscsi path " + iqnPath + " it should be /targetIQN/LUN"; - s_logger.warn(msg); + LOGGER.warn(msg); return null; } diff --git 
a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index a0aaab1d0aa1..02a28b6e947d 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -56,7 +56,8 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ResizeVolumeAnswer; @@ -95,7 +96,7 @@ public Map getCapabilities() { return caps; } - private static final Logger s_logger = Logger.getLogger(CloudStackPrimaryDataStoreDriverImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final String NO_REMOTE_ENDPOINT_WITH_ENCRYPTION = "No remote endpoint to send command, unable to find a valid endpoint. 
Requires encryption support: %s"; @Inject @@ -138,8 +139,8 @@ public DataStoreTO getStoreTO(DataStore store) { } public Answer createVolume(VolumeInfo volume) throws StorageUnavailableException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating volume: " + volume); + if (logger.isDebugEnabled()) { + logger.debug("Creating volume: " + volume); } CreateObjectCommand cmd = new CreateObjectCommand(volume.getTO()); @@ -148,7 +149,7 @@ public Answer createVolume(VolumeInfo volume) throws StorageUnavailableException Answer answer = null; if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -207,7 +208,7 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal result.setAnswer(answer); } } catch (Exception e) { - s_logger.debug("failed to create volume", e); + logger.debug("failed to create volume", e); errMsg = e.toString(); } } @@ -246,7 +247,7 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal } if (ep == null) { String errMsg = "No remote endpoint to send DeleteCommand, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); result.setResult(errMsg); } else { Answer answer = ep.sendMessage(cmd); @@ -255,7 +256,7 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal } } } catch (Exception ex) { - s_logger.debug("Unable to destroy volume" + data.getId(), ex); + logger.debug("Unable to destroy volume" + data.getId(), ex); result.setResult(ex.toString()); } callback.complete(result); @@ -263,7 +264,7 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { - s_logger.debug(String.format("Copying volume %s(%s) to %s(%s)", 
srcdata.getId(), srcdata.getType(), destData.getId(), destData.getType())); + logger.debug(String.format("Copying volume %s(%s) to %s(%s)", srcdata.getId(), srcdata.getType(), destData.getId(), destData.getType())); boolean encryptionRequired = anyVolumeRequiresEncryption(srcdata, destData); DataStore store = destData.getDataStore(); if (store.getRole() == DataStoreRole.Primary) { @@ -289,10 +290,10 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa Answer answer = null; if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { - s_logger.debug(String.format("Sending copy command to endpoint %s, where encryption support is %s", ep.getHostAddr(), encryptionRequired ? "required" : "not required")); + logger.debug(String.format("Sending copy command to endpoint %s, where encryption support is %s", ep.getHostAddr(), encryptionRequired ? 
"required" : "not required")); answer = ep.sendMessage(cmd); } CopyCommandResult result = new CopyCommandResult("", answer); @@ -304,7 +305,7 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa CopyCmdAnswer answer = null; if (ep == null) { String errMsg = String.format(NO_REMOTE_ENDPOINT_WITH_ENCRYPTION, encryptionRequired); - s_logger.error(errMsg); + logger.error(errMsg); answer = new CopyCmdAnswer(errMsg); } else { answer = (CopyCmdAnswer) ep.sendMessage(cmd); @@ -348,7 +349,7 @@ public boolean canCopy(DataObject srcData, DataObject destData) { @Override public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { CreateCmdResult result = null; - s_logger.debug("Taking snapshot of "+ snapshot); + logger.debug("Taking snapshot of "+ snapshot); try { SnapshotObjectTO snapshotTO = (SnapshotObjectTO) snapshot.getTO(); Object payload = snapshot.getPayload(); @@ -362,11 +363,11 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback cal ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, endpointsToRunResize, resizeCmd); if (answer != null && answer.getResult()) { long finalSize = answer.getNewSize(); - s_logger.debug("Resize: volume started at size: " + toHumanReadableSize(vol.getSize()) + " and ended at size: " + toHumanReadableSize(finalSize)); + logger.debug("Resize: volume started at size: " + toHumanReadableSize(vol.getSize()) + " and ended at size: " + toHumanReadableSize(finalSize)); vol.setSize(finalSize); vol.update(); @@ -453,12 +454,12 @@ public void resize(DataObject data, AsyncCompletionCallback cal } else if (answer != null) { result.setResult(answer.getDetails()); } else { - s_logger.debug("return a null answer, mark it as failed for unknown reason"); + logger.debug("return a null answer, mark it as failed for unknown reason"); result.setResult("return a null answer, mark it as failed for unknown reason"); } } catch (Exception e) { - 
s_logger.debug("sending resize command failed", e); + logger.debug("sending resize command failed", e); result.setResult(e.toString()); } finally { resizeCmd.clearPassphrase(); @@ -475,7 +476,7 @@ private void updateVolumePathDetails(VolumeObject vol, ResizeVolumeAnswer answer if (storagePoolVO != null) { volumeVO.setPoolId(storagePoolVO.getId()); } else { - s_logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreUUID, vol.getId())); + logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreUUID, vol.getId())); } } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index bad57068a731..bbc61e765224 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -64,7 +64,8 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.inject.Inject; import java.util.ArrayList; @@ -73,7 +74,7 @@ import java.util.UUID; public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { - private static final Logger s_logger = Logger.getLogger(CloudStackPrimaryDataStoreLifeCycleImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected 
ResourceManager _resourceMgr; @Inject @@ -155,8 +156,8 @@ public DataStore initialize(Map dsInfos) { String userInfo = dsInfos.get("userInfo") != null ? dsInfos.get("userInfo").toString() : null; int port = dsInfos.get("port") != null ? Integer.parseInt(dsInfos.get("port").toString()) : -1; - if (s_logger.isDebugEnabled()) { - s_logger.debug("createPool Params @ scheme - " + scheme + " storageHost - " + storageHost + " hostPath - " + hostPath + " port - " + port); + if (logger.isDebugEnabled()) { + logger.debug("createPool Params @ scheme - " + scheme + " storageHost - " + storageHost + " hostPath - " + hostPath + " port - " + port); } if (scheme.equalsIgnoreCase("nfs")) { if (port == -1) { @@ -271,7 +272,7 @@ public DataStore initialize(Map dsInfos) { parameters.setPort(0); parameters.setPath(hostPath); } else { - s_logger.warn("Unable to figure out the scheme for URI: " + scheme); + logger.warn("Unable to figure out the scheme for URI: " + scheme); throw new IllegalArgumentException("Unable to figure out the scheme for URI: " + scheme); } } @@ -299,8 +300,8 @@ public DataStore initialize(Map dsInfos) { List spHandles = primaryDataStoreDao.findIfDuplicatePoolsExistByUUID(uuid); if ((spHandles != null) && (spHandles.size() > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Another active pool with the same uuid already exists"); + if (logger.isDebugEnabled()) { + logger.debug("Another active pool with the same uuid already exists"); } throw new CloudRuntimeException("Another active pool with the same uuid already exists"); } @@ -331,14 +332,14 @@ private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, Str ValidateVcenterDetailsCommand cmd = new ValidateVcenterDetailsCommand(storageHost); final Answer answer = agentMgr.easySend(h.getId(), cmd); if (answer != null && answer.getResult()) { - s_logger.info("Successfully validated vCenter details provided"); + logger.info("Successfully validated vCenter details provided"); return; } else { 
if (answer != null) { throw new InvalidParameterValueException("Provided vCenter server details does not match with the existing vCenter in zone id: " + zoneId); } else { String msg = "Can not validate vCenter through host " + h.getId() + " due to ValidateVcenterDetailsCommand returns null"; - s_logger.warn(msg); + logger.warn(msg); } } } @@ -346,14 +347,14 @@ private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, Str } protected boolean createStoragePool(long hostId, StoragePool pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " + hostId); + logger.debug("creating pool " + pool.getName() + " on host " + hostId); if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.DatastoreCluster && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD && pool.getPoolType() != StoragePoolType.CLVM && pool.getPoolType() != StoragePoolType.SMB && pool.getPoolType() != StoragePoolType.Gluster) { - s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); + logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); return false; } CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); @@ -366,10 +367,10 @@ protected boolean createStoragePool(long hostId, StoragePool pool) { String msg = ""; if (answer != null) { msg = "Can not create storage pool through host " + hostId + " due to " + answer.getDetails(); - s_logger.warn(msg); + logger.warn(msg); } else { msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); + logger.warn(msg); } throw 
new CloudRuntimeException(msg); } @@ -387,7 +388,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } if (primarystore.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - s_logger.warn("Can not create storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("Can not create storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); primaryDataStoreDao.expunge(primarystore.getId()); return false; } @@ -400,7 +401,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { } } - s_logger.debug("In createPool Adding the pool to each of the hosts"); + logger.debug("In createPool Adding the pool to each of the hosts"); List poolHosts = new ArrayList(); for (HostVO h : allHosts) { try { @@ -410,12 +411,12 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Storage has already been added as local storage"); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); + logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); + logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); primaryDataStoreDao.expunge(primarystore.getId()); throw new CloudRuntimeException("Failed to access storage pool"); } @@ -427,7 +428,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); - s_logger.debug("In createPool. 
Attaching the pool to each of the hosts."); + logger.debug("In createPool. Attaching the pool to each of the hosts."); List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { @@ -437,11 +438,11 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h primaryDataStoreDao.expunge(dataStore.getId()); throw new CloudRuntimeException("Storage has already been added as local storage to host: " + host.getName()); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + dataStore + " in this zone."); + logger.warn("No host can access storage pool " + dataStore + " in this zone."); primaryDataStoreDao.expunge(dataStore.getId()); throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); } @@ -488,7 +489,7 @@ public boolean deleteDataStore(DataStore store) { } } else { if (answer != null) { - s_logger.debug("Failed to delete storage pool: " + answer.getResult()); + logger.debug("Failed to delete storage pool: " + answer.getResult()); } } } @@ -514,9 +515,9 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis try { storageMgr.connectHostToSharedPool(scope.getScopeId(), dataStore.getId()); } catch (StorageUnavailableException ex) { - s_logger.error("Storage unavailable ",ex); + logger.error("Storage unavailable ",ex); } catch (StorageConflictException ex) { - s_logger.error("Storage already exists ",ex); + logger.error("Storage already exists ",ex); } } return true; diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java 
index 3082a19c7324..34179680607e 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -57,18 +57,19 @@ import org.apache.http.impl.client.HttpClients; import org.apache.http.message.BasicNameValuePair; import org.apache.http.ssl.SSLContextBuilder; -import org.apache.log4j.Logger; import com.cloud.utils.exception.CloudRuntimeException; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * Array API */ public class FlashArrayAdapter implements ProviderAdapter { - private Logger logger = Logger.getLogger(FlashArrayAdapter.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final String HOSTGROUP = "hostgroup"; public static final String STORAGE_POD = "pod"; diff --git a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorBackupSnapshotCommandWrapper.java b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorBackupSnapshotCommandWrapper.java index a210d53d7e78..663b2c7d0265 100644 --- a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorBackupSnapshotCommandWrapper.java @@ -35,7 +35,8 @@ import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.io.FileUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; 
import org.joda.time.Duration; import org.libvirt.LibvirtException; @@ -43,7 +44,7 @@ public final class LinstorBackupSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LinstorBackupSnapshotCommandWrapper.class); + protected static Logger LOGGER = LogManager.getLogger(LinstorBackupSnapshotCommandWrapper.class); private String zfsSnapdev(boolean hide, String zfsUrl) { Script script = new Script("/usr/bin/zfs", Duration.millis(5000)); @@ -67,7 +68,7 @@ static void cleanupSecondaryPool(final KVMStoragePool secondaryPool) { try { secondaryPool.delete(); } catch (final Exception e) { - s_logger.debug("Failed to delete secondary storage", e); + LOGGER.debug("Failed to delete secondary storage", e); } } } @@ -90,7 +91,7 @@ private String convertImageToQCow2( // NOTE: the qemu img will also contain the drbd metadata at the end final QemuImg qemu = new QemuImg(waitMilliSeconds); qemu.convert(srcFile, dstFile); - s_logger.info("Backup snapshot " + srcFile + " to " + dstPath); + LOGGER.info("Backup snapshot " + srcFile + " to " + dstPath); return dstPath; } @@ -107,7 +108,7 @@ private SnapshotObjectTO setCorrectSnapshotSize(final SnapshotObjectTO dst, fina @Override public CopyCmdAnswer execute(LinstorBackupSnapshotCommand cmd, LibvirtComputingResource serverResource) { - s_logger.debug("LinstorBackupSnapshotCommandWrapper: " + cmd.getSrcTO().getPath() + " -> " + cmd.getDestTO().getPath()); + LOGGER.debug("LinstorBackupSnapshotCommandWrapper: " + cmd.getSrcTO().getPath() + " -> " + cmd.getDestTO().getPath()); final SnapshotObjectTO src = (SnapshotObjectTO) cmd.getSrcTO(); final SnapshotObjectTO dst = (SnapshotObjectTO) cmd.getDestTO(); KVMStoragePool secondaryPool = null; @@ -130,7 +131,7 @@ public CopyCmdAnswer execute(LinstorBackupSnapshotCommand cmd, LibvirtComputingR // provide the linstor snapshot block device // on lvm thin this should already be there in /dev/mapper/vg-snapshotname // on zfs we need to unhide the 
snapshot block device - s_logger.info("Src: " + srcPath + " | " + src.getName()); + LOGGER.info("Src: " + srcPath + " | " + src.getName()); if (srcPath.startsWith("zfs://")) { zfsHidden = true; if (zfsSnapdev(false, srcPath) != null) { @@ -148,14 +149,14 @@ public CopyCmdAnswer execute(LinstorBackupSnapshotCommand cmd, LibvirtComputingR if (result != null) { return new CopyCmdAnswer("qemu-img shrink failed: " + result); } - s_logger.info("Backup shrunk " + dstPath + " to actual size " + src.getVolume().getSize()); + LOGGER.info("Backup shrunk " + dstPath + " to actual size " + src.getVolume().getSize()); SnapshotObjectTO snapshot = setCorrectSnapshotSize(dst, dstPath); return new CopyCmdAnswer(snapshot); } catch (final Exception e) { final String error = String.format("Failed to backup snapshot with id [%s] with a pool %s, due to %s", cmd.getSrcTO().getId(), cmd.getSrcTO().getDataStore().getUuid(), e.getMessage()); - s_logger.error(error); + LOGGER.error(error); return new CopyCmdAnswer(cmd, e); } finally { cleanupSecondaryPool(secondaryPool); diff --git a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorRevertBackupSnapshotCommandWrapper.java b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorRevertBackupSnapshotCommandWrapper.java index 511b5a40ca83..252cb6c73271 100644 --- a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorRevertBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LinstorRevertBackupSnapshotCommandWrapper.java @@ -32,15 +32,12 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; @ResourceWrapper(handles = LinstorRevertBackupSnapshotCommand.class) public final 
class LinstorRevertBackupSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(LinstorRevertBackupSnapshotCommandWrapper.class); - private void convertQCow2ToRAW(final String srcPath, final String dstPath, int waitMilliSeconds) throws LibvirtException, QemuImgException { @@ -54,7 +51,7 @@ private void convertQCow2ToRAW(final String srcPath, final String dstPath, int w @Override public CopyCmdAnswer execute(LinstorRevertBackupSnapshotCommand cmd, LibvirtComputingResource serverResource) { - s_logger.debug("LinstorRevertBackupSnapshotCommandWrapper: " + cmd.getSrcTO().getPath() + " -> " + cmd.getDestTO().getPath()); + logger.debug("LinstorRevertBackupSnapshotCommandWrapper: " + cmd.getSrcTO().getPath() + " -> " + cmd.getDestTO().getPath()); final SnapshotObjectTO src = (SnapshotObjectTO) cmd.getSrcTO(); final VolumeObjectTO dst = (VolumeObjectTO) cmd.getDestTO(); KVMStoragePool secondaryPool = null; @@ -83,7 +80,7 @@ public CopyCmdAnswer execute(LinstorRevertBackupSnapshotCommand cmd, LibvirtComp } catch (final Exception e) { final String error = String.format("Failed to revert snapshot with id [%s] with a pool %s, due to %s", cmd.getSrcTO().getId(), cmd.getSrcTO().getDataStore().getUuid(), e.getMessage()); - s_logger.error(error); + logger.error(error); return new CopyCmdAnswer(cmd, e); } finally { LinstorBackupSnapshotCommandWrapper.cleanupSecondaryPool(secondaryPool); diff --git a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java index deee5fb6c413..7739be838452 100644 --- a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java +++ b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java @@ -32,7 +32,8 @@ import org.apache.cloudstack.utils.qemu.QemuImg; 
import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.libvirt.LibvirtException; import com.cloud.storage.Storage; @@ -55,7 +56,7 @@ import com.linbit.linstor.api.model.VolumeDefinition; public class LinstorStorageAdaptor implements StorageAdaptor { - private static final Logger s_logger = Logger.getLogger(LinstorStorageAdaptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final Map MapStorageUuidToStoragePool = new HashMap<>(); private final String localNodeName; @@ -98,11 +99,11 @@ private String getHostname() { private void logLinstorAnswer(@Nonnull ApiCallRc answer) { if (answer.isError()) { - s_logger.error(answer.getMessage()); + logger.error(answer.getMessage()); } else if (answer.isWarning()) { - s_logger.warn(answer.getMessage()); + logger.warn(answer.getMessage()); } else if (answer.isInfo()) { - s_logger.info(answer.getMessage()); + logger.info(answer.getMessage()); } } @@ -136,14 +137,14 @@ public KVMStoragePool getStoragePool(String uuid) { @Override public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { - s_logger.debug("Linstor getStoragePool: " + uuid + " -> " + refreshInfo); + logger.debug("Linstor getStoragePool: " + uuid + " -> " + refreshInfo); return MapStorageUuidToStoragePool.get(uuid); } @Override public KVMPhysicalDisk getPhysicalDisk(String name, KVMStoragePool pool) { - s_logger.debug("Linstor: getPhysicalDisk for " + name); + logger.debug("Linstor: getPhysicalDisk for " + name); if (name == null) { return null; } @@ -170,11 +171,11 @@ public KVMPhysicalDisk getPhysicalDisk(String name, KVMStoragePool pool) kvmDisk.setVirtualSize(size); return kvmDisk; } else { - s_logger.error("Linstor: viewResources didn't return resources or volumes for " + rscName); + logger.error("Linstor: viewResources didn't 
return resources or volumes for " + rscName); throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes."); } } catch (ApiException apiEx) { - s_logger.error(apiEx); + logger.error(apiEx); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -183,7 +184,7 @@ public KVMPhysicalDisk getPhysicalDisk(String name, KVMStoragePool pool) public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, Storage.StoragePoolType type, Map details) { - s_logger.debug(String.format( + logger.debug(String.format( "Linstor createStoragePool: name: '%s', host: '%s', path: %s, userinfo: %s", name, host, path, userInfo)); LinstorStoragePool storagePool = new LinstorStoragePool(name, host, port, userInfo, type, this); @@ -219,7 +220,7 @@ private void makeResourceAvailable(DevelopersApi api, String rscName, boolean di public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, byte[] passphrase) { - s_logger.debug(String.format("Linstor.createPhysicalDisk: %s;%s", name, format)); + logger.debug(String.format("Linstor.createPhysicalDisk: %s;%s", name, format)); final String rscName = getLinstorRscName(name); LinstorStoragePool lpool = (LinstorStoragePool) pool; final DevelopersApi api = getLinstorAPI(pool); @@ -233,7 +234,7 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, Qemu rgSpawn.setResourceDefinitionName(rscName); rgSpawn.addVolumeSizesItem(size / 1024); // linstor uses KiB - s_logger.info("Linstor: Spawn resource " + rscName); + logger.info("Linstor: Spawn resource " + rscName); ApiCallRcList answers = api.resourceGroupSpawn(lpool.getResourceGroup(), rgSpawn); handleLinstorApiAnswers(answers, "Linstor: Unable to spawn resource."); } @@ -251,16 +252,16 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, Qemu if 
(!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty()) { final String devPath = resources.get(0).getVolumes().get(0).getDevicePath(); - s_logger.info("Linstor: Created drbd device: " + devPath); + logger.info("Linstor: Created drbd device: " + devPath); final KVMPhysicalDisk kvmDisk = new KVMPhysicalDisk(devPath, name, pool); kvmDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); return kvmDisk; } else { - s_logger.error("Linstor: viewResources didn't return resources or volumes."); + logger.error("Linstor: viewResources didn't return resources or volumes."); throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes."); } } catch (ApiException apiEx) { - s_logger.error(String.format("Linstor.createPhysicalDisk: ApiException: %s", apiEx.getBestMessage())); + logger.error(String.format("Linstor.createPhysicalDisk: ApiException: %s", apiEx.getBestMessage())); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -268,9 +269,9 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, Qemu @Override public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { - s_logger.debug(String.format("Linstor: connectPhysicalDisk %s:%s -> %s", pool.getUuid(), volumePath, details)); + logger.debug(String.format("Linstor: connectPhysicalDisk %s:%s -> %s", pool.getUuid(), volumePath, details)); if (volumePath == null) { - s_logger.warn("volumePath is null, ignoring"); + logger.warn("volumePath is null, ignoring"); return false; } @@ -290,11 +291,11 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map optFirstPool = MapStorageUuidToStoragePool.values().stream().findFirst(); if (optFirstPool.isPresent()) { - s_logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath); + logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath); final KVMStoragePool pool = optFirstPool.get(); - s_logger.debug("Linstor: Using storpool: " + 
pool.getUuid()); + logger.debug("Linstor: Using storpool: " + pool.getUuid()); final DevelopersApi api = getLinstorAPI(pool); try @@ -359,15 +360,15 @@ public boolean disconnectPhysicalDiskByPath(String localPath) ApiCallRcList answers = api.resourceDefinitionModify(rsc.get().getName(), rdm); if (answers.hasError()) { - s_logger.error("Failed to remove 'allow-two-primaries' on " + rsc.get().getName()); + logger.error("Failed to remove 'allow-two-primaries' on " + rsc.get().getName()); throw new CloudRuntimeException(answers.get(0).getMessage()); } return true; } - s_logger.warn("Linstor: Couldn't find resource for this path: " + localPath); + logger.warn("Linstor: Couldn't find resource for this path: " + localPath); } catch (ApiException apiEx) { - s_logger.error(apiEx); + logger.error(apiEx); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -377,12 +378,12 @@ public boolean disconnectPhysicalDiskByPath(String localPath) @Override public boolean deletePhysicalDisk(String name, KVMStoragePool pool, Storage.ImageFormat format) { - s_logger.debug("Linstor: deletePhysicalDisk " + name); + logger.debug("Linstor: deletePhysicalDisk " + name); final DevelopersApi api = getLinstorAPI(pool); try { final String rscName = getLinstorRscName(name); - s_logger.debug("Linstor: delete resource definition " + rscName); + logger.debug("Linstor: delete resource definition " + rscName); ApiCallRcList answers = api.resourceDefinitionDelete(rscName); handleLinstorApiAnswers(answers, "Linstor: Unable to delete resource definition " + rscName); } catch (ApiException apiEx) { @@ -402,7 +403,7 @@ public KVMPhysicalDisk createDiskFromTemplate( int timeout, byte[] passphrase) { - s_logger.info("Linstor: createDiskFromTemplate"); + logger.info("Linstor: createDiskFromTemplate"); return copyPhysicalDisk(template, name, destPool, timeout); } @@ -431,7 +432,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt @Override public KVMPhysicalDisk 
copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout, byte[] srcPassphrase, byte[] destPassphrase, Storage.ProvisioningType provisioningType) { - s_logger.debug(String.format("Linstor.copyPhysicalDisk: %s -> %s", disk.getPath(), name)); + logger.debug(String.format("Linstor.copyPhysicalDisk: %s -> %s", disk.getPath(), name)); final QemuImg.PhysicalDiskFormat sourceFormat = disk.getFormat(); final String sourcePath = disk.getPath(); @@ -440,7 +441,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt final KVMPhysicalDisk dstDisk = destPools.createPhysicalDisk( name, QemuImg.PhysicalDiskFormat.RAW, provisioningType, disk.getVirtualSize(), null); - s_logger.debug(String.format("Linstor.copyPhysicalDisk: dstPath: %s", dstDisk.getPath())); + logger.debug(String.format("Linstor.copyPhysicalDisk: dstPath: %s", dstDisk.getPath())); final QemuImgFile destFile = new QemuImgFile(dstDisk.getPath()); destFile.setFormat(dstDisk.getFormat()); destFile.setSize(disk.getVirtualSize()); @@ -449,7 +450,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt final QemuImg qemu = new QemuImg(timeout); qemu.convert(srcFile, destFile); } catch (QemuImgException | LibvirtException e) { - s_logger.error(e); + logger.error(e); destPools.deletePhysicalDisk(name, Storage.ImageFormat.RAW); throw new CloudRuntimeException("Failed to copy " + disk.getPath() + " to " + name); } @@ -460,7 +461,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt @Override public boolean refresh(KVMStoragePool pool) { - s_logger.debug("Linstor: refresh"); + logger.debug("Linstor: refresh"); return true; } @@ -483,7 +484,7 @@ public KVMPhysicalDisk createDiskFromTemplateBacking( KVMStoragePool destPool, int timeout, byte[] passphrase) { - s_logger.debug("Linstor: createDiskFromTemplateBacking"); + logger.debug("Linstor: createDiskFromTemplateBacking"); return null; } @@ -492,7 +493,7 @@ 
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFileP KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { - s_logger.debug("Linstor: createTemplateFromDirectDownloadFile"); + logger.debug("Linstor: createTemplateFromDirectDownloadFile"); return null; } @@ -513,7 +514,7 @@ public long getAvailable(LinstorStoragePool pool) { if (rscGrps.isEmpty()) { final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -529,10 +530,10 @@ public long getAvailable(LinstorStoragePool pool) { .filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS) .mapToLong(sp -> sp.getFreeCapacity() != null ? sp.getFreeCapacity() : 0L).sum() * 1024; // linstor uses KiB - s_logger.debug("Linstor: getAvailable() -> " + free); + logger.debug("Linstor: getAvailable() -> " + free); return free; } catch (ApiException apiEx) { - s_logger.error(apiEx.getMessage()); + logger.error(apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -549,7 +550,7 @@ public long getUsed(LinstorStoragePool pool) { if (rscGrps.isEmpty()) { final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -566,10 +567,10 @@ public long getUsed(LinstorStoragePool pool) { .mapToLong(sp -> sp.getTotalCapacity() != null && sp.getFreeCapacity() != null ? 
sp.getTotalCapacity() - sp.getFreeCapacity() : 0L) .sum() * 1024; // linstor uses Kib - s_logger.debug("Linstor: getUsed() -> " + used); + logger.debug("Linstor: getUsed() -> " + used); return used; } catch (ApiException apiEx) { - s_logger.error(apiEx.getMessage()); + logger.error(apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java index 9b493ff01b9e..c5a6391dd92a 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/driver/LinstorPrimaryDataStoreDriverImpl.java @@ -102,10 +102,11 @@ import org.apache.cloudstack.storage.datastore.util.LinstorUtil; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { - private static final Logger s_logger = Logger.getLogger(LinstorPrimaryDataStoreDriverImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private VolumeDao _volumeDao; @Inject private VolumeDetailsDao _volumeDetailsDao; @@ -210,13 +211,13 @@ private void deleteResourceDefinition(StoragePoolVO storagePoolVO, String rscDef { for (ApiCallRc answer : answers) { - s_logger.error(answer.getMessage()); + logger.error(answer.getMessage()); } throw new CloudRuntimeException("Linstor: Unable to delete resource definition: " + rscDefName); } } catch 
(ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -233,14 +234,14 @@ private void deleteSnapshot(@Nonnull DataStore dataStore, @Nonnull String rscDef { for (ApiCallRc answer : answers) { - s_logger.error(answer.getMessage()); + logger.error(answer.getMessage()); } throw new CloudRuntimeException("Linstor: Unable to delete snapshot: " + rscDefName); } - s_logger.info("Linstor: Deleted snapshot " + snapshotName + " for resource " + rscDefName); + logger.info("Linstor: Deleted snapshot " + snapshotName + " for resource " + rscDefName); } catch (ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -258,7 +259,7 @@ private long getCsIdForCloning(long volumeId, String cloneOf) { @Override public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { - s_logger.debug("deleteAsync: " + dataObject.getType() + ";" + dataObject.getUuid()); + logger.debug("deleteAsync: " + dataObject.getType() + ";" + dataObject.getUuid()); String errMsg = null; final long storagePoolId = dataStore.getId(); @@ -297,7 +298,7 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet break; default: errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; - s_logger.error(errMsg); + logger.error(errMsg); } if (callback != null) { @@ -310,11 +311,11 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet private void logLinstorAnswer(@Nonnull ApiCallRc answer) { if (answer.isError()) { - s_logger.error(answer.getMessage()); + logger.error(answer.getMessage()); } else if (answer.isWarning()) { - s_logger.warn(answer.getMessage()); + 
logger.warn(answer.getMessage()); } else if (answer.isInfo()) { - s_logger.info(answer.getMessage()); + logger.info(answer.getMessage()); } } @@ -349,11 +350,11 @@ private String getDeviceName(DevelopersApi linstorApi, String rscName) throws Ap null); if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty()) { - s_logger.info("Linstor: Created drbd device: " + resources.get(0).getVolumes().get(0).getDevicePath()); + logger.info("Linstor: Created drbd device: " + resources.get(0).getVolumes().get(0).getDevicePath()); return resources.get(0).getVolumes().get(0).getDevicePath(); } else { - s_logger.error("Linstor: viewResources didn't return resources or volumes."); + logger.error("Linstor: viewResources didn't return resources or volumes."); throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes."); } } @@ -379,11 +380,11 @@ private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, Stri props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops); props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops); vdm.overrideProps(props); - s_logger.info("Apply qos setting: " + maxIops + " to " + rscName); + logger.info("Apply qos setting: " + maxIops + " to " + rscName); } else { - s_logger.info("Remove QoS setting for " + rscName); + logger.info("Remove QoS setting for " + rscName); vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops")); } ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm); @@ -396,7 +397,7 @@ private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, Stri long vMaxIops = maxIops != null ? 
maxIops : 0; long newIops = vcIops + vMaxIops; capacityIops -= newIops; - s_logger.info("Current storagepool " + storagePool.getName() + " iops capacity: " + capacityIops); + logger.info("Current storagepool " + storagePool.getName() + " iops capacity: " + capacityIops); storagePool.setCapacityIops(Math.max(0, capacityIops)); _storagePoolDao.update(storagePool.getId(), storagePool); } @@ -437,7 +438,7 @@ private String createResourceBase( try { - s_logger.info("Linstor: Spawn resource " + rscName); + logger.info("Linstor: Spawn resource " + rscName); ApiCallRcList answers = api.resourceGroupSpawn(rscGrp, rscGrpSpawn); checkLinstorAnswersThrow(answers); @@ -446,7 +447,7 @@ private String createResourceBase( return getDeviceName(api, rscName); } catch (ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -466,7 +467,7 @@ private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO) { return deviceName; } catch (ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -477,10 +478,10 @@ private void resizeResource(DevelopersApi api, String resourceName, long sizeByt ApiCallRcList answers = api.volumeDefinitionModify(resourceName, 0, dfm); if (answers.hasError()) { - s_logger.error("Resize error: " + answers.get(0).getMessage()); + logger.error("Resize error: " + answers.get(0).getMessage()); throw new CloudRuntimeException(answers.get(0).getMessage()); } else { - s_logger.info(String.format("Successfully resized %s to %d kib", resourceName, dfm.getSizeKib())); + logger.info(String.format("Successfully resized %s to %d kib", resourceName, dfm.getSizeKib())); } } @@ -495,7 +496,7 @@ private String cloneResource(long csCloneId, VolumeInfo volumeInfo, 
StoragePoolV final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress()); try { - s_logger.info("Clone resource definition " + cloneRes + " to " + rscName); + logger.info("Clone resource definition " + cloneRes + " to " + rscName); ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest(); cloneRequest.setName(rscName); ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone( @@ -507,18 +508,17 @@ private String cloneResource(long csCloneId, VolumeInfo volumeInfo, StoragePoolV throw new CloudRuntimeException("Clone for resource " + rscName + " failed."); } - s_logger.info("Clone resource definition " + cloneRes + " to " + rscName + " finished"); + logger.info("Clone resource definition " + cloneRes + " to " + rscName + " finished"); if (volumeInfo.getSize() != null && volumeInfo.getSize() > 0) { resizeResource(linstorApi, rscName, volumeInfo.getSize()); } - applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName()); applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops()); return getDeviceName(linstorApi, rscName); } catch (ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } else { @@ -548,7 +548,7 @@ private String createResourceFromSnapshot(long csSnapshotId, String rscName, Sto try { - s_logger.debug("Create new resource definition: " + rscName); + logger.debug("Create new resource definition: " + rscName); ResourceDefinitionCreate rdCreate = createResourceDefinitionCreate(rscName, rscGrp); ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate); checkLinstorAnswersThrow(answers); @@ -556,12 +556,12 @@ private String createResourceFromSnapshot(long csSnapshotId, String rscName, Sto SnapshotRestore snapshotRestore = new SnapshotRestore(); 
snapshotRestore.toResource(rscName); - s_logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName); + logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName); answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore); checkLinstorAnswersThrow(answers); // restore snapshot to new resource - s_logger.info("Restore resource from snapshot: " + cloneRes + ":" + snapName); + logger.info("Restore resource from snapshot: " + cloneRes + ":" + snapName); answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore); checkLinstorAnswersThrow(answers); @@ -570,7 +570,7 @@ private String createResourceFromSnapshot(long csSnapshotId, String rscName, Sto return getDeviceName(linstorApi, rscName); } catch (ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx); } } @@ -628,7 +628,7 @@ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo, StoragePoolVO s final String tempRscName = LinstorUtil.RSC_PREFIX + csName; createResourceFromSnapshot(csSnapshotId, tempRscName, storagePoolVO); - s_logger.debug("Temp resource created: " + tempRscName); + logger.debug("Temp resource created: " + tempRscName); addTempVolumeToDb(csSnapshotId, csName); } else if (snapshotDetails != null && snapshotDetails.getValue() != null && @@ -638,7 +638,7 @@ else if (snapshotDetails != null && snapshotDetails.getValue() != null && deleteResourceDefinition(storagePoolVO, snapshotDetails.getValue()); - s_logger.debug("Temp resource deleted: " + snapshotDetails.getValue()); + logger.debug("Temp resource deleted: " + snapshotDetails.getValue()); removeTempVolumeFromDb(csSnapshotId); } else { @@ -660,7 +660,7 @@ public void createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCall case VOLUME: VolumeInfo volumeInfo = 
(VolumeInfo) vol; VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); - s_logger.debug("createAsync - creating volume"); + logger.debug("createAsync - creating volume"); devPath = createVolume(volumeInfo, storagePool); volume.setFolder("/dev/"); volume.setPoolId(storagePool.getId()); @@ -670,22 +670,22 @@ public void createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCall _volumeDao.update(volume.getId(), volume); break; case SNAPSHOT: - s_logger.debug("createAsync - SNAPSHOT"); + logger.debug("createAsync - SNAPSHOT"); createVolumeFromSnapshot((SnapshotInfo) vol, storagePool); break; case TEMPLATE: errMsg = "creating template - not supported"; - s_logger.error("createAsync - " + errMsg); + logger.error("createAsync - " + errMsg); break; default: errMsg = "Invalid DataObjectType (" + vol.getType() + ") passed to createAsync"; - s_logger.error(errMsg); + logger.error(errMsg); } } catch (Exception ex) { errMsg = ex.getMessage(); - s_logger.error("createAsync: " + errMsg); + logger.error("createAsync: " + errMsg); if (callback == null) { throw ex; @@ -750,7 +750,7 @@ private String doRevertSnapshot(final SnapshotInfo snapshot, final VolumeInfo vo resultMsg = "Linstor: Snapshot revert datastore not supported"; } } catch (ApiException apiEx) { - s_logger.error("Linstor: ApiEx - " + apiEx.getMessage()); + logger.error("Linstor: ApiEx - " + apiEx.getMessage()); resultMsg = apiEx.getBestMessage(); } @@ -763,7 +763,7 @@ public void revertSnapshot( SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) { - s_logger.debug("Linstor: revertSnapshot"); + logger.debug("Linstor: revertSnapshot"); final VolumeInfo volumeInfo = snapshot.getBaseVolume(); VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); if (volumeVO == null || volumeVO.getRemoved() != null) { @@ -799,7 +799,7 @@ private static boolean canCopyTemplateCond(DataObject srcData, DataObject dstDat @Override public boolean canCopy(DataObject srcData, DataObject dstData) { - 
s_logger.debug("LinstorPrimaryDataStoreDriverImpl.canCopy: " + srcData.getType() + " -> " + dstData.getType()); + logger.debug("LinstorPrimaryDataStoreDriverImpl.canCopy: " + srcData.getType() + " -> " + dstData.getType()); if (canCopySnapshotCond(srcData, dstData)) { SnapshotInfo sinfo = (SnapshotInfo) srcData; @@ -819,7 +819,7 @@ public boolean canCopy(DataObject srcData, DataObject dstData) @Override public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCallback callback) { - s_logger.debug("LinstorPrimaryDataStoreDriverImpl.copyAsync: " + logger.debug("LinstorPrimaryDataStoreDriverImpl.copyAsync: " + srcData.getType() + " -> " + dstData.getType()); final CopyCommandResult res; @@ -866,12 +866,12 @@ private Optional getLinstorEP(DevelopersApi api, String rscN for (String nodeName : linstorNodeNames) { host = _hostDao.findByName(nodeName); if (host != null) { - s_logger.info(String.format("Linstor: Make resource %s available on node %s ...", rscName, nodeName)); + logger.info(String.format("Linstor: Make resource %s available on node %s ...", rscName, nodeName)); ApiCallRcList answers = api.resourceMakeAvailableOnNode(rscName, nodeName, new ResourceMakeAvailable()); if (!answers.hasError()) { break; // found working host } else { - s_logger.error( + logger.error( String.format("Linstor: Unable to make resource %s on node %s available: %s", rscName, nodeName, @@ -882,7 +882,7 @@ private Optional getLinstorEP(DevelopersApi api, String rscN if (host == null) { - s_logger.error("Linstor: Couldn't create a resource on any cloudstack host."); + logger.error("Linstor: Couldn't create a resource on any cloudstack host."); return Optional.empty(); } else @@ -899,7 +899,7 @@ private Optional getDiskfullEP(DevelopersApi api, String rsc Host host = _hostDao.findByName(linSP.getNodeName()); if (host == null) { - s_logger.error("Linstor: Host '" + linSP.getNodeName() + "' not found."); + logger.error("Linstor: Host '" + linSP.getNodeName() + "' not 
found."); return Optional.empty(); } else @@ -960,7 +960,7 @@ private Answer copyTemplate(DataObject srcData, DataObject dstData) { answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint."); } } catch (ApiException exc) { - s_logger.error("copy template failed: ", exc); + logger.error("copy template failed: ", exc); throw new CloudRuntimeException(exc.getBestMessage()); } return answer; @@ -1032,12 +1032,12 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { if (optEP.isPresent()) { answer = optEP.get().sendMessage(cmd); } else { - s_logger.debug("No diskfull endpoint found to copy image, creating diskless endpoint"); + logger.debug("No diskfull endpoint found to copy image, creating diskless endpoint"); answer = copyFromTemporaryResource(api, pool, rscName, snapshotInfo, cmd); } return answer; } catch (Exception e) { - s_logger.debug("copy snapshot failed: ", e); + logger.debug("copy snapshot failed: ", e); throw new CloudRuntimeException(e.toString()); } @@ -1047,7 +1047,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { // as long as canCopy is false, this isn't called - s_logger.debug("Linstor: copyAsync with host"); + logger.debug("Linstor: copyAsync with host"); copyAsync(srcData, destData, callback); } @@ -1065,16 +1065,16 @@ private CreateCmdResult notifyResize( try { ResizeVolumeAnswer answer = (ResizeVolumeAnswer) _storageMgr.sendToPool(pool, resizeParameter.hosts, resizeCmd); if (answer != null && answer.getResult()) { - s_logger.debug("Resize: notified hosts"); + logger.debug("Resize: notified hosts"); } else if (answer != null) { result.setResult(answer.getDetails()); } else { - s_logger.debug("return a null answer, mark it as failed for unknown reason"); + logger.debug("return a null answer, mark it as failed for unknown reason"); result.setResult("return a null answer, 
mark it as failed for unknown reason"); } } catch (Exception e) { - s_logger.debug("sending resize command failed", e); + logger.debug("sending resize command failed", e); result.setResult(e.toString()); } @@ -1109,7 +1109,7 @@ public void resize(DataObject data, AsyncCompletionCallback cal } } catch (ApiException apiExc) { - s_logger.error(apiExc); + logger.error(apiExc); errMsg = apiExc.getBestMessage(); } @@ -1130,7 +1130,7 @@ public void handleQualityOfServiceForVolumeMigration( VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { - s_logger.debug("Linstor: handleQualityOfServiceForVolumeMigration"); + logger.debug("Linstor: handleQualityOfServiceForVolumeMigration"); } private Answer createAnswerAndPerstistDetails(DevelopersApi api, SnapshotInfo snapshotInfo, String rscName) @@ -1153,7 +1153,7 @@ private Answer createAnswerAndPerstistDetails(DevelopersApi api, SnapshotInfo sn @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - s_logger.debug("Linstor: takeSnapshot with snapshot: " + snapshotInfo.getUuid()); + logger.debug("Linstor: takeSnapshot with snapshot: " + snapshotInfo.getUuid()); final VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); final VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId()); @@ -1174,12 +1174,12 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback dsInfos) { throw new CloudRuntimeException("The Zone ID must be specified."); } ClusterVO cluster = clusterDao.findById(clusterId); - s_logger.info("Linstor: Setting Linstor cluster-wide primary storage uuid to " + uuid); + logger.info("Linstor: Setting Linstor cluster-wide primary storage uuid to " + uuid); parameters.setPodId(podId); parameters.setClusterId(clusterId); @@ -177,10 +178,10 @@ public DataStore initialize(Map dsInfos) { } protected boolean createStoragePool(long hostId, StoragePool pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " + hostId); + 
logger.debug("creating pool " + pool.getName() + " on host " + hostId); if (pool.getPoolType() != Storage.StoragePoolType.Linstor) { - s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); + logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); return false; } CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); @@ -192,7 +193,7 @@ protected boolean createStoragePool(long hostId, StoragePool pool) { String msg = answer != null ? "Can not create storage pool through host " + hostId + " due to " + answer.getDetails() : "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } } @@ -228,12 +229,12 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { poolHosts.add(host); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); + logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); } } if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); _primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); @@ -259,7 +260,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h try { _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } diff --git a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java 
b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java index b6904b90b29f..c0c55a9ceae3 100644 --- a/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java +++ b/plugins/storage/volume/linstor/src/main/java/org/apache/cloudstack/storage/datastore/util/LinstorUtil.java @@ -36,10 +36,11 @@ import java.util.stream.Collectors; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class LinstorUtil { - private static final Logger s_logger = Logger.getLogger(LinstorUtil.class); + protected static Logger LOGGER = LogManager.getLogger(LinstorUtil.class); public final static String PROVIDER_NAME = "Linstor"; public static final String RSC_PREFIX = "cs-"; @@ -147,7 +148,7 @@ public static long getCapacityBytes(String linstorUrl, String rscGroupName) { if (rscGrps.isEmpty()) { final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName); - s_logger.error(errMsg); + LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -164,7 +165,7 @@ public static long getCapacityBytes(String linstorUrl, String rscGroupName) { .mapToLong(sp -> sp.getTotalCapacity() != null ? 
sp.getTotalCapacity() : 0L) .sum() * 1024; // linstor uses kiB } catch (ApiException apiEx) { - s_logger.error(apiEx.getMessage()); + LOGGER.error(apiEx.getMessage()); throw new CloudRuntimeException(apiEx); } } diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java index 8487c881158e..582411055c7a 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java @@ -41,7 +41,8 @@ import org.apache.cloudstack.storage.datastore.util.NexentaStorAppliance; import org.apache.cloudstack.storage.datastore.util.NexentaStorAppliance.NexentaStorZvol; import org.apache.cloudstack.storage.datastore.util.NexentaUtil; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -58,7 +59,7 @@ import com.cloud.utils.Pair; public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver { - private static final Logger logger = Logger.getLogger(NexentaPrimaryDataStoreDriver.class); + protected Logger logger = LogManager.getLogger(getClass()); @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java index 507189edc14c..c1d3668ba02c 100644 --- 
a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/lifecylce/NexentaPrimaryDataStoreLifeCycle.java @@ -32,7 +32,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.util.NexentaUtil; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.dc.DataCenterVO; @@ -46,8 +47,7 @@ public class NexentaPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { - private static final Logger logger = - Logger.getLogger(NexentaPrimaryDataStoreLifeCycle.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private DataCenterDao zoneDao; diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java index e13a7e649e77..376cd29d2eb3 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/provider/NexentaHostListener.java @@ -18,44 +18,45 @@ */ package org.apache.cloudstack.storage.datastore.provider; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; public class NexentaHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(NexentaHostListener.class); + protected 
Logger logger = LogManager.getLogger(getClass()); @Override public boolean hostAdded(long hostId) { - s_logger.trace("hostAdded(long) invoked"); + logger.trace("hostAdded(long) invoked"); return true; } @Override public boolean hostConnect(long hostId, long poolId) { - s_logger.trace("hostConnect(long, long) invoked"); + logger.trace("hostConnect(long, long) invoked"); return true; } @Override public boolean hostDisconnected(long hostId, long poolId) { - s_logger.trace("hostDisconnected(long, long) invoked"); + logger.trace("hostDisconnected(long, long) invoked"); return true; } @Override public boolean hostAboutToBeRemoved(long hostId) { - s_logger.trace("hostAboutToBeRemoved(long) invoked"); + logger.trace("hostAboutToBeRemoved(long) invoked"); return true; } @Override public boolean hostRemoved(long hostId, long clusterId) { - s_logger.trace("hostRemoved(long) invoked"); + logger.trace("hostRemoved(long) invoked"); return true; } diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaNmsClient.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaNmsClient.java index e1a59f78facb..73f3fa0be78d 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaNmsClient.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaNmsClient.java @@ -43,7 +43,8 @@ import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.DefaultHttpClient; import org.apache.http.impl.conn.BasicClientConnectionManager; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.utils.security.SSLUtils; @@ -53,7 +54,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class NexentaNmsClient { - private static final Logger logger = 
Logger.getLogger(NexentaNmsClient.class); + protected Logger logger = LogManager.getLogger(getClass()); protected NexentaNmsUrl nmsUrl = null; protected DefaultHttpClient httpClient = null; diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaStorAppliance.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaStorAppliance.java index fbb6645b8e0a..8c22908c64db 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaStorAppliance.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/util/NexentaStorAppliance.java @@ -22,14 +22,14 @@ import java.util.LinkedList; import org.apache.cloudstack.storage.datastore.util.NexentaNmsClient.NmsResponse; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.exception.CloudRuntimeException; import com.google.gson.annotations.SerializedName; public class NexentaStorAppliance { - private static final Logger logger = LogManager.getLogger(NexentaStorAppliance.class); + protected Logger logger = LogManager.getLogger(getClass()); protected NexentaNmsClient client; protected NexentaUtil.NexentaPluginParameters parameters; diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 69f98567f728..dbbfcfc59213 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -54,15 +54,16 @@ import 
org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.ssl.SSLContextBuilder; -import org.apache.log4j.Logger; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class PrimeraAdapter implements ProviderAdapter { - static final Logger logger = Logger.getLogger(PrimeraAdapter.class); + protected Logger logger = LogManager.getLogger(getClass()); public static final String HOSTSET = "hostset"; public static final String CPG = "cpg"; diff --git a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java index 0b26ce0337a6..fcaa5b4f5e19 100644 --- a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java @@ -20,7 +20,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -52,7 +53,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { - private static final Logger s_logger = Logger.getLogger(SamplePrimaryDataStoreDriverImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject EndPointSelector selector; @Inject @@ -202,7 +203,7 @@ public void 
createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCall EndPoint ep = selector.select(vol); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } CreateObjectCommand createCmd = new CreateObjectCommand(null); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java index e557e0881328..a9dc8b42cd5a 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java @@ -23,14 +23,15 @@ import java.util.concurrent.ConcurrentHashMap; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.storage.StorageManager; import com.cloud.utils.crypt.DBEncryptionUtil; import com.google.common.base.Preconditions; public class ScaleIOGatewayClientConnectionPool { - private static final Logger LOGGER = Logger.getLogger(ScaleIOGatewayClientConnectionPool.class); + protected Logger logger = LogManager.getLogger(getClass()); private ConcurrentHashMap gatewayClients; @@ -66,7 +67,7 @@ public ScaleIOGatewayClient getClient(Long storagePoolId, StoragePoolDetailsDao client = new ScaleIOGatewayClientImpl(url, username, password, false, clientTimeout, clientMaxConnections); gatewayClients.put(storagePoolId, client); - LOGGER.debug("Added gateway client for the storage pool: " + storagePoolId); + logger.debug("Added gateway client for the storage pool: " + 
storagePoolId); } } @@ -82,7 +83,7 @@ public boolean removeClient(Long storagePoolId) { } if (client != null) { - LOGGER.debug("Removed gateway client for the storage pool: " + storagePoolId); + logger.debug("Removed gateway client for the storage pool: " + storagePoolId); return true; } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java index fa4283139432..32c717b64898 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -68,7 +68,8 @@ import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.pool.PoolStats; import org.apache.http.util.EntityUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.storage.Storage; import com.cloud.utils.exception.CloudRuntimeException; @@ -81,7 +82,7 @@ import com.google.common.base.Preconditions; public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient { - private static final Logger LOG = Logger.getLogger(ScaleIOGatewayClientImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private final URI apiURI; private final HttpClient httpClient; @@ -141,7 +142,7 @@ public ScaleIOGatewayClientImpl(final String url, final String username, final S this.password = password; authenticate(); - LOG.debug("API client for the PowerFlex gateway " + apiURI.getHost() + " is created successfully, with max connections: " + logger.debug("API client for the PowerFlex gateway " + apiURI.getHost() + " is created successfully, with max connections: " + maxConnections + " and timeout: " + 
timeout + " secs"); } @@ -155,14 +156,14 @@ private synchronized void authenticate() { HttpResponse response = null; try { authenticating = true; - LOG.debug("Authenticating gateway " + apiURI.getHost() + " with the request: " + request.toString()); + logger.debug("Authenticating gateway " + apiURI.getHost() + " with the request: " + request.toString()); response = httpClient.execute(request); if (isNullResponse(response)) { - LOG.warn("Invalid response received while authenticating, for the request: " + request.toString()); + logger.warn("Invalid response received while authenticating, for the request: " + request.toString()); throw new CloudRuntimeException("Failed to authenticate PowerFlex API Gateway due to invalid response from the Gateway " + apiURI.getHost()); } - LOG.debug("Received response: " + response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase() + logger.debug("Received response: " + response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase() + ", for the authenticate request: " + request.toString()); if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { throw new CloudRuntimeException("PowerFlex Gateway " + apiURI.getHost() + " login failed, please check the provided settings"); @@ -173,13 +174,13 @@ private synchronized void authenticate() { throw new CloudRuntimeException("Failed to create a valid session for PowerFlex Gateway " + apiURI.getHost() + " to perform API requests"); } - LOG.info("PowerFlex API Gateway " + apiURI.getHost() + " authenticated successfully"); + logger.info("PowerFlex API Gateway " + apiURI.getHost() + " authenticated successfully"); this.sessionKey = sessionKeyInResponse.replace("\"", ""); long now = System.currentTimeMillis(); createTime = lastUsedTime = now; } catch (final IOException e) { - LOG.error("Failed to authenticate PowerFlex API Gateway " + apiURI.getHost() + " due to: " + e.getMessage() + getConnectionManagerStats()); + 
logger.error("Failed to authenticate PowerFlex API Gateway " + apiURI.getHost() + " due to: " + e.getMessage() + getConnectionManagerStats()); throw new CloudRuntimeException("Failed to authenticate PowerFlex API Gateway " + apiURI.getHost() + " due to: " + e.getMessage()); } finally { authenticating = false; @@ -191,7 +192,7 @@ private synchronized void authenticate() { private synchronized void renewClientSessionOnExpiry() { if (isSessionExpired()) { - LOG.debug("Session expired for the PowerFlex API Gateway " + apiURI.getHost() + ", renewing"); + logger.debug("Session expired for the PowerFlex API Gateway " + apiURI.getHost() + ", renewing"); authenticate(); } } @@ -199,13 +200,13 @@ private synchronized void renewClientSessionOnExpiry() { private boolean isSessionExpired() { long now = System.currentTimeMillis() + BUFFER_TIME_IN_MILLISECS; if ((now - createTime) > MAX_VALID_SESSION_TIME_IN_MILLISECS) { - LOG.debug("Session expired for the Gateway " + apiURI.getHost() + ", token is invalid after " + MAX_VALID_SESSION_TIME_IN_HRS + logger.debug("Session expired for the Gateway " + apiURI.getHost() + ", token is invalid after " + MAX_VALID_SESSION_TIME_IN_HRS + " hours from the time it was created"); return true; } if ((now - lastUsedTime) > MAX_IDLE_TIME_IN_MILLISECS) { - LOG.debug("Session expired for the Gateway " + apiURI.getHost() + ", as there has been no activity for " + MAX_IDLE_TIME_IN_MINS + " mins"); + logger.debug("Session expired for the Gateway " + apiURI.getHost() + ", as there has been no activity for " + MAX_IDLE_TIME_IN_MINS + " mins"); return true; } @@ -214,12 +215,12 @@ private boolean isSessionExpired() { private boolean isNullResponse(final HttpResponse response) { if (response == null) { - LOG.warn("Nil response"); + logger.warn("Nil response"); return true; } if (response.getStatusLine() == null) { - LOG.warn("No status line in the response"); + logger.warn("No status line in the response"); return true; } @@ -231,7 +232,7 @@ private 
boolean checkAuthFailure(final HttpResponse response, final boolean rene if (!renewAndRetryOnAuthFailure) { throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, "PowerFlex Gateway API call unauthorized, please check the provided settings"); } - LOG.debug("PowerFlex Gateway API call unauthorized. Current token might be invalid, renew the session." + getConnectionManagerStats()); + logger.debug("PowerFlex Gateway API call unauthorized. Current token might be invalid, renew the session." + getConnectionManagerStats()); return true; } return false; @@ -243,7 +244,7 @@ private void checkResponseOK(final HttpResponse response) { } if (response.getStatusLine().getStatusCode() == HttpStatus.SC_NO_CONTENT) { - LOG.warn("Requested resource does not exist"); + logger.warn("Requested resource does not exist"); return; } @@ -258,7 +259,7 @@ private void checkResponseOK(final HttpResponse response) { responseBody = EntityUtils.toString(response.getEntity()); } catch (IOException ignored) { } - LOG.debug("HTTP request failed, status code: " + response.getStatusLine().getStatusCode() + ", response: " + logger.debug("HTTP request failed, status code: " + response.getStatusLine().getStatusCode() + ", response: " + responseBody + getConnectionManagerStats()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API failed due to: " + responseBody); } @@ -282,10 +283,10 @@ private T get(final String path, final Class type, final boolean renewAnd while (authenticating); // wait for authentication request (if any) to complete (and to pick the new session key) final HttpGet request = new HttpGet(apiURI.toString() + path); request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); - LOG.debug("Sending GET request: " + request.toString()); + logger.debug("Sending GET request: " + request.toString()); response = httpClient.execute(request); String responseStatus = (!isNullResponse(response)) ? 
(response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; - LOG.debug("Received response: " + responseStatus + ", for the sent GET request: " + request.toString()); + logger.debug("Received response: " + responseStatus + ", for the sent GET request: " + request.toString()); if (checkAuthFailure(response, renewAndRetryOnAuthFailure)) { EntityUtils.consumeQuietly(response.getEntity()); responseConsumed = true; @@ -295,7 +296,7 @@ private T get(final String path, final Class type, final boolean renewAnd } return processResponse(response, type); } catch (final IOException e) { - LOG.error("Failed in GET method due to: " + e.getMessage() + getConnectionManagerStats(), e); + logger.error("Failed in GET method due to: " + e.getMessage() + getConnectionManagerStats(), e); checkResponseTimeOut(e); } finally { if (!responseConsumed && response != null) { @@ -328,10 +329,10 @@ private T post(final String path, final Object obj, final Class type, fin request.setEntity(new StringEntity(json)); } } - LOG.debug("Sending POST request: " + request.toString()); + logger.debug("Sending POST request: " + request.toString()); response = httpClient.execute(request); String responseStatus = (!isNullResponse(response)) ? 
(response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; - LOG.debug("Received response: " + responseStatus + ", for the sent POST request: " + request.toString()); + logger.debug("Received response: " + responseStatus + ", for the sent POST request: " + request.toString()); if (checkAuthFailure(response, renewAndRetryOnAuthFailure)) { EntityUtils.consumeQuietly(response.getEntity()); responseConsumed = true; @@ -341,7 +342,7 @@ private T post(final String path, final Object obj, final Class type, fin } return processResponse(response, type); } catch (final IOException e) { - LOG.error("Failed in POST method due to: " + e.getMessage() + getConnectionManagerStats(), e); + logger.error("Failed in POST method due to: " + e.getMessage() + getConnectionManagerStats(), e); checkResponseTimeOut(e); } finally { if (!responseConsumed && response != null) { @@ -529,14 +530,14 @@ public boolean revertSnapshot(final String systemId, final Map s boolean revertStatus = revertSnapshot(sourceSnapshotVolumeId, destVolumeId); if (!revertStatus) { revertSnapshotResult = false; - LOG.warn("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); + logger.warn("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); throw new CloudRuntimeException("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); } else { revertStatusIndex++; } } } catch (final Exception e) { - LOG.error("Failed to revert vm snapshot due to: " + e.getMessage(), e); + logger.error("Failed to revert vm snapshot due to: " + e.getMessage(), e); throw new CloudRuntimeException("Failed to revert vm snapshot due to: " + e.getMessage()); } finally { if (!revertSnapshotResult) { @@ -748,7 +749,7 @@ public boolean deleteVolume(final String volumeId) { } } catch (Exception ex) { if (ex instanceof ServerApiException && ex.getMessage().contains("Could not find the volume")) { - LOG.warn(String.format("API says deleting volume %s does not 
exist, handling gracefully", volumeId)); + logger.warn(String.format("API says deleting volume %s does not exist, handling gracefully", volumeId)); return true; } throw ex; @@ -765,18 +766,18 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, try { Volume volume = getVolume(srcVolumeId); if (volume == null || StringUtils.isEmpty(volume.getVtreeId())) { - LOG.warn("Couldn't find the volume(-tree), can not migrate the volume " + srcVolumeId); + logger.warn("Couldn't find the volume(-tree), can not migrate the volume " + srcVolumeId); return false; } String srcPoolId = volume.getStoragePoolId(); - LOG.info("Migrating the volume: " + srcVolumeId + " on the src pool: " + srcPoolId + " to the dest pool: " + destPoolId + + logger.info("Migrating the volume: " + srcVolumeId + " on the src pool: " + srcPoolId + " to the dest pool: " + destPoolId + " in the same PowerFlex cluster"); post("/instances/Volume::" + srcVolumeId + "/action/migrateVTree", String.format("{\"destSPId\":\"%s\"}", destPoolId), Boolean.class); - LOG.debug("Wait until the migration is complete for the volume: " + srcVolumeId); + logger.debug("Wait until the migration is complete for the volume: " + srcVolumeId); long migrationStartTime = System.currentTimeMillis(); boolean status = waitForVolumeMigrationToComplete(volume.getVtreeId(), timeoutInSecs); @@ -784,13 +785,13 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, // volume, v-tree, snapshot ids remains same after the migration volume = getVolume(srcVolumeId); if (volume == null || volume.getStoragePoolId() == null) { - LOG.warn("Couldn't get the volume: " + srcVolumeId + " details after migration"); + logger.warn("Couldn't get the volume: " + srcVolumeId + " details after migration"); return status; } else { String volumeOnPoolId = volume.getStoragePoolId(); // confirm whether the volume is on the dest storage pool or not if (status && destPoolId.equalsIgnoreCase(volumeOnPoolId)) { - 
LOG.debug("Migration success for the volume: " + srcVolumeId); + logger.debug("Migration success for the volume: " + srcVolumeId); return true; } else { try { @@ -813,23 +814,23 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, return status; } catch (Exception ex) { - LOG.warn("Exception on pause/rollback migration of the volume: " + srcVolumeId + " - " + ex.getLocalizedMessage()); + logger.warn("Exception on pause/rollback migration of the volume: " + srcVolumeId + " - " + ex.getLocalizedMessage()); } } } } catch (final Exception e) { - LOG.error("Failed to migrate PowerFlex volume due to: " + e.getMessage(), e); + logger.error("Failed to migrate PowerFlex volume due to: " + e.getMessage(), e); throw new CloudRuntimeException("Failed to migrate PowerFlex volume due to: " + e.getMessage()); } - LOG.debug("Migration failed for the volume: " + srcVolumeId); + logger.debug("Migration failed for the volume: " + srcVolumeId); return false; } private boolean waitForVolumeMigrationToComplete(final String volumeTreeId, int waitTimeoutInSecs) { - LOG.debug("Waiting for the migration to complete for the volume-tree " + volumeTreeId); + logger.debug("Waiting for the migration to complete for the volume-tree " + volumeTreeId); if (StringUtils.isEmpty(volumeTreeId)) { - LOG.warn("Invalid volume-tree id, unable to check the migration status of the volume-tree " + volumeTreeId); + logger.warn("Invalid volume-tree id, unable to check the migration status of the volume-tree " + volumeTreeId); return false; } @@ -841,24 +842,24 @@ private boolean waitForVolumeMigrationToComplete(final String volumeTreeId, int VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volumeTreeId); if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { - LOG.debug("Migration completed for the volume-tree " + volumeTreeId); + logger.debug("Migration completed for the volume-tree " + volumeTreeId); 
return true; } } catch (Exception ex) { - LOG.warn("Exception while checking for migration status of the volume-tree: " + volumeTreeId + " - " + ex.getLocalizedMessage()); + logger.warn("Exception while checking for migration status of the volume-tree: " + volumeTreeId + " - " + ex.getLocalizedMessage()); // don't do anything } finally { waitTimeoutInSecs = waitTimeoutInSecs - delayTimeInSecs; } } - LOG.debug("Unable to complete the migration for the volume-tree " + volumeTreeId); + logger.debug("Unable to complete the migration for the volume-tree " + volumeTreeId); return false; } private VTreeMigrationInfo.MigrationStatus getVolumeTreeMigrationStatus(final String volumeTreeId) { if (StringUtils.isEmpty(volumeTreeId)) { - LOG.warn("Invalid volume-tree id, unable to get the migration status of the volume-tree " + volumeTreeId); + logger.warn("Invalid volume-tree id, unable to get the migration status of the volume-tree " + volumeTreeId); return null; } @@ -874,13 +875,13 @@ private boolean rollbackVolumeMigration(final String srcVolumeId) { Volume volume = getVolume(srcVolumeId); if (volume == null) { - LOG.warn("Unable to rollback volume migration, couldn't get details for the volume: " + srcVolumeId); + logger.warn("Unable to rollback volume migration, couldn't get details for the volume: " + srcVolumeId); return false; } VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volume.getVtreeId()); if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { - LOG.debug("Volume: " + srcVolumeId + " is not migrating, no need to rollback"); + logger.debug("Volume: " + srcVolumeId + " is not migrating, no need to rollback"); return true; } @@ -893,12 +894,12 @@ private boolean rollbackVolumeMigration(final String srcVolumeId) { Thread.sleep(3000); // Try after few secs migrationStatus = getVolumeTreeMigrationStatus(volume.getVtreeId()); // Get updated migration status if (migrationStatus != null && 
migrationStatus == VTreeMigrationInfo.MigrationStatus.Paused) { - LOG.debug("Migration for the volume: " + srcVolumeId + " paused"); + logger.debug("Migration for the volume: " + srcVolumeId + " paused"); paused = true; break; } } catch (Exception ex) { - LOG.warn("Exception while checking for migration pause status of the volume: " + srcVolumeId + " - " + ex.getLocalizedMessage()); + logger.warn("Exception while checking for migration pause status of the volume: " + srcVolumeId + " - " + ex.getLocalizedMessage()); // don't do anything } finally { retryCount--; @@ -914,14 +915,14 @@ private boolean rollbackVolumeMigration(final String srcVolumeId) { return migrateVTreeStatus; } } else { - LOG.warn("Migration for the volume: " + srcVolumeId + " didn't pause, couldn't rollback"); + logger.warn("Migration for the volume: " + srcVolumeId + " didn't pause, couldn't rollback"); } return false; } private boolean pauseVolumeMigration(final String volumeId, final boolean forced) { if (StringUtils.isEmpty(volumeId)) { - LOG.warn("Invalid Volume Id, Unable to pause migration of the volume " + volumeId); + logger.warn("Invalid Volume Id, Unable to pause migration of the volume " + volumeId); return false; } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index 6d3089480d0b..22689909f0ea 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -59,7 +59,8 @@ import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; 
+import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.MigrateVolumeCommand; @@ -102,7 +103,7 @@ import com.google.common.base.Preconditions; public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { - private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDataStoreDriver.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject EndPointSelector selector; @@ -146,7 +147,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore try { if (DataObjectType.VOLUME.equals(dataObject.getType())) { final VolumeVO volume = volumeDao.findById(dataObject.getId()); - LOGGER.debug("Granting access for PowerFlex volume: " + volume.getPath()); + logger.debug("Granting access for PowerFlex volume: " + volume.getPath()); Long bandwidthLimitInKbps = Long.valueOf(0); // Unlimited // Check Bandwidht Limit parameter in volume details @@ -177,7 +178,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); - LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + logger.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); if (StringUtils.isBlank(sdcId)) { @@ -189,7 +190,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore return client.mapVolumeToSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if 
(DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; - LOGGER.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath()); + logger.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath()); final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); if (StringUtils.isBlank(sdcId)) { @@ -215,14 +216,14 @@ private boolean grantAccess(DataObject dataObject, EndPoint ep, DataStore dataSt @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { if (host == null) { - LOGGER.info("Declining to revoke access to PowerFlex volume when a host is not provided"); + logger.info("Declining to revoke access to PowerFlex volume when a host is not provided"); return; } try { if (DataObjectType.VOLUME.equals(dataObject.getType())) { final VolumeVO volume = volumeDao.findById(dataObject.getId()); - LOGGER.debug("Revoking access for PowerFlex volume: " + volume.getPath()); + logger.debug("Revoking access for PowerFlex volume: " + volume.getPath()); final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); if (StringUtils.isBlank(sdcId)) { @@ -233,7 +234,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId); } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); - LOGGER.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + logger.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); if (StringUtils.isBlank(sdcId)) { @@ -244,7 +245,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) 
client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(templatePoolRef.getInstallPath()), sdcId); } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { SnapshotInfo snapshot = (SnapshotInfo) dataObject; - LOGGER.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath()); + logger.debug("Revoking access for PowerFlex volume snapshot: " + snapshot.getPath()); final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); if (StringUtils.isBlank(sdcId)) { @@ -255,18 +256,18 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(snapshot.getPath()), sdcId); } } catch (Exception e) { - LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e); + logger.warn("Failed to revoke access due to: " + e.getMessage(), e); } } public void revokeVolumeAccess(String volumePath, Host host, DataStore dataStore) { if (host == null) { - LOGGER.warn("Declining to revoke access to PowerFlex volume when a host is not provided"); + logger.warn("Declining to revoke access to PowerFlex volume when a host is not provided"); return; } try { - LOGGER.debug("Revoking access for PowerFlex volume: " + volumePath); + logger.debug("Revoking access for PowerFlex volume: " + volumePath); final String sdcId = getConnectedSdc(dataStore.getId(), host.getId()); if (StringUtils.isBlank(sdcId)) { @@ -276,7 +277,7 @@ public void revokeVolumeAccess(String volumePath, Host host, DataStore dataStore final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); client.unmapVolumeFromSdc(ScaleIOUtil.getVolumePath(volumePath), sdcId); } catch (Exception e) { - LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e); + logger.warn("Failed to revoke access due to: " + e.getMessage(), e); } } @@ -297,7 +298,7 @@ public String getConnectedSdc(long poolId, long hostId) { return poolHostVO.getLocalPath(); } } catch (Exception e) { - LOGGER.warn("Couldn't check SDC 
connection for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); + logger.warn("Couldn't check SDC connection for the host: " + hostId + " and storage pool: " + poolId + " due to " + e.getMessage(), e); } return null; @@ -333,7 +334,7 @@ public long getUsedBytes(StoragePool storagePool) { } } - LOGGER.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes)); + logger.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes)); return usedSpaceBytes; } @@ -390,7 +391,7 @@ public DataStoreTO getStoreTO(DataStore store) { @Override public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - LOGGER.debug("Taking PowerFlex volume snapshot"); + logger.debug("Taking PowerFlex volume snapshot"); Preconditions.checkArgument(snapshotInfo != null, "snapshotInfo cannot be null"); @@ -428,7 +429,7 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { - LOGGER.debug("Reverting to PowerFlex volume snapshot"); + logger.debug("Reverting to PowerFlex volume snapshot"); Preconditions.checkArgument(snapshot != null, "snapshotInfo cannot be null"); @@ -465,7 +466,7 @@ public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimary CommandResult commandResult = new CommandResult(); callback.complete(commandResult); } catch (Exception ex) { - LOGGER.debug("Unable to revert to PowerFlex snapshot: " + snapshot.getId(), ex); + logger.debug("Unable to revert to PowerFlex snapshot: " + snapshot.getId(), ex); throw new CloudRuntimeException(ex.getMessage()); } } @@ -475,7 +476,7 @@ public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId } public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId, boolean migrationInvolved) { - LOGGER.debug("Creating PowerFlex volume"); + logger.debug("Creating PowerFlex volume"); StoragePoolVO storagePool = 
storagePoolDao.findById(storagePoolId); @@ -520,7 +521,7 @@ public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId // if volume needs to be set up with encryption, do it now if it's not a root disk (which gets done during template copy) if (anyVolumeRequiresEncryption(volumeInfo) && (!volumeInfo.getVolumeType().equals(Volume.Type.ROOT) || migrationInvolved)) { - LOGGER.debug(String.format("Setting up encryption for volume %s", volumeInfo.getId())); + logger.debug(String.format("Setting up encryption for volume %s", volumeInfo.getId())); VolumeObjectTO prepVolume = (VolumeObjectTO) createdObject.getTO(); prepVolume.setPath(volumePath); prepVolume.setUuid(volumePath); @@ -541,19 +542,19 @@ public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId } } } else { - LOGGER.debug(String.format("No encryption configured for data volume %s", volumeInfo)); + logger.debug(String.format("No encryption configured for data volume %s", volumeInfo)); } return answer; } catch (Exception e) { String errMsg = "Unable to create PowerFlex Volume due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } } private String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) { - LOGGER.debug("Creating PowerFlex template volume"); + logger.debug("Creating PowerFlex template volume"); StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); Preconditions.checkArgument(templateInfo != null, "templateInfo cannot be null"); @@ -591,7 +592,7 @@ private String createTemplateVolume(TemplateInfo templateInfo, long storagePool return templatePath; } catch (Exception e) { String errMsg = "Unable to create PowerFlex template volume due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } } @@ -603,22 +604,22 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet Answer answer = 
new Answer(null, false, "not started"); try { if (dataObject.getType() == DataObjectType.VOLUME) { - LOGGER.debug("createAsync - creating volume"); + logger.debug("createAsync - creating volume"); CreateObjectAnswer createAnswer = createVolume((VolumeInfo) dataObject, dataStore.getId()); scaleIOVolumePath = createAnswer.getData().getPath(); answer = createAnswer; } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - LOGGER.debug("createAsync - creating template"); + logger.debug("createAsync - creating template"); scaleIOVolumePath = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); answer = new Answer(null, true, "created template"); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; - LOGGER.error(errMsg); + logger.error(errMsg); answer = new Answer(null, false, errMsg); } } catch (Exception ex) { errMsg = ex.getMessage(); - LOGGER.error(errMsg); + logger.error(errMsg); if (callback == null) { throw ex; } @@ -646,17 +647,17 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet try { boolean deleteResult = false; if (dataObject.getType() == DataObjectType.VOLUME) { - LOGGER.debug("deleteAsync - deleting volume"); + logger.debug("deleteAsync - deleting volume"); scaleIOVolumePath = ((VolumeInfo) dataObject).getPath(); } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { - LOGGER.debug("deleteAsync - deleting snapshot"); + logger.debug("deleteAsync - deleting snapshot"); scaleIOVolumePath = ((SnapshotInfo) dataObject).getPath(); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - LOGGER.debug("deleteAsync - deleting template"); + logger.debug("deleteAsync - deleting template"); scaleIOVolumePath = ((TemplateInfo) dataObject).getInstallPath(); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; - LOGGER.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -674,12 
+675,12 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet storagePoolDao.update(storagePoolId, storagePool); } catch (Exception e) { errMsg = "Unable to delete PowerFlex volume: " + scaleIOVolumePath + " due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } } catch (Exception ex) { errMsg = ex.getMessage(); - LOGGER.error(errMsg); + logger.error(errMsg); if (callback == null) { throw ex; } @@ -723,16 +724,16 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As } else { errMsg = "Unsupported copy operation from src object: (" + srcData.getType() + ", " + srcData.getDataStore() + "), dest object: (" + destData.getType() + ", " + destData.getDataStore() + ")"; - LOGGER.warn(errMsg); + logger.warn(errMsg); answer = new Answer(null, false, errMsg); } } else { errMsg = "Unsupported copy operation"; - LOGGER.warn(errMsg); + logger.warn(errMsg); answer = new Answer(null, false, errMsg); } } catch (Exception e) { - LOGGER.debug("Failed to copy due to " + e.getMessage(), e); + logger.debug("Failed to copy due to " + e.getMessage(), e); errMsg = e.toString(); answer = new Answer(null, false, errMsg); } @@ -756,26 +757,26 @@ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Hos * Data stores of file type happen automatically, but block device types have to handle it. Unfortunately for ScaleIO this means we add a whole 8GB to * the original size, but only if we are close to an 8GB boundary. 
*/ - LOGGER.debug(String.format("Copying template %s to volume %s", srcData.getId(), destData.getId())); + logger.debug(String.format("Copying template %s to volume %s", srcData.getId(), destData.getId())); VolumeInfo destInfo = (VolumeInfo) destData; boolean encryptionRequired = anyVolumeRequiresEncryption(destData); if (encryptionRequired) { if (needsExpansionForEncryptionHeader(srcData.getSize(), destData.getSize())) { long newSize = destData.getSize() + (1<<30); - LOGGER.debug(String.format("Destination volume %s(%s) is configured for encryption. Resizing to fit headers, new size %s will be rounded up to nearest 8Gi", destInfo.getId(), destData.getSize(), newSize)); + logger.debug(String.format("Destination volume %s(%s) is configured for encryption. Resizing to fit headers, new size %s will be rounded up to nearest 8Gi", destInfo.getId(), destData.getSize(), newSize)); ResizeVolumePayload p = new ResizeVolumePayload(newSize, destInfo.getMinIops(), destInfo.getMaxIops(), destInfo.getHypervisorSnapshotReserve(), false, destInfo.getAttachedVmName(), null, true); destInfo.addPayload(p); resizeVolume(destInfo); } else { - LOGGER.debug(String.format("Template %s has size %s, ok for volume %s with size %s", srcData.getId(), srcData.getSize(), destData.getId(), destData.getSize())); + logger.debug(String.format("Template %s has size %s, ok for volume %s with size %s", srcData.getId(), srcData.getSize(), destData.getId(), destData.getSize())); } } else { - LOGGER.debug(String.format("Destination volume is not configured for encryption, skipping encryption prep. Volume: %s", destData.getId())); + logger.debug(String.format("Destination volume is not configured for encryption, skipping encryption prep. Volume: %s", destData.getId())); } // Copy PowerFlex/ScaleIO template to volume - LOGGER.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? 
destHost.getId() : "")); + logger.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); @@ -783,7 +784,7 @@ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Hos EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData, encryptionRequired); if (ep == null) { String errorMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. Requires encryption support: %s", encryptionRequired); - LOGGER.error(errorMsg); + logger.error(errorMsg); answer = new Answer(cmd, false, errorMsg); } else { answer = ep.sendMessage(cmd); @@ -794,7 +795,7 @@ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Hos protected Answer copyOfflineVolume(DataObject srcData, DataObject destData, Host destHost) { // Copy PowerFlex/ScaleIO volume - LOGGER.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); + logger.debug(String.format("Initiating copy from PowerFlex template volume on host %s", destHost != null ? destHost.getId() : "")); String value = configDao.getValue(Config.CopyVolumeWait.key()); int copyVolumeWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); @@ -805,7 +806,7 @@ protected Answer copyOfflineVolume(DataObject srcData, DataObject destData, Host EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData, encryptionRequired); if (ep == null) { String errorMsg = String.format("No remote endpoint to send command, unable to find a valid endpoint. 
Requires encryption support: %s", encryptionRequired); - LOGGER.error(errorMsg); + logger.error(errorMsg); answer = new Answer(cmd, false, errorMsg); } else { answer = ep.sendMessage(cmd); } @@ -847,15 +848,15 @@ public Answer liveMigrateVolume(DataObject srcData, DataObject destData) { updateVolumeAfterCopyVolume(srcData, destData); updateSnapshotsAfterCopyVolume(srcData, destData); deleteSourceVolumeAfterSuccessfulBlockCopy(srcData, host); - LOGGER.debug(String.format("Successfully migrated migrate PowerFlex volume %d to storage pool %d", srcVolumeId, destPoolId)); + logger.debug(String.format("Successfully migrated migrate PowerFlex volume %d to storage pool %d", srcVolumeId, destPoolId)); answer = new Answer(null, true, null); } else { String errorMsg = "Failed to migrate PowerFlex volume: " + srcVolumeId + " to storage pool " + destPoolId; - LOGGER.debug(errorMsg); + logger.debug(errorMsg); answer = new Answer(null, false, errorMsg); } } catch (Exception e) { - LOGGER.error("Failed to migrate PowerFlex volume: " + srcVolumeId + " due to: " + e.getMessage()); + logger.error("Failed to migrate PowerFlex volume: " + srcVolumeId + " due to: " + e.getMessage()); answer = new Answer(null, false, e.getMessage()); } @@ -948,11 +949,11 @@ public void deleteSourceVolumeAfterSuccessfulBlockCopy(DataObject srcData, Host Boolean deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete source PowerFlex volume with id: " + scaleIOVolumeId; - LOGGER.warn(errMsg); + logger.warn(errMsg); } } catch (Exception e) { errMsg = "Unable to delete source PowerFlex volume: " + srcVolumePath + " due to " + e.getMessage(); - LOGGER.warn(errMsg);; + logger.warn(errMsg); } } @@ -969,12 +970,12 @@ public void revertBlockCopyVolumeOperations(DataObject srcData, DataObject destD Boolean deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId; -
LOGGER.warn(errMsg); + logger.warn(errMsg); } } catch (Exception e) { errMsg = "Unable to delete destination PowerFlex volume: " + destVolumePath + " due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1110,11 +1111,11 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { answer = new Answer(null, true, null); } else { String errorMsg = "Failed to migrate PowerFlex volume: " + srcData.getId() + " to storage pool " + destPoolId; - LOGGER.debug(errorMsg); + logger.debug(errorMsg); answer = new Answer(null, false, errorMsg); } } catch (Exception e) { - LOGGER.error("Failed to migrate PowerFlex volume: " + srcData.getId() + " due to: " + e.getMessage()); + logger.error("Failed to migrate PowerFlex volume: " + srcData.getId() + " due to: " + e.getMessage()); answer = new Answer(null, false, e.getMessage()); } @@ -1164,7 +1165,7 @@ public boolean canCopy(DataObject srcData, DataObject destData) { } private void resizeVolume(VolumeInfo volumeInfo) { - LOGGER.debug("Resizing PowerFlex volume"); + logger.debug("Resizing PowerFlex volume"); Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); @@ -1185,7 +1186,7 @@ private void resizeVolume(VolumeInfo volumeInfo) { long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0); if (scaleIOVolume.getSizeInKb() == newSizeIn8gbBoundary << 20) { - LOGGER.debug("No resize necessary at API"); + logger.debug("No resize necessary at API"); } else { scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8gbBoundary); if (scaleIOVolume == null) { @@ -1206,7 +1207,7 @@ private void resizeVolume(VolumeInfo volumeInfo) { } if (volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2) || attachedRunning) { - LOGGER.debug("Volume needs to be resized at the hypervisor host"); + logger.debug("Volume needs to be resized at the hypervisor host"); if (hostId == 0) { hostId = selector.select(volumeInfo, 
true).getId(); @@ -1234,9 +1235,9 @@ private void resizeVolume(VolumeInfo volumeInfo) { } else if (!answer.getResult()) { // for non-qcow2, notifying the running VM is going to be best-effort since we can't roll back // or avoid VM seeing a successful change at the PowerFlex volume after e.g. reboot - LOGGER.warn("Resized raw volume, but failed to notify. VM will see change on reboot. Error:" + answer.getDetails()); + logger.warn("Resized raw volume, but failed to notify. VM will see change on reboot. Error:" + answer.getDetails()); } else { - LOGGER.debug("Resized volume at host: " + answer.getDetails()); + logger.debug("Resized volume at host: " + answer.getDetails()); } } finally { if (!attachedRunning) { @@ -1259,7 +1260,7 @@ private void resizeVolume(VolumeInfo volumeInfo) { storagePoolDao.update(storagePoolId, storagePool); } catch (Exception e) { String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } } @@ -1277,7 +1278,7 @@ public void resize(DataObject dataObject, AsyncCompletionCallback getStorageStats(StoragePool storagePool) { } } catch (Exception e) { String errMsg = "Unable to get storage stats for the pool: " + storagePool.getId() + " due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1340,7 +1341,7 @@ public Pair getVolumeStats(StoragePool storagePool, String volumePat } } catch (Exception e) { String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -1361,7 +1362,7 @@ public boolean canHostAccessStoragePool(Host host, StoragePool pool) { final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); return client.isSdcConnected(poolHostVO.getLocalPath()); } 
catch (Exception e) { - LOGGER.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); + logger.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); return false; } } @@ -1371,7 +1372,7 @@ private void alertHostSdcDisconnection(Host host) { return; } - LOGGER.warn("SDC not connected on the host: " + host.getId()); + logger.warn("SDC not connected on the host: " + host.getId()); String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM"; alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index 17150699923d..a1186ae987d6 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -47,7 +47,8 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -75,7 +76,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { - private static final Logger LOGGER = 
Logger.getLogger(ScaleIOPrimaryDataStoreLifeCycle.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private ClusterDao clusterDao; @@ -111,7 +112,7 @@ private org.apache.cloudstack.storage.datastore.api.StoragePool findStoragePool( List storagePools = client.listStoragePools(); for (org.apache.cloudstack.storage.datastore.api.StoragePool pool : storagePools) { if (pool.getName().equals(storagePoolName)) { - LOGGER.info("Found PowerFlex storage pool: " + storagePoolName); + logger.info("Found PowerFlex storage pool: " + storagePoolName); final org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(pool.getId()); pool.setStatistics(poolStatistics); @@ -121,7 +122,7 @@ private org.apache.cloudstack.storage.datastore.api.StoragePool findStoragePool( } } } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - LOGGER.error("Failed to add storage pool", e); + logger.error("Failed to add storage pool", e); throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to find and validate storage pool: " + storagePoolName); } throw new CloudRuntimeException("Failed to find the provided storage pool name: " + storagePoolName + " in the discovered PowerFlex storage pools"); @@ -178,7 +179,7 @@ public DataStore initialize(Map dsInfos) { try { storagePoolName = URLDecoder.decode(uri.getPath(), "UTF-8"); } catch (UnsupportedEncodingException e) { - LOGGER.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); + logger.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); } if (storagePoolName == null) { // if decoding fails, use getPath() anyway storagePoolName = uri.getPath(); @@ -270,7 +271,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId()); } 
- LOGGER.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); + logger.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); List poolHosts = new ArrayList(); for (HostVO host : hostsInCluster) { try { @@ -278,12 +279,12 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { poolHosts.add(host); } } catch (Exception e) { - LOGGER.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); + logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); } } if (poolHosts.isEmpty()) { - LOGGER.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); throw new CloudRuntimeException("Failed to create storage pool in the cluster: " + primaryDataStoreInfo.getClusterId() + " as it is not accessible to hosts"); } @@ -305,7 +306,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper checkConnectedSdcs(dataStore.getId()); - LOGGER.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); + logger.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); List poolHosts = new ArrayList(); for (HostVO host : hosts) { @@ -314,11 +315,11 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper poolHosts.add(host); } } catch (Exception e) { - LOGGER.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " 
+ dataStore, e); } } if (poolHosts.isEmpty()) { - LOGGER.warn("No host can access storage pool " + dataStore + " in this zone."); + logger.warn("No host can access storage pool " + dataStore + " in this zone."); primaryDataStoreDao.expunge(dataStore.getId()); throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); } @@ -333,12 +334,12 @@ private void checkConnectedSdcs(Long dataStoreId) { ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStoreId, storagePoolDetailsDao); haveConnectedSdcs = client.haveConnectedSdcs(); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - LOGGER.error(String.format("Failed to create storage pool for datastore: %s", dataStoreId), e); + logger.error(String.format("Failed to create storage pool for datastore: %s", dataStoreId), e); throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to create storage pool for datastore: %s", dataStoreId)); } if (!haveConnectedSdcs) { - LOGGER.debug(String.format("No connected SDCs found for the PowerFlex storage pool of datastore: %s", dataStoreId)); + logger.debug(String.format("No connected SDCs found for the PowerFlex storage pool of datastore: %s", dataStoreId)); throw new CloudRuntimeException(String.format("Failed to create storage pool as connected SDCs not found for datastore: %s", dataStoreId)); } } @@ -387,12 +388,12 @@ public boolean deleteDataStore(DataStore dataStore) { DeleteStoragePoolCommand deleteStoragePoolCommand = new DeleteStoragePoolCommand(storagePool); final Answer answer = agentMgr.easySend(poolHostVO.getHostId(), deleteStoragePoolCommand); if (answer != null && answer.getResult()) { - LOGGER.info("Successfully deleted storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + logger.info("Successfully deleted storage pool: " + storagePool.getId() + " from host: " + 
poolHostVO.getHostId()); } else { if (answer != null) { - LOGGER.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId() + " , result: " + answer.getResult()); + logger.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId() + " , result: " + answer.getResult()); } else { - LOGGER.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + logger.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); } } } @@ -423,7 +424,7 @@ public void updateStoragePool(StoragePool storagePool, Map detai } primaryDataStoreDao.updateCapacityBytes(storagePool.getId(), Long.parseLong(capacityBytes)); - LOGGER.info("Storage pool successfully updated"); + logger.info("Storage pool successfully updated"); } catch (Throwable e) { throw new CloudRuntimeException("Failed to update the storage pool" + e); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index bb269e85a958..c20f1f04f2bf 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -34,7 +34,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -50,7 +51,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class 
ScaleIOHostListener implements HypervisorHostListener { - private static final Logger s_logger = Logger.getLogger(ScaleIOHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private AgentManager _agentMgr; @Inject private AlertManager _alertMgr; @@ -69,7 +70,7 @@ public boolean hostAdded(long hostId) { public boolean hostConnect(long hostId, long poolId) { HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); + logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); return false; } @@ -87,7 +88,7 @@ public boolean hostConnect(long hostId, long poolId) { Map poolDetails = answer.getPoolInfo().getDetails(); if (MapUtils.isEmpty(poolDetails)) { String msg = "SDC details not found on the host: " + hostId + ", (re)install SDC and restart agent"; - s_logger.warn(msg); + logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC not found on host: " + host.getUuid(), msg); return false; } @@ -102,13 +103,13 @@ public boolean hostConnect(long hostId, long poolId) { if (StringUtils.isBlank(sdcId)) { String msg = "Couldn't retrieve SDC details from the host: " + hostId + ", (re)install SDC and restart agent"; - s_logger.warn(msg); + logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg); return false; } if (!isHostSdcConnected(sdcId, poolId)) { - s_logger.warn("SDC not connected on the host: " + hostId); + logger.warn("SDC not connected on the host: " + hostId); String msg = "SDC not connected on the host: " + hostId + ", reconnect the SDC to MDM and restart agent"; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), 
msg); return false; @@ -123,17 +124,17 @@ public boolean hostConnect(long hostId, long poolId) { _storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost); } - s_logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId); + logger.info("Connection established between storage pool: " + storagePool + " and host: " + hostId); return true; } private String getHostSdcId(String sdcGuid, long poolId) { try { - s_logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); + logger.debug(String.format("Try to get host SDC Id for pool: %s, with SDC guid %s", poolId, sdcGuid)); ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); return client.getSdcIdByGuid(sdcGuid); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - s_logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); + logger.error(String.format("Failed to get host SDC Id for pool: %s", poolId), e); throw new CloudRuntimeException(String.format("Failed to establish connection with PowerFlex Gateway to get host SDC Id for pool: %s", poolId)); } } @@ -143,7 +144,7 @@ private boolean isHostSdcConnected(String sdcId, long poolId) { ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); return client.isSdcConnected(sdcId); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { - s_logger.error("Failed to check host sdc connection", e); + logger.error("Failed to check host sdc connection", e); throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to check host sdc connection"); } } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java index 0cc82c0d9f1c..37d465a40c76 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java @@ -28,12 +28,13 @@ import org.apache.cloudstack.storage.datastore.driver.ScaleIOPrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.lifecycle.ScaleIOPrimaryDataStoreLifeCycle; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.component.ComponentContext; public class ScaleIOPrimaryDatastoreProvider implements PrimaryDataStoreProvider { - private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDatastoreProvider.class); + protected Logger logger = LogManager.getLogger(getClass()); private DataStoreLifeCycle lifeCycle; private PrimaryDataStoreDriver driver; diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java index 736a43df6915..a2e01292d09f 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java @@ -17,14 +17,15 @@ package org.apache.cloudstack.storage.datastore.util; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.UuidUtils; import com.cloud.utils.script.Script; import 
org.apache.commons.lang3.StringUtils; public class ScaleIOUtil { - private static final Logger LOGGER = Logger.getLogger(ScaleIOUtil.class); + protected static Logger LOGGER = LogManager.getLogger(ScaleIOUtil.class); public static final String PROVIDER_NAME = "PowerFlex"; diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index d3360f616ddc..c76b9bb3a302 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -50,7 +50,8 @@ import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -91,7 +92,7 @@ import com.google.common.base.Preconditions; public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { - private static final Logger LOGGER = Logger.getLogger(SolidFirePrimaryDataStoreDriver.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int LOWEST_HYPERVISOR_SNAPSHOT_RESERVE = 10; private static final long MIN_IOPS_FOR_TEMPLATE_VOLUME = 100L; private static final long MAX_IOPS_FOR_TEMPLATE_VOLUME = 20000L; @@ -169,7 +170,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB (in grantAccess) on the following string: " + 
cluster.getUuid(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -214,7 +215,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } if (isRevokeAccessNotNeeded(dataObject)) { - LOGGER.debug("Skipping revoke access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); + logger.debug("Skipping revoke access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); return; } @@ -229,12 +230,12 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB (in revokeAccess) on the following string: " + cluster.getUuid(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } - LOGGER.debug("Revoking access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); + logger.debug("Revoking access for Solidfire data object type:" + dataObject.getType() + " id:" + dataObject.getId()); try { SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); @@ -565,13 +566,13 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; - LOGGER.error(errMsg); + logger.error(errMsg); } } catch (Exception ex) { errMsg = ex.getMessage(); - LOGGER.error(errMsg); + logger.error(errMsg); if (callback == null) { throw ex; @@ -840,7 +841,7 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet catch (Exception ex) { errMsg = ex.getMessage(); - LOGGER.error(errMsg); + logger.error(errMsg); } if (callback != null) { @@ -950,7 +951,7 @@ public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback dsInfos) { lClusterDefaultMinIops = 
Long.parseLong(clusterDefaultMinIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + + logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MIN_IOPS + ", using default value: " + lClusterDefaultMinIops + ". Exception: " + ex); } @@ -181,7 +182,7 @@ public DataStore initialize(Map dsInfos) { lClusterDefaultMaxIops = Long.parseLong(clusterDefaultMaxIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + + logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_MAX_IOPS + ", using default value: " + lClusterDefaultMaxIops + ". Exception: " + ex); } @@ -193,7 +194,7 @@ public DataStore initialize(Map dsInfos) { fClusterDefaultBurstIopsPercentOfMaxIops = Float.parseFloat(clusterDefaultBurstIopsPercentOfMaxIops); } } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + + logger.warn("Cannot parse the setting " + SolidFireUtil.CLUSTER_DEFAULT_BURST_IOPS_PERCENT_OF_MAX_IOPS + ", using default value: " + fClusterDefaultBurstIopsPercentOfMaxIops + ". 
Exception: " + ex); } @@ -247,7 +248,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { try { _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } @@ -271,7 +272,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h try { _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); } } @@ -325,7 +326,7 @@ public boolean deleteDataStore(DataStore dataStore) { SolidFireUtil.deleteVolume(sfConnection, sfTemplateVolumeId); } catch (Exception ex) { - s_logger.error(ex.getMessage() != null ? ex.getMessage() : "Error deleting SolidFire template volume"); + logger.error(ex.getMessage() != null ? 
ex.getMessage() : "Error deleting SolidFire template volume"); } _tmpltPoolDao.remove(templatePoolRef.getId()); diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java index 557cc3f60f62..e32fef54883c 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/SolidFireSharedPrimaryDataStoreLifeCycle.java @@ -26,7 +26,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; @@ -73,7 +74,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SolidFireSharedPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { - private static final Logger LOGGER = Logger.getLogger(SolidFireSharedPrimaryDataStoreLifeCycle.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private AccountDao accountDao; @Inject private AccountDetailsDao accountDetailsDao; @@ -183,7 +184,7 @@ public DataStore initialize(Map dsInfos) { lMinIops = Long.parseLong(minIops); } } catch (Exception ex) { - LOGGER.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage()); + logger.info("[ignored] error getting Min IOPS: " + ex.getLocalizedMessage()); } try { @@ -193,7 +194,7 @@ public DataStore initialize(Map dsInfos) { lMaxIops = Long.parseLong(maxIops); } } catch (Exception ex) { - LOGGER.info("[ignored] error getting Max IOPS: " + ex.getLocalizedMessage()); + logger.info("[ignored] error 
getting Max IOPS: " + ex.getLocalizedMessage()); } try { @@ -203,7 +204,7 @@ public DataStore initialize(Map dsInfos) { lBurstIops = Long.parseLong(burstIops); } } catch (Exception ex) { - LOGGER.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage()); + logger.info("[ignored] error getting Burst IOPS: " + ex.getLocalizedMessage()); } if (lMinIops > lMaxIops) { @@ -272,7 +273,7 @@ public DataStore initialize(Map dsInfos) { if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid(); - LOGGER.debug(errMsg); + logger.debug(errMsg); throw new CloudRuntimeException(errMsg); } @@ -418,12 +419,12 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { poolHosts.add(host); } catch (Exception e) { - LOGGER.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); + logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); } } if (poolHosts.isEmpty()) { - LOGGER.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); @@ -479,7 +480,7 @@ private boolean createStoragePool(HostVO host, StoragePool storagePool) { msg = "Cannot create storage pool through host '" + hostId + "' due to CreateStoragePoolCommand returns null"; } - LOGGER.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -562,7 +563,7 @@ public boolean deleteDataStore(DataStore dataStore) { final Answer answer = agentMgr.easySend(host.getHostId(), deleteCmd); if (answer != null && answer.getResult()) { - LOGGER.info("Successfully deleted storage pool using Host ID " + host.getHostId()); + logger.info("Successfully deleted storage pool using 
Host ID " + host.getHostId()); HostVO hostVO = hostDao.findById(host.getHostId()); @@ -575,10 +576,10 @@ public boolean deleteDataStore(DataStore dataStore) { } else { if (answer != null) { - LOGGER.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult()); + logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult()); } else { - LOGGER.error("Failed to delete storage pool using Host ID " + host.getHostId()); + logger.error("Failed to delete storage pool using Host ID " + host.getHostId()); } } } @@ -591,7 +592,7 @@ public boolean deleteDataStore(DataStore dataStore) { if (!lock.lock(SolidFireUtil.LOCK_TIME_IN_SECONDS)) { String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid(); - LOGGER.debug(errMsg); + logger.debug(errMsg); throw new CloudRuntimeException(errMsg); } @@ -660,12 +661,12 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; - LOGGER.warn(msg); + logger.warn(msg); } else if (!answer.getResult()) { String msg = "Unable to modify target on the following host: " + hostId; - LOGGER.warn(msg); + logger.warn(msg); } } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java index 998a3f95a533..d84734283939 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -25,7 +25,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; @@ -55,7 +56,7 @@ import com.cloud.vm.dao.VMInstanceDao; public class SolidFireHostListener implements HypervisorHostListener { - private static final Logger LOGGER = Logger.getLogger(SolidFireHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private AgentManager agentMgr; @Inject private AlertManager alertMgr; @@ -73,13 +74,13 @@ public boolean hostAdded(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - LOGGER.error(String.format("Failed to add host by SolidFireHostListener as host was not found with id = %s ", hostId)); + logger.error(String.format("Failed to add host by SolidFireHostListener as host was not found with id = %s ", hostId)); return false; } if (host.getClusterId() == null) { - LOGGER.error("Failed to add host by SolidFireHostListener as host has no associated cluster id"); + logger.error("Failed to add host by SolidFireHostListener as host has no associated cluster id"); return false; } @@ -295,6 +296,6 @@ private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StorageP assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; - LOGGER.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); } } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java index f111682739c5..98c8bfb51c19 100644 --- 
a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireSharedHostListener.java @@ -32,7 +32,8 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -51,7 +52,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SolidFireSharedHostListener implements HypervisorHostListener { - private static final Logger LOGGER = Logger.getLogger(SolidFireSharedHostListener.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private AgentManager agentMgr; @Inject private AlertManager alertMgr; @@ -67,13 +68,13 @@ public boolean hostAdded(long hostId) { HostVO host = hostDao.findById(hostId); if (host == null) { - LOGGER.error(String.format("Failed to add host by SolidFireSharedHostListener as host was not found with id = %s ", hostId)); + logger.error(String.format("Failed to add host by SolidFireSharedHostListener as host was not found with id = %s ", hostId)); return false; } if (host.getClusterId() == null) { - LOGGER.error("Failed to add host by SolidFireSharedHostListener as host has no associated cluster id"); + logger.error("Failed to add host by SolidFireSharedHostListener as host has no associated cluster id"); return false; } @@ -228,7 +229,7 @@ private ModifyStoragePoolAnswer sendModifyStoragePoolCommand(ModifyStoragePoolCo assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer not returned from ModifyStoragePoolCommand; Storage pool = " + storagePool.getId() + "; Host = " + 
hostId; - LOGGER.info("Connection established between storage pool " + storagePool + " and host " + hostId); + logger.info("Connection established between storage pool " + storagePool + " and host " + hostId); return (ModifyStoragePoolAnswer)answer; } diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index 47f2f8819f2c..671431f41635 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -28,7 +28,8 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; @@ -80,7 +81,7 @@ import static org.apache.commons.lang.ArrayUtils.toPrimitive; public class SolidFireUtil { - private static final Logger LOGGER = Logger.getLogger(SolidFireUtil.class); + protected static Logger LOGGER = LogManager.getLogger(SolidFireUtil.class); public static final String PROVIDER_NAME = "SolidFire"; public static final String SHARED_PROVIDER_NAME = "SolidFireShared"; @@ -88,7 +89,7 @@ public class SolidFireUtil { private static final Random RANDOM = new Random(System.nanoTime()); public static final int LOCK_TIME_IN_SECONDS = 300; - public static final String LOG_PREFIX = "SolidFire: "; + public static final String LOGGER_PREFIX = "SolidFire: "; public static final String MANAGEMENT_VIP = "mVip"; public static final String STORAGE_VIP = "sVip"; diff --git a/plugins/storage/volume/storpool/pom.xml 
b/plugins/storage/volume/storpool/pom.xml index e30ff922e0a0..8a7fda0ce79a 100644 --- a/plugins/storage/volume/storpool/pom.xml +++ b/plugins/storage/volume/storpool/pom.xml @@ -42,9 +42,12 @@ ${project.version} - ch.qos.reload4j - reload4j - ${cs.reload4j.version} + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api org.apache.commons diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java index f83a4292e208..ade9e8370a8a 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.io.FileUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand; import com.cloud.agent.api.to.DataStoreTO; @@ -44,7 +43,6 @@ @ResourceWrapper(handles = StorPoolBackupSnapshotCommand.class) public final class StorPoolBackupSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(StorPoolBackupSnapshotCommandWrapper.class); @Override public CopyCmdAnswer execute(final StorPoolBackupSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -90,7 +88,7 @@ public CopyCmdAnswer execute(final StorPoolBackupSnapshotCommand cmd, final Libv } catch (final Exception e) { final String error = String.format("Failed to backup snapshot with id [%s] with a pool %s, due to %s", cmd.getSourceTO().getId(), cmd.getSourceTO().getDataStore().getUuid(), 
e.getMessage()); SP_LOG(error); - s_logger.debug(error); + logger.debug(error); return new CopyCmdAnswer(cmd, e); } finally { if (srcPath != null) { @@ -101,7 +99,7 @@ public CopyCmdAnswer execute(final StorPoolBackupSnapshotCommand cmd, final Libv try { secondaryPool.delete(); } catch (final Exception e) { - s_logger.debug("Failed to delete secondary storage", e); + logger.debug("Failed to delete secondary storage", e); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java index 518cbb8d5e5f..da9528645d59 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupTemplateFromSnapshotCommandWrapper.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.io.FileUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.StorPoolBackupTemplateFromSnapshotCommand; import com.cloud.agent.api.to.DataStoreTO; @@ -58,7 +57,6 @@ @ResourceWrapper(handles = StorPoolBackupTemplateFromSnapshotCommand.class) public class StorPoolBackupTemplateFromSnapshotCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(StorPoolBackupTemplateFromSnapshotCommandWrapper.class); @Override public CopyCmdAnswer execute(final StorPoolBackupTemplateFromSnapshotCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -142,7 +140,7 @@ public CopyCmdAnswer execute(final StorPoolBackupTemplateFromSnapshotCommand cmd } catch (final Exception e) { final String error = 
"failed to backup snapshot: " + e.getMessage(); SP_LOG(error); - s_logger.debug(error); + logger.debug(error); return new CopyCmdAnswer(cmd, e); } finally { if (srcPath != null) { @@ -153,7 +151,7 @@ public CopyCmdAnswer execute(final StorPoolBackupTemplateFromSnapshotCommand cmd try { secondaryPool.delete(); } catch (final Exception e) { - s_logger.debug("Failed to delete secondary storage", e); + logger.debug("Failed to delete secondary storage", e); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java index bd50f43025f2..113fb11ea548 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolCopyVolumeToSecondaryCommandWrapper.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.StorPoolCopyVolumeToSecondaryCommand; import com.cloud.agent.api.to.DataStoreTO; @@ -45,7 +44,6 @@ @ResourceWrapper(handles = StorPoolCopyVolumeToSecondaryCommand.class) public final class StorPoolCopyVolumeToSecondaryCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(StorPoolCopyVolumeToSecondaryCommandWrapper.class); @Override public CopyCmdAnswer execute(final StorPoolCopyVolumeToSecondaryCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -104,7 +102,7 @@ public CopyCmdAnswer execute(final StorPoolCopyVolumeToSecondaryCommand cmd, fin return new CopyCmdAnswer(dst); } catch (final 
Exception e) { final String error = "Failed to copy volume to secondary storage: " + e.getMessage(); - s_logger.debug(error); + logger.debug(error); return new CopyCmdAnswer(error); } finally { if (srcPath != null) { @@ -116,7 +114,7 @@ public CopyCmdAnswer execute(final StorPoolCopyVolumeToSecondaryCommand cmd, fin SP_LOG("StorpoolCopyVolumeToSecondaryCommandWrapper.execute: secondaryPool=%s " , secondaryPool); secondaryPool.delete(); } catch (final Exception e) { - s_logger.debug("Failed to delete secondary storage", e); + logger.debug("Failed to delete secondary storage", e); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java index 87a46ba62c93..3e7118ab81d7 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadTemplateCommandWrapper.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.StorPoolDownloadTemplateCommand; import com.cloud.agent.api.to.DataStoreTO; @@ -47,7 +46,6 @@ @ResourceWrapper(handles = StorPoolDownloadTemplateCommand.class) public final class StorPoolDownloadTemplateCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(StorPoolDownloadTemplateCommandWrapper.class); @Override public CopyCmdAnswer execute(final StorPoolDownloadTemplateCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -120,7 +118,7 @@ public CopyCmdAnswer execute(final 
StorPoolDownloadTemplateCommand cmd, final Li return new CopyCmdAnswer(dst); } catch (final Exception e) { final String error = "Failed to copy template to primary: " + e.getMessage(); - s_logger.debug(error); + logger.debug(error); return new CopyCmdAnswer(cmd, e); } finally { if (dstPath != null) { @@ -131,7 +129,7 @@ public CopyCmdAnswer execute(final StorPoolDownloadTemplateCommand cmd, final Li try { secondaryPool.delete(); } catch (final Exception e) { - s_logger.debug("Failed to delete secondary storage", e); + logger.debug("Failed to delete secondary storage", e); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java index d1a58a4aeb80..37284b597d2c 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolDownloadVolumeCommandWrapper.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgFile; //import java.io.File; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.StorPoolDownloadVolumeCommand; import com.cloud.agent.api.to.DataStoreTO; @@ -48,7 +47,6 @@ @ResourceWrapper(handles = StorPoolDownloadVolumeCommand.class) public final class StorPoolDownloadVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(StorPoolDownloadVolumeCommandWrapper.class); @Override public CopyCmdAnswer execute(final StorPoolDownloadVolumeCommand cmd, final LibvirtComputingResource libvirtComputingResource) { @@ -143,7 +141,7 @@ public CopyCmdAnswer execute(final StorPoolDownloadVolumeCommand cmd, final Libv } catch (final 
Exception e) { final String error = "Failed to copy volume to primary: " + e.getMessage(); SP_LOG(error); - s_logger.debug(error); + logger.debug(error); return new CopyCmdAnswer(cmd, e); } finally { if (dstPath != null) { @@ -154,7 +152,7 @@ public CopyCmdAnswer execute(final StorPoolDownloadVolumeCommand cmd, final Libv try { secondaryPool.delete(); } catch (final Exception e) { - s_logger.debug("Failed to delete secondary storage", e); + logger.debug("Failed to delete secondary storage", e); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java index 8bd8a52b667f..a44ff5473ae5 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolModifyStorageCommandWrapper.java @@ -24,7 +24,6 @@ import java.util.Map.Entry; import java.util.Set; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.StorPoolModifyStoragePoolAnswer; @@ -44,13 +43,12 @@ @ResourceWrapper(handles = StorPoolModifyStoragePoolCommand.class) public final class StorPoolModifyStorageCommandWrapper extends CommandWrapper { - private static final Logger log = Logger.getLogger(StorPoolModifyStorageCommandWrapper.class); @Override public Answer execute(final StorPoolModifyStoragePoolCommand command, final LibvirtComputingResource libvirtComputingResource) { String clusterId = StorPoolStoragePool.getStorPoolConfigParam("SP_CLUSTER_ID"); if (clusterId == null) { - log.debug(String.format("Could not get StorPool cluster id for a command [%s]", command.getClass())); + logger.debug(String.format("Could not get StorPool cluster id for a command [%s]", 
command.getClass())); return new Answer(command, false, "spNotFound"); } try { @@ -63,14 +61,14 @@ public Answer execute(final StorPoolModifyStoragePoolCommand command, final Libv storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool() .getUserInfo(), command.getPool().getType()); if (storagepool == null) { - log.debug(String.format("Did not find a storage pool [%s]", command.getPool().getId())); + logger.debug(String.format("Did not find a storage pool [%s]", command.getPool().getId())); return new Answer(command, false, String.format("Failed to create storage pool [%s]", command.getPool().getId())); } final Map tInfo = new HashMap<>(); return new StorPoolModifyStoragePoolAnswer(command, storagepool.getCapacity(), storagepool.getAvailable(), tInfo, clusterId, storagepool.getStorageNodeId()); } catch (Exception e) { - log.debug(String.format("Could not modify storage due to %s", e.getMessage())); + logger.debug(String.format("Could not modify storage due to %s", e.getMessage())); return new Answer(command, e); } } @@ -82,7 +80,7 @@ public String attachOrDetachVolume(String command, String type, String volumeUui } String err = null; - Script sc = new Script("storpool", 300000, log); + Script sc = new Script("storpool", 300000, logger); sc.add("-M"); sc.add("-j"); sc.add(command); @@ -116,7 +114,7 @@ public String attachOrDetachVolume(String command, String type, String volumeUui } if (err != null) { - log.warn(err); + logger.warn(err); } return res; } diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java index 9f9277768e65..8fc6b6bbcb8f 100644 --- 
a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolResizeVolumeCommandWrapper.java @@ -19,7 +19,6 @@ package com.cloud.hypervisor.kvm.resource.wrapper; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.ResizeVolumeAnswer; import com.cloud.agent.api.storage.StorPoolResizeVolumeCommand; @@ -37,7 +36,6 @@ @ResourceWrapper(handles = StorPoolResizeVolumeCommand.class) public final class StorPoolResizeVolumeCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(StorPoolResizeVolumeCommandWrapper.class); @Override public ResizeVolumeAnswer execute(final StorPoolResizeVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { @@ -51,7 +49,7 @@ public ResizeVolumeAnswer execute(final StorPoolResizeVolumeCommand command, fin if (currentSize == newSize) { // nothing to do - s_logger.info("No need to resize volume: current size " + currentSize + " is same as new size " + newSize); + logger.info("No need to resize volume: current size " + currentSize + " is same as new size " + newSize); return new ResizeVolumeAnswer(command, true, "success", currentSize); } @@ -65,7 +63,7 @@ public ResizeVolumeAnswer execute(final StorPoolResizeVolumeCommand command, fin if (!command.isAttached()) { StorPoolStorageAdaptor.attachOrDetachVolume("attach", "volume", path); } - final Script resizecmd = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), s_logger); + final Script resizecmd = new Script(libvirtComputingResource.getResizeVolumePath(), libvirtComputingResource.getCmdsTimeout(), logger); resizecmd.add("-s", String.valueOf(newSize)); resizecmd.add("-c", String.valueOf(currentSize)); resizecmd.add("-p", path); @@ -83,11 +81,11 @@ public ResizeVolumeAnswer execute(final 
StorPoolResizeVolumeCommand command, fin pool.refresh(); final long finalSize = pool.getPhysicalDisk(volid).getVirtualSize(); - s_logger.debug("after resize, size reports as " + finalSize + ", requested " + newSize); + logger.debug("after resize, size reports as " + finalSize + ", requested " + newSize); return new ResizeVolumeAnswer(command, true, "success", finalSize); } catch (final Exception e) { final String error = "Failed to resize volume: " + e.getMessage(); - s_logger.debug(error); + logger.debug(error); return new ResizeVolumeAnswer(command, false, error); } finally { if (!command.isAttached() && volPath != null) { diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolSetVolumeEncryptionCommandWrapper.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolSetVolumeEncryptionCommandWrapper.java index 8fdc28efc74f..6efc118cf7e3 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolSetVolumeEncryptionCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolSetVolumeEncryptionCommandWrapper.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.cloudstack.utils.qemu.QemuObject; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import com.cloud.agent.api.Answer; @@ -54,7 +53,6 @@ @ResourceWrapper(handles = StorPoolSetVolumeEncryptionCommand.class) public class StorPoolSetVolumeEncryptionCommandWrapper extends CommandWrapper { - private static final Logger logger = Logger.getLogger(StorPoolSetVolumeEncryptionCommandWrapper.class); @Override public StorPoolSetVolumeEncryptionAnswer execute(StorPoolSetVolumeEncryptionCommand command, diff --git 
a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java index d8f77ac88c63..c05d8b3ae08a 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStorageAdaptor.java @@ -28,7 +28,8 @@ import java.util.Map; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.to.DiskTO; import com.cloud.storage.Storage; @@ -51,7 +52,7 @@ public static void SP_LOG(String fmt, Object... args) { } } - private static final Logger log = Logger.getLogger(StorPoolStorageAdaptor.class); + protected static Logger LOGGER = LogManager.getLogger(StorPoolStorageAdaptor.class); private static final Map storageUuidToStoragePool = new HashMap(); @@ -103,7 +104,7 @@ private static long getDeviceSize(final String devPath) { if (!file.exists()) { return 0; } - Script sc = new Script("blockdev", 0, log); + Script sc = new Script("blockdev", 0, LOGGER); sc.add("--getsize64", devPath); OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); @@ -112,7 +113,7 @@ private static long getDeviceSize(final String devPath) { if (res != null) { SP_LOG("Unable to retrieve device size for %s. Res: %s", devPath, res); - log.debug(String.format("Unable to retrieve device size for %s. Res: %s", devPath, res)); + LOGGER.debug(String.format("Unable to retrieve device size for %s. 
Res: %s", devPath, res)); return 0; } @@ -160,7 +161,7 @@ public static boolean attachOrDetachVolume(String command, String type, String v String err = null; for(int i = 0; i < numTries; i++) { - Script sc = new Script("storpool", 0, log); + Script sc = new Script("storpool", 0, LOGGER); sc.add("-M"); sc.add(command); sc.add(type, name); @@ -192,7 +193,7 @@ public static boolean attachOrDetachVolume(String command, String type, String v if (err != null) { SP_LOG(err); - log.warn(err); + LOGGER.warn(err); throw new CloudRuntimeException(err); } @@ -211,7 +212,7 @@ public static boolean resize(String newSize, String volumeUuid ) { SP_LOG("StorPoolStorageAdaptor.resize: size=%s, uuid=%s, name=%s", newSize, volumeUuid, name); - Script sc = new Script("storpool", 0, log); + Script sc = new Script("storpool", 0, LOGGER); sc.add("-M"); sc.add("volume"); sc.add(name); @@ -228,7 +229,7 @@ public static boolean resize(String newSize, String volumeUuid ) { String err = String.format("Unable to resize volume %s. 
Error: %s", name, res); SP_LOG(err); - log.warn(err); + LOGGER.warn(err); throw new CloudRuntimeException(err); } @@ -236,7 +237,7 @@ public static boolean resize(String newSize, String volumeUuid ) { public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { SP_LOG("StorPoolStorageAdaptor.getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool); - log.debug(String.format("getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool)); + LOGGER.debug(String.format("getPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool)); final long deviceSize = getDeviceSize(volumeUuid); @@ -251,7 +252,7 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map details) { SP_LOG("StorPoolStorageAdaptor.connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool); - log.debug(String.format("connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool)); + LOGGER.debug(String.format("connectPhysicalDisk: uuid=%s, pool=%s", volumeUuid, pool)); return attachOrDetachVolume("attach", "volume", volumeUuid); } @@ -260,19 +261,19 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map volumeToDisconnect) { String volumeUuid = volumeToDisconnect.get(DiskTO.UUID); - log.debug(String.format("StorPoolStorageAdaptor.disconnectPhysicalDisk: map. uuid=%s", volumeUuid)); + LOGGER.debug(String.format("StorPoolStorageAdaptor.disconnectPhysicalDisk: map. 
uuid=%s", volumeUuid)); return attachOrDetachVolume("detach", "volume", volumeUuid); } @Override public boolean disconnectPhysicalDiskByPath(String localPath) { - log.debug(String.format("disconnectPhysicalDiskByPath: localPath=%s", localPath)); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath: localPath=%s", localPath)); return attachOrDetachVolume("detach", "volume", localPath); } @@ -287,7 +288,7 @@ public boolean deletePhysicalDisk(String volumeUuid, KVMStoragePool pool, Storag throw new UnsupportedOperationException(err); } - Script sc = new Script("storpool", 0, log); + Script sc = new Script("storpool", 0, LOGGER); sc.add("-M"); sc.add("snapshot", name); sc.add("delete", name); @@ -298,7 +299,7 @@ public boolean deletePhysicalDisk(String volumeUuid, KVMStoragePool pool, Storag if (res != null) { final String err = String.format("Unable to delete StorPool snapshot '%s'. Error: %s", name, res); SP_LOG(err); - log.warn(err); + LOGGER.warn(err); throw new UnsupportedOperationException(err); } return true; // apparently ignored diff --git a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java index 02095503c3b4..aa0a8849d90f 100644 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/storage/StorPoolStoragePool.java @@ -21,7 +21,8 @@ import java.util.Map.Entry; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.joda.time.Duration; import com.cloud.agent.api.to.HostTO; @@ -39,7 +40,7 @@ import com.google.gson.JsonSyntaxException; public class StorPoolStoragePool implements KVMStoragePool { - private static final Logger log = 
Logger.getLogger(StorPoolStoragePool.class); + protected Logger logger = LogManager.getLogger(StorPoolStoragePool.class); private String _uuid; private String _sourceHost; private int _sourcePort; @@ -199,8 +200,8 @@ public String createHeartBeatCommand(HAStoragePool primaryStoragePool, String ho boolean isStorageNodeUp = checkingHeartBeat(primaryStoragePool, null); if (!isStorageNodeUp && !hostValidation) { //restart the host - log.debug(String.format("The host [%s] will be restarted because the health check failed for the storage pool [%s]", hostPrivateIp, primaryStoragePool.getPool().getType())); - Script cmd = new Script(primaryStoragePool.getPool().getHearthBeatPath(), HeartBeatUpdateTimeout, log); + logger.debug(String.format("The host [%s] will be restarted because the health check failed for the storage pool [%s]", hostPrivateIp, primaryStoragePool.getPool().getType())); + Script cmd = new Script(primaryStoragePool.getPool().getHearthBeatPath(), HeartBeatUpdateTimeout, logger); cmd.add("-c"); cmd.execute(); return "Down"; @@ -214,7 +215,7 @@ public String getStorageNodeId() { } public static final String getStorPoolConfigParam(String param) { - Script sc = new Script("storpool_confget", 0, Logger.getLogger(StorPoolStoragePool.class)); + Script sc = new Script("storpool_confget", 0, LogManager.getLogger(StorPoolStoragePool.class)); OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); String configParam = null; @@ -289,7 +290,7 @@ private boolean checkIfNodeIsRunning(String response, Integer hostStorageNodeId) } private String executeStorPoolServiceListCmd(OutputInterpreter.AllLinesParser parser) { - Script sc = new Script("storpool", 0, log); + Script sc = new Script("storpool", 0, logger); sc.add("-j"); sc.add("service"); sc.add("list"); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java 
b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java index f4821e269266..6258767921d2 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/collector/StorPoolAbandonObjectsCollector.java @@ -37,10 +37,9 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.datastore.util.StorPoolHelper; +//import org.apache.cloudstack.storage.datastore.util.StorPoolHelper; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -53,14 +52,13 @@ import com.google.gson.JsonObject; public class StorPoolAbandonObjectsCollector extends ManagerBase implements Configurable { - private static Logger log = Logger.getLogger(StorPoolAbandonObjectsCollector.class); @Inject private PrimaryDataStoreDao storagePoolDao; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; private ScheduledExecutorService _volumeTagsUpdateExecutor; - private static final String ABANDON_LOG = "/var/log/cloudstack/management/storpool-abandoned-objects"; + private static final String ABANDON_LOGGER = "/var/log/cloudstack/management/storpool-abandoned-objects"; static final ConfigKey volumeCheckupTagsInterval = new ConfigKey("Advanced", Integer.class, @@ -91,7 +89,7 @@ public boolean start() { private void init() { List spPools = storagePoolDao.findPoolsByProvider(StorPoolUtil.SP_PROVIDER_NAME); if (CollectionUtils.isNotEmpty(spPools)) { - StorPoolHelper.appendLogger(log, ABANDON_LOG, 
"abandon"); +// StorPoolHelper.appendLogger(logger, ABANDON_LOGGER, "abandon"); } _volumeTagsUpdateExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("StorPoolAbandonObjectsCollector")); @@ -121,7 +119,7 @@ protected void runInContext() { JsonArray arr = StorPoolUtil.volumesList(StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), storagePoolVO.getId(), storagePoolDetailsDao, storagePoolDao)); volumes.putAll(getStorPoolNamesAndCsTag(arr)); } catch (Exception e) { - log.debug(String.format("Could not collect abandon objects due to %s", e.getMessage()), e); + logger.debug(String.format("Could not collect abandon objects due to %s", e.getMessage()), e); } } Transaction.execute(new TransactionCallbackNoReturn() { @@ -139,10 +137,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { pstmt.executeUpdate(); } catch (SQLException e) { - log.info(String.format("[ignored] SQL failed to delete vm work job: %s ", + logger.info(String.format("[ignored] SQL failed to delete vm work job: %s ", e.getLocalizedMessage())); } catch (Throwable e) { - log.info(String.format("[ignored] caught an error during delete vm work job: %s", + logger.info(String.format("[ignored] caught an error during delete vm work job: %s", e.getLocalizedMessage())); } @@ -164,10 +162,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { String sqlVolumeOnHost = "SELECT f.* FROM `cloud`.`volumes_on_host1` f LEFT JOIN `cloud`.`storage_pool_details` v ON f.name=v.value where v.value is NULL"; findMissingRecordsInCS(txn, sqlVolumeOnHost, "volumes_on_host"); } catch (SQLException e) { - log.info(String.format("[ignored] SQL failed due to: %s ", + logger.info(String.format("[ignored] SQL failed due to: %s ", e.getLocalizedMessage())); } catch (Throwable e) { - log.info(String.format("[ignored] caught an error: %s", + logger.info(String.format("[ignored] caught an error: %s", e.getLocalizedMessage())); } finally { try { @@ -177,7 +175,7 @@ public void 
doInTransactionWithoutResult(TransactionStatus status) { pstmt.executeUpdate(); } catch (SQLException e) { txn.close(); - log.info(String.format("createTemporaryVolumeTable %s", e.getMessage())); + logger.info(String.format("createTemporaryVolumeTable %s", e.getMessage())); } txn.close(); } @@ -201,7 +199,7 @@ protected void runInContext() { JsonArray arr = StorPoolUtil.snapshotsList(StorPoolUtil.getSpConnection(storagePoolVO.getUuid(), storagePoolVO.getId(), storagePoolDetailsDao, storagePoolDao)); snapshots.putAll(getStorPoolNamesAndCsTag(arr)); } catch (Exception e) { - log.debug(String.format("Could not collect abandon objects due to %s", e.getMessage())); + logger.debug(String.format("Could not collect abandon objects due to %s", e.getMessage())); } } Transaction.execute(new TransactionCallbackNoReturn() { @@ -222,10 +220,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { "CREATE TEMPORARY TABLE `cloud`.`vm_templates1`(`id` bigint unsigned NOT NULL auto_increment, `name` varchar(255) NOT NULL,`tag` varchar(255) NOT NULL, PRIMARY KEY (`id`))"); pstmt.executeUpdate(); } catch (SQLException e) { - log.info(String.format("[ignored] SQL failed to delete vm work job: %s ", + logger.info(String.format("[ignored] SQL failed to delete vm work job: %s ", e.getLocalizedMessage())); } catch (Throwable e) { - log.info(String.format("[ignored] caught an error during delete vm work job: %s", + logger.info(String.format("[ignored] caught an error during delete vm work job: %s", e.getLocalizedMessage())); } @@ -262,10 +260,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { + " and spool.local_path is NULL"; findMissingRecordsInCS(txn, sqlTemplates, "snapshot"); } catch (SQLException e) { - log.info(String.format("[ignored] SQL failed due to: %s ", + logger.info(String.format("[ignored] SQL failed due to: %s ", e.getLocalizedMessage())); } catch (Throwable e) { - log.info(String.format("[ignored] caught an error: %s", + 
logger.info(String.format("[ignored] caught an error: %s", e.getLocalizedMessage())); } finally { try { @@ -277,7 +275,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { pstmt.executeUpdate(); } catch (SQLException e) { txn.close(); - log.info(String.format("createTemporaryVolumeTable %s", e.getMessage())); + logger.info(String.format("createTemporaryVolumeTable %s", e.getMessage())); } txn.close(); } @@ -304,7 +302,7 @@ private void findMissingRecordsInCS(TransactionLegacy txn, String sql, String ob String name = null; while (rs.next()) { name = rs.getString(2); - log.info(String.format( + logger.info(String.format( "CloudStack does not know about StorPool %s %s, it had to be a %s", object, name, rs.getString(3))); } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java index d42a2ba0a354..32f1fa7bd119 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java @@ -62,7 +62,6 @@ import org.apache.cloudstack.storage.volume.VolumeObject; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.MapUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ResizeVolumeAnswer; @@ -108,10 +107,12 @@ import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver { - private static final Logger log = 
Logger.getLogger(StorPoolPrimaryDataStoreDriver.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private VolumeDao volumeDao; @@ -364,11 +365,11 @@ public void resize(DataObject data, AsyncCompletionCallback cal // try restoring volume to its initial size resp = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn); if (resp.getError() != null) { - log.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, resp.getError())); + logger.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, resp.getError())); } } } catch (Exception e) { - log.debug("sending resize command failed", e); + logger.debug("sending resize command failed", e); err = e.toString(); } } else { @@ -413,7 +414,7 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal } if (err != null) { - log.error(err); + logger.error(err); StorPoolUtil.spLog(err); } @@ -562,7 +563,7 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal SpApiResponse resp = StorPoolUtil.snapshotDelete(snapName, conn); if (resp.getError() != null) { final String err2 = String.format("Failed to cleanup StorPool snapshot '%s'. 
Error: %s.", snapName, resp.getError()); - log.error(err2); + logger.error(err2); StorPoolUtil.spLog(err2); } } @@ -593,7 +594,7 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal if (answer != null && answer.getResult()) { SpApiResponse resSnapshot = StorPoolUtil.volumeSnapshot(volumeName, template.getUuid(), null, "template", "no", conn); if (resSnapshot.getError() != null) { - log.debug(String.format("Could not snapshot volume with ID=%s", volume.getId())); + logger.debug(String.format("Could not snapshot volume with ID=%s", volume.getId())); StorPoolUtil.spLog("Volume snapshot failed with error=%s", resSnapshot.getError().getDescr()); err = resSnapshot.getError().getDescr(); } @@ -675,7 +676,7 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal if (err != null) { resp = StorPoolUtil.volumeDelete(StorPoolUtil.getNameFromResponse(resp, true), conn); if (resp.getError() != null) { - log.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp.getError())); + logger.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp.getError())); } } } else if (srcType == DataObjectType.TEMPLATE && dstType == DataObjectType.VOLUME) { @@ -768,7 +769,7 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal if (err != null) { SpApiResponse resp3 = StorPoolUtil.volumeDelete(name, conn); if (resp3.getError() != null) { - log.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp3.getError())); + logger.warn(String.format("Could not clean-up Storpool volume %s. Error: %s", name, resp3.getError())); } } } @@ -817,7 +818,7 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal final SpApiResponse resp2 = StorPoolUtil.snapshotDelete(snapshotName, conn); if (resp2.getError() != null) { final String err2 = String.format("Failed to delete temporary StorPool snapshot %s. 
Error: %s", StorPoolUtil.getNameFromResponse(resp, true), resp2.getError()); - log.error(err2); + logger.error(err2); StorPoolUtil.spLog(err2); } } @@ -837,7 +838,7 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal if (err != null) { StorPoolUtil.spLog("Failed due to %s", err); - log.error(err); + logger.error(err); answer = new Answer(cmd, false, err); } @@ -1054,7 +1055,7 @@ private String getVcPolicyTag(Long vmId) { } public void handleQualityOfServiceForVolumeMigration(VolumeInfo arg0, QualityOfServiceState arg1) { - log.debug(String.format("handleQualityOfServiceForVolumeMigration with volume name=%s is not supported", arg0.getName())); + logger.debug(String.format("handleQualityOfServiceForVolumeMigration with volume name=%s is not supported", arg0.getName())); } @@ -1135,10 +1136,10 @@ public void provideVmInfo(long vmId, long volumeId) { VMInstanceVO userVM = vmInstanceDao.findById(vmId); SpApiResponse resp = StorPoolUtil.volumeUpdateTags(volName, volume.getInstanceId() != null ? 
userVM.getUuid() : "", null, conn, getVcPolicyTag(vmId)); if (resp.getError() != null) { - log.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid())); + logger.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid())); } } catch (Exception e) { - log.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage())); + logger.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage())); } } } @@ -1158,10 +1159,10 @@ public void provideVmTags(long vmId, long volumeId, String tagValue) { String volName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true); SpApiResponse resp = StorPoolUtil.volumeUpdateVCTags(volName, conn, getVcPolicyTag(vmId)); if (resp.getError() != null) { - log.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid())); + logger.warn(String.format("Could not update VC policy tags of a volume with id [%s]", volume.getUuid())); } } catch (Exception e) { - log.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage())); + logger.warn(String.format("Could not update Virtual machine tags due to %s", e.getMessage())); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java index 359b11d491e0..a41ff66229c7 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolStatsCollector.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.storage.datastore.util.StorPoolUtil; import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; import 
org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; @@ -47,8 +46,6 @@ public class StorPoolStatsCollector extends ManagerBase { - private static Logger log = Logger.getLogger(StorPoolStatsCollector.class); - @Inject private PrimaryDataStoreDao storagePoolDao; @Inject @@ -93,19 +90,19 @@ public void run() { if (CollectionUtils.isNotEmpty(spPools)) { volumesStats.clear(); - log.debug("Collecting StorPool volumes used space"); + logger.debug("Collecting StorPool volumes used space"); Map onePoolforZone = new HashMap<>(); for (StoragePoolVO storagePoolVO : spPools) { onePoolforZone.put(storagePoolVO.getDataCenterId(), storagePoolVO); } for (StoragePoolVO storagePool : onePoolforZone.values()) { try { - log.debug(String.format("Collecting volumes statistics for zone [%s]", storagePool.getDataCenterId())); + logger.debug(String.format("Collecting volumes statistics for zone [%s]", storagePool.getDataCenterId())); JsonArray arr = StorPoolUtil.volumesSpace(StorPoolUtil.getSpConnection(storagePool.getUuid(), storagePool.getId(), storagePoolDetailsDao, storagePoolDao)); volumesStats.putAll(getClusterVolumeOrTemplateSpace(arr, StorPoolObject.VOLUME)); } catch (Exception e) { - log.debug(String.format("Could not collect StorPool volumes statistics due to %s", e.getMessage())); + logger.debug(String.format("Could not collect StorPool volumes statistics due to %s", e.getMessage())); } } } @@ -126,12 +123,12 @@ public void run() { } for (StoragePoolVO storagePool : onePoolforZone.values()) { try { - log.debug(String.format("Collecting templates statistics for zone [%s]", storagePool.getDataCenterId())); + logger.debug(String.format("Collecting templates statistics for zone [%s]", storagePool.getDataCenterId())); JsonArray arr = StorPoolUtil.templatesStats(StorPoolUtil.getSpConnection(storagePool.getUuid(), storagePool.getId(), storagePoolDetailsDao, storagePoolDao)); 
templatesStats.put(storagePool.getDataCenterId(), getClusterVolumeOrTemplateSpace(arr, StorPoolObject.TEMPLATE)); } catch (Exception e) { - log.debug(String.format("Could not collect StorPool templates statistics %s", e.getMessage())); + logger.debug(String.format("Could not collect StorPool templates statistics %s", e.getMessage())); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java index 4dbc7e4a22c9..339ee625c581 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/StorPoolPrimaryDataStoreLifeCycle.java @@ -38,7 +38,8 @@ import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.host.HostVO; @@ -61,7 +62,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class StorPoolPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { - private static final Logger log = Logger.getLogger(StorPoolPrimaryDataStoreLifeCycle.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected PrimaryDataStoreHelper dataStoreHelper; @@ -92,7 +93,7 @@ public DataStore initialize(Map dsInfos) { } StorPoolUtil.spLog(""); - log.debug("initialize"); + logger.debug("initialize"); String name = (String)dsInfos.get("name"); String providerName = 
(String)dsInfos.get("providerName"); @@ -186,18 +187,18 @@ public void updateStoragePool(StoragePool storagePool, Map detai } StorPoolUtil.spLog(""); - log.debug("updateStoragePool"); + logger.debug("updateStoragePool"); return; } @Override public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { - log.debug("attachHost"); + logger.debug("attachHost"); return true; } @Override public boolean attachCluster(DataStore store, ClusterScope scope) { - log.debug("attachCluster"); + logger.debug("attachCluster"); if (!scope.getScopeType().equals(ScopeType.ZONE)) { throw new UnsupportedOperationException("Only Zone-Wide scope is supported!"); } @@ -206,7 +207,7 @@ public boolean attachCluster(DataStore store, ClusterScope scope) { @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - log.debug("attachZone"); + logger.debug("attachZone"); if (hypervisorType != HypervisorType.KVM) { throw new UnsupportedOperationException("Only KVM hypervisors supported!"); @@ -216,7 +217,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h try { storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); } catch (Exception e) { - log.warn(String.format("Unable to establish a connection between host %s and pool %s due to %s", host, dataStore, e)); + logger.warn(String.format("Unable to establish a connection between host %s and pool %s due to %s", host, dataStore, e)); } } dataStoreHelper.attachZone(dataStore, hypervisorType); @@ -225,7 +226,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType h @Override public boolean maintain(DataStore dataStore) { - log.debug("maintain"); + logger.debug("maintain"); storagePoolAutmation.maintain(dataStore); dataStoreHelper.maintain(dataStore); @@ -234,7 +235,7 @@ public boolean maintain(DataStore dataStore) { @Override public boolean cancelMaintain(DataStore store) { - 
log.debug("cancelMaintain"); + logger.debug("cancelMaintain"); dataStoreHelper.cancelMaintain(store); storagePoolAutmation.cancelMaintain(store); @@ -243,7 +244,7 @@ public boolean cancelMaintain(DataStore store) { @Override public boolean deleteDataStore(DataStore store) { - log.debug("deleteDataStore"); + logger.debug("deleteDataStore"); long storagePoolId = store.getId(); List lstSnapshots = snapshotDao.listAll(); @@ -303,19 +304,19 @@ public boolean deleteDataStore(DataStore store) { @Override public boolean migrateToObjectStore(DataStore store) { - log.debug("migrateToObjectStore"); + logger.debug("migrateToObjectStore"); return false; } @Override public void enableStoragePool(DataStore dataStore) { - log.debug("enableStoragePool"); + logger.debug("enableStoragePool"); dataStoreHelper.enable(dataStore); } @Override public void disableStoragePool(DataStore dataStore) { - log.debug("disableStoragePool"); + logger.debug("disableStoragePool"); dataStoreHelper.disable(dataStore); } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java index bf7642b9122e..b696990c5336 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/provider/StorPoolHostListener.java @@ -40,7 +40,8 @@ import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -62,7 +63,7 @@ import 
com.cloud.utils.exception.CloudRuntimeException; public class StorPoolHostListener implements HypervisorHostListener { - private static final Logger log = Logger.getLogger(StorPoolHostListener .class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private AgentManager agentMgr; @@ -151,7 +152,7 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep List localStoragePools = primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); for (StoragePoolVO localStoragePool : localStoragePools) { if (datastoreName.equals(localStoragePool.getPath())) { - log.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); + logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" + localStoragePool.getName()); } @@ -259,7 +260,7 @@ private void addModifyCommandToCommandsAllowedInMaintenanceMode() { } catch (IllegalArgumentException | IllegalAccessException | NoSuchFieldException | SecurityException e) { String err = "Could not add StorPoolModifyStoragePoolCommand to s_commandsAllowedInMaintenanceMode array due to: %s"; StorPoolUtil.spLog(err, e.getMessage()); - log.warn(String.format(err, e.getMessage())); + logger.warn(String.format(err, e.getMessage())); } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java index 9395f134fe10..5a84e699f52e 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java +++ 
b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java @@ -19,10 +19,7 @@ package org.apache.cloudstack.storage.datastore.util; -import java.io.IOException; import java.sql.PreparedStatement; -import java.sql.Timestamp; -import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -41,10 +38,6 @@ import org.apache.cloudstack.storage.snapshot.StorPoolConfigurationManager; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections4.CollectionUtils; -import org.apache.log4j.Appender; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.RollingFileAppender; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; @@ -182,30 +175,30 @@ public static Map addStorPoolTags(String name, String vmUuid, St } // Initialize custom logger for updated volume and snapshots - public static void appendLogger(Logger log, String filePath, String kindOfLog) { - Appender appender = null; - PatternLayout patternLayout = new PatternLayout(); - patternLayout.setConversionPattern("%d{YYYY-MM-dd HH:mm:ss.SSS} %m%n"); - SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss"); - Timestamp timestamp = new Timestamp(System.currentTimeMillis()); - String path = filePath + "-" + sdf.format(timestamp) + ".log"; - try { - appender = new RollingFileAppender(patternLayout, path); - log.setAdditivity(false); - log.addAppender(appender); - } catch (IOException e) { - e.printStackTrace(); - } - if (kindOfLog.equals("update")) { - StorPoolUtil.spLog( - "You can find information about volumes and snapshots, which will be updated in Database with their globalIs in %s log file", - path); - } else if (kindOfLog.equals("abandon")) { - StorPoolUtil.spLog( - "You can find information about volumes and snapshots, for which CloudStack doesn't have information in %s log file", - path); - } - } +// 
public static void appendLogger(Logger log, String filePath, String kindOfLog) { +// Appender appender = null; +// PatternLayout patternLayout = new PatternLayout(); +// patternLayout.setConversionPattern("%d{YYYY-MM-dd HH:mm:ss.SSS} %m%n"); +// SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss"); +// Timestamp timestamp = new Timestamp(System.currentTimeMillis()); +// String path = filePath + "-" + sdf.format(timestamp) + ".log"; +// try { +// appender = new RollingFileAppender(patternLayout, path); +// log.setAdditivity(false); +// log.addAppender(appender); +// } catch (IOException e) { +// e.printStackTrace(); +// } +// if (kindOfLog.equals("update")) { +// StorPoolUtil.spLog( +// "You can find information about volumes and snapshots, which will be updated in Database with their globalIs in %s log file", +// path); +// } else if (kindOfLog.equals("abandon")) { +// StorPoolUtil.spLog( +// "You can find information about volumes and snapshots, for which CloudStack doesn't have information in %s log file", +// path); +// } +// } public static void setSpClusterIdIfNeeded(long hostId, String clusterId, ClusterDao clusterDao, HostDao hostDao, ClusterDetailsDao clusterDetails) { diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java index e176d67c12db..8db93d968d60 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java @@ -44,7 +44,8 @@ import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; 
import java.io.BufferedReader; import java.io.File; @@ -67,7 +68,7 @@ import java.util.UUID; public class StorPoolUtil { - private static final Logger log = Logger.getLogger(StorPoolUtil.class); + protected static Logger LOGGER = LogManager.getLogger(StorPoolUtil.class); private static final File spLogFile = new File( Files.exists(Paths.get("/var/log/cloudstack/management/")) ? @@ -77,23 +78,23 @@ public class StorPoolUtil { private static PrintWriter spLogFileInitialize() { try { - log.info("INITIALIZE SP-LOG_FILE"); + LOGGER.info("INITIALIZE SP-LOGGER_FILE"); if (spLogFile.exists()) { final SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss"); final Timestamp timestamp = new Timestamp(System.currentTimeMillis()); final File spLogFileRename = new File(spLogFile + "-" + sdf.format(timestamp)); final boolean ret = spLogFile.renameTo(spLogFileRename); if (!ret) { - log.warn("Unable to rename" + spLogFile + " to " + spLogFileRename); + LOGGER.warn("Unable to rename" + spLogFile + " to " + spLogFileRename); } else { - log.debug("Renamed " + spLogFile + " to " + spLogFileRename); + LOGGER.debug("Renamed " + spLogFile + " to " + spLogFileRename); } } else { spLogFile.getParentFile().mkdirs(); } return new PrintWriter(spLogFile); } catch (Exception e) { - log.info("INITIALIZE SP-LOG_FILE: " + e.getMessage()); + LOGGER.info("INITIALIZE SP-LOGGER_FILE: " + e.getMessage()); throw new RuntimeException(e); } } @@ -176,19 +177,19 @@ public SpConnectionDesc(String url) { extractUriParams(url); return; } catch (URISyntaxException e) { - log.debug("[ignore] the uri is not valid"); + LOGGER.debug("[ignore] the uri is not valid"); } String[] urlSplit = url.split(";"); if (urlSplit.length == 1 && !urlSplit[0].contains("=")) { this.templateName = url; - Script sc = new Script("storpool_confget", 0, log); + Script sc = new Script("storpool_confget", 0, LOGGER); OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser(); final String err = 
sc.execute(parser); if (err != null) { final String errMsg = String.format("Could not execute storpool_confget. Error: %s", err); - log.warn(errMsg); + LOGGER.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -396,7 +397,7 @@ private static SpApiResponse POST(String query, Object json, SpConnectionDesc co Gson gson = new Gson(); String js = gson.toJson(json); StringEntity input = new StringEntity(js, ContentType.APPLICATION_JSON); - log.info("Request:" + js); + LOGGER.info("Request:" + js); req.setEntity(input); } @@ -586,7 +587,7 @@ public static SpApiResponse volumesGroupSnapshot(final List volu } json.put("tags", tags); json.put("volumes", volumes); - log.info("json:" + json); + LOGGER.info("json:" + json); return POST("MultiCluster/VolumesGroupSnapshot", json, conn); } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java index a735b0fe918b..bd5380cc1606 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/motion/StorPoolDataMotionStrategy.java @@ -59,7 +59,8 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -99,7 +100,7 @@ @Component public class StorPoolDataMotionStrategy implements DataMotionStrategy { - private static final Logger log = Logger.getLogger(StorPoolDataMotionStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private SnapshotDataFactory 
_snapshotDataFactory; @@ -189,7 +190,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, CopyCmdAnswer answer = null; String err = null; if (res.getError() != null) { - log.debug(String.format("Could not create volume from snapshot with ID=%s", snapshot.getId())); + logger.debug(String.format("Could not create volume from snapshot with ID=%s", snapshot.getId())); StorPoolUtil.spLog("Volume create failed with error=%s", res.getError().getDescr()); err = res.getError().getDescr(); } else { @@ -217,7 +218,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, if (answer != null && answer.getResult()) { SpApiResponse resSnapshot = StorPoolUtil.volumeFreeze(volumeName, conn); if (resSnapshot.getError() != null) { - log.debug(String.format("Could not snapshot volume with ID=%s", snapshot.getId())); + logger.debug(String.format("Could not snapshot volume with ID=%s", snapshot.getId())); StorPoolUtil.spLog("Volume freeze failed with error=%s", resSnapshot.getError().getDescr()); err = resSnapshot.getError().getDescr(); StorPoolUtil.volumeDelete(volumeName, conn); @@ -385,7 +386,7 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach errMsg = String.format( "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in StorPoolDataMotionStrategy.copyAsync. 
Error message: [%s].", vmTO.getId(), srcHost.getId(), destHost.getId(), ex.getMessage()); - log.error(errMsg, ex); + logger.error(errMsg, ex); throw new CloudRuntimeException(errMsg); } finally { @@ -441,7 +442,7 @@ private void handlePostMigration(boolean success, Map sr throw new AgentUnavailableException(msg, destHost.getId()); } } catch (Exception e) { - log.debug("Failed to disconnect one or more (original) dest volumes", e); + logger.debug("Failed to disconnect one or more (original) dest volumes", e); } } @@ -469,10 +470,10 @@ private void handlePostMigration(boolean success, Map sr AsyncCallFuture destroyFuture = _volumeService.expungeVolumeAsync(srcVolumeInfo); if (destroyFuture.get().isFailed()) { - log.debug("Failed to clean up source volume on storage"); + logger.debug("Failed to clean up source volume on storage"); } } catch (Exception e) { - log.debug("Failed to clean up source volume on storage", e); + logger.debug("Failed to clean up source volume on storage", e); } // Update the volume ID for snapshots on secondary storage @@ -484,13 +485,13 @@ private void handlePostMigration(boolean success, Map sr try { disconnectHostFromVolume(destHost, destVolumeInfo.getPoolId(), destVolumeInfo.getPath()); } catch (Exception e) { - log.debug("Failed to disconnect (new) dest volume", e); + logger.debug("Failed to disconnect (new) dest volume", e); } try { _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore()); } catch (Exception e) { - log.debug("Failed to revoke access from dest volume", e); + logger.debug("Failed to revoke access from dest volume", e); } destVolumeInfo.processEvent(Event.OperationFailed); @@ -504,10 +505,10 @@ private void handlePostMigration(boolean success, Map sr AsyncCallFuture destroyFuture = _volumeService.expungeVolumeAsync(destVolumeInfo); if (destroyFuture.get().isFailed()) { - log.debug("Failed to clean up dest volume on storage"); + logger.debug("Failed to clean up dest volume on storage"); } } catch 
(Exception e) { - log.debug("Failed to clean up dest volume on storage", e); + logger.debug("Failed to clean up dest volume on storage", e); } } } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java index 55d691f33e0b..0b58247c661c 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolSnapshotStrategy.java @@ -40,7 +40,8 @@ import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpApiResponse; import org.apache.cloudstack.storage.datastore.util.StorPoolUtil.SpConnectionDesc; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.exception.InvalidParameterValueException; @@ -60,7 +61,7 @@ @Component public class StorPoolSnapshotStrategy implements SnapshotStrategy { - private static final Logger log = Logger.getLogger(StorPoolSnapshotStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private SnapshotDao _snapshotDao; @@ -90,11 +91,11 @@ public SnapshotInfo backupSnapshot(SnapshotInfo snapshotInfo) { snapshotObj.processEvent(Snapshot.Event.BackupToSecondary); snapshotObj.processEvent(Snapshot.Event.OperationSucceeded); } catch (NoTransitionException ex) { - log.debug("Failed to change state: " + ex.toString()); + logger.debug("Failed to change state: " + ex.toString()); try { snapshotObj.processEvent(Snapshot.Event.OperationFailed); } catch (NoTransitionException ex2) { - log.debug("Failed to change state: " + ex2.toString()); + logger.debug("Failed to change state: " + 
ex2.toString()); } } return snapshotInfo; @@ -131,7 +132,7 @@ public boolean deleteSnapshot(Long snapshotId, Long zoneId) { @Override public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperation op) { - log.debug(String.format("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op)); + logger.debug(String.format("StorpoolSnapshotStrategy.canHandle: snapshot=%s, uuid=%s, op=%s", snapshot.getName(), snapshot.getUuid(), op)); if (op != SnapshotOperation.DELETE) { return StrategyPriority.CANT_HANDLE; @@ -160,7 +161,7 @@ public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperat } private boolean deleteSnapshotChain(SnapshotInfo snapshot) { - log.debug("delete snapshot chain for snapshot: " + snapshot.getId()); + logger.debug("delete snapshot chain for snapshot: " + snapshot.getId()); final SnapshotInfo snapOnImage = snapshot; boolean result = false; boolean resultIsSet = false; @@ -170,15 +171,15 @@ private boolean deleteSnapshotChain(SnapshotInfo snapshot) { SnapshotInfo child = snapshot.getChild(); if (child != null) { - log.debug("the snapshot has child, can't delete it on the storage"); + logger.debug("the snapshot has child, can't delete it on the storage"); break; } - log.debug("Snapshot: " + snapshot.getId() + " doesn't have children, so it's ok to delete it and its parents"); + logger.debug("Snapshot: " + snapshot.getId() + " doesn't have children, so it's ok to delete it and its parents"); SnapshotInfo parent = snapshot.getParent(); boolean deleted = false; if (parent != null) { if (parent.getPath() != null && parent.getPath().equalsIgnoreCase(snapshot.getPath())) { - log.debug("for empty delta snapshot, only mark it as destroyed in db"); + logger.debug("for empty delta snapshot, only mark it as destroyed in db"); snapshot.processEvent(Event.DestroyRequested); snapshot.processEvent(Event.OperationSuccessed); deleted = true; @@ -195,7 +196,7 @@ private boolean 
deleteSnapshotChain(SnapshotInfo snapshot) { if (r) { List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); for (SnapshotInfo cacheSnap : cacheSnaps) { - log.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName()); + logger.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName()); cacheSnap.delete(); } } @@ -204,7 +205,7 @@ private boolean deleteSnapshotChain(SnapshotInfo snapshot) { resultIsSet = true; } } catch (Exception e) { - log.debug("Failed to delete snapshot on storage. ", e); + logger.debug("Failed to delete snapshot on storage. ", e); } } } else { @@ -213,7 +214,7 @@ private boolean deleteSnapshotChain(SnapshotInfo snapshot) { snapshot = parent; } } catch (Exception e) { - log.debug("delete snapshot failed: ", e); + logger.debug("delete snapshot failed: ", e); } return result; } @@ -235,7 +236,7 @@ protected boolean deleteSnapshotOnImageAndPrimary(long snapshotId, DataStore sto obj.processEvent(Snapshot.Event.DestroyRequested); } } catch (NoTransitionException e) { - log.debug("Failed to set the state to destroying: ", e); + logger.debug("Failed to set the state to destroying: ", e); return false; } @@ -253,13 +254,13 @@ protected boolean deleteSnapshotOnImageAndPrimary(long snapshotId, DataStore sto } } } catch (Exception e) { - log.debug("Failed to delete snapshot: ", e); + logger.debug("Failed to delete snapshot: ", e); try { if (areLastSnapshotRef) { obj.processEvent(Snapshot.Event.OperationFailed); } } catch (NoTransitionException e1) { - log.debug("Failed to change snapshot state: " + e.toString()); + logger.debug("Failed to change snapshot state: " + e.toString()); } return false; } diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java 
b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java index 1172600c3423..d3c4b456f621 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/snapshot/StorPoolVMSnapshotStrategy.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.vmsnapshot.DefaultVMSnapshotStrategy; import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.VMSnapshotTO; @@ -66,7 +65,6 @@ @Component public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy { - private static final Logger log = Logger.getLogger(StorPoolVMSnapshotStrategy.class); @Inject private VMSnapshotHelper vmSnapshotHelper; @@ -94,7 +92,7 @@ public class StorPoolVMSnapshotStrategy extends DefaultVMSnapshotStrategy { @Override public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { - log.info("KVMVMSnapshotStrategy take snapshot"); + logger.info("KVMVMSnapshotStrategy take snapshot"); UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; @@ -163,7 +161,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { for (VolumeObjectTO volumeObjectTO : volumeTOs) { publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeObjectTO); new_chain_size += volumeObjectTO.getSize(); - log.info("EventTypes.EVENT_VM_SNAPSHOT_CREATE publishUsageEvent" + volumeObjectTO); + logger.info("EventTypes.EVENT_VM_SNAPSHOT_CREATE publishUsageEvent" + volumeObjectTO); } publishUsageEvents(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, new_chain_size - prev_chain_size, virtual_size); } else { @@ -171,15 +169,15 @@ public VMSnapshot 
takeVMSnapshot(VMSnapshot vmSnapshot) { } return vmSnapshot; } catch (Exception e) { - log.debug("Could not create VM snapshot:" + e.getMessage()); + logger.debug("Could not create VM snapshot:" + e.getMessage()); throw new CloudRuntimeException("Could not create VM snapshot:" + e.getMessage()); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); - log.info(String.format("VMSnapshot.Event.OperationFailed vmSnapshot=%s", vmSnapshot)); + logger.info(String.format("VMSnapshot.Event.OperationFailed vmSnapshot=%s", vmSnapshot)); } catch (NoTransitionException nte) { - log.error("Cannot set vm state:" + nte.getMessage()); + logger.error("Cannot set vm state:" + nte.getMessage()); } } } @@ -219,7 +217,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); } catch (NoTransitionException e) { - log.debug("Failed to change vm snapshot state with event ExpungeRequested"); + logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); throw new CloudRuntimeException( "Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); } @@ -241,13 +239,13 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { if (snapshotName == null) { err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uui=%s", vmSnapshot.getUuid(), volumeObjectTO.getUuid()); - log.error("Could not delete snapshot for vm:" + err); + logger.error("Could not delete snapshot for vm:" + err); } StorPoolUtil.spLog("StorpoolVMSnapshotStrategy.deleteVMSnapshot snapshotName=%s", snapshotName); resp = StorPoolUtil.snapshotDelete(snapshotName, conn); if (resp.getError() != null) { err = String.format("Could not delete storpool vm error=%s", resp.getError()); - log.error("Could not delete snapshot for vm:" + err); + logger.error("Could not delete snapshot for vm:" + err); } 
else { // do we need to clean database? if (snapshotDetailsVO != null) { @@ -278,7 +276,7 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { @Override public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { - log.debug("Revert vm snapshot"); + logger.debug("Revert vm snapshot"); VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot; UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); @@ -306,7 +304,7 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { if (snapshotName == null) { err = String.format("Could not find StorPool's snapshot vm snapshot uuid=%s and volume uui=%s", vmSnapshot.getUuid(), volumeObjectTO.getUuid()); - log.error("Could not delete snapshot for vm:" + err); + logger.error("Could not delete snapshot for vm:" + err); } String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volumeObjectTO.getPath(), true); VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeObjectTO.getId(), StorPoolUtil.SP_PROVIDER_NAME); @@ -347,14 +345,14 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { result = vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); } catch (CloudRuntimeException | NoTransitionException e) { String errMsg = String.format("Error while finalize create vm snapshot [%s] due to %s", vmSnapshot.getName(), e.getMessage()); - log.error(errMsg, e); + logger.error(errMsg, e); throw new CloudRuntimeException(errMsg); } finally { if (!result) { try { vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); } catch (NoTransitionException e1) { - log.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); } } } @@ -381,7 +379,7 @@ private void publishUsageEvents(String type, VMSnapshot vmSnapshot, UserVm userV vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, VMSnapshot.class.getName(), vmSnapshot.getUuid()); } catch (Exception e) { - log.error("Failed 
to publis usage event " + type, e); + logger.error("Failed to publis usage event " + type, e); } } } diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPConfigCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPConfigCmd.java index 2a643dda76f1..f738a870053e 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPConfigCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPConfigCmd.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import org.apache.commons.lang.StringEscapeUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -53,7 +52,6 @@ requestHasSensitiveInfo = true, responseHasSensitiveInfo = false) public class LDAPConfigCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LDAPConfigCmd.class.getName()); @Inject diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPRemoveCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPRemoveCmd.java index b915f97fe088..c70f84ffd699 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPRemoveCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LDAPRemoveCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.response.LDAPRemoveResponse; import org.apache.cloudstack.ldap.LdapConfigurationVO; import org.apache.cloudstack.ldap.LdapManager; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.utils.Pair; @@ -38,7 +37,6 @@ @APICommand(name = "ldapRemove", description = "(Deprecated , use deleteLdapConfiguration) Remove the LDAP context for this site.", responseObject = LDAPConfigResponse.class, since = "3.0.1", requestHasSensitiveInfo = false, 
responseHasSensitiveInfo = false) public class LDAPRemoveCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LDAPRemoveCmd.class.getName()); @Inject private LdapManager _ldapManager; diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapAddConfigurationCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapAddConfigurationCmd.java index 7c592888364e..1131667d98ae 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapAddConfigurationCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapAddConfigurationCmd.java @@ -20,7 +20,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -36,7 +35,6 @@ @APICommand(name = "addLdapConfiguration", description = "Add a new Ldap Configuration", responseObject = LdapConfigurationResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LdapAddConfigurationCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LdapAddConfigurationCmd.class.getName()); private static final String s_name = "ldapconfigurationresponse"; @Inject diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapCreateAccountCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapCreateAccountCmd.java index 2196aa8d4f51..880ecea4d13c 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapCreateAccountCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapCreateAccountCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.ldap.LdapManager; 
import org.apache.cloudstack.ldap.LdapUser; import org.apache.cloudstack.ldap.NoLdapUserMatchingQueryException; -import org.apache.log4j.Logger; import org.bouncycastle.util.encoders.Base64; import javax.inject.Inject; @@ -47,7 +46,6 @@ @APICommand(name = "ldapCreateAccount", description = "Creates an account from an LDAP user", responseObject = AccountResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LdapCreateAccountCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LdapCreateAccountCmd.class.getName()); private static final String s_name = "createaccountresponse"; @Inject diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapDeleteConfigurationCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapDeleteConfigurationCmd.java index 3ffebecfb95a..15e6c836d0db 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapDeleteConfigurationCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapDeleteConfigurationCmd.java @@ -20,7 +20,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -36,7 +35,6 @@ @APICommand(name = "deleteLdapConfiguration", description = "Remove an Ldap Configuration", responseObject = LdapConfigurationResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LdapDeleteConfigurationCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LdapDeleteConfigurationCmd.class.getName()); private static final String s_name = "ldapconfigurationresponse"; @Inject diff --git 
a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java index 96696d561cd2..087bd63c2969 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapImportUsersCmd.java @@ -42,7 +42,6 @@ import org.apache.cloudstack.ldap.LdapUser; import org.apache.cloudstack.ldap.NoLdapUserMatchingQueryException; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.bouncycastle.util.encoders.Base64; import com.cloud.domain.Domain; @@ -61,7 +60,6 @@ @APICommand(name = "importLdapUsers", description = "Import LDAP users", responseObject = LdapUserResponse.class, since = "4.3.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LdapImportUsersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(LdapImportUsersCmd.class.getName()); private static final String s_name = "ldapuserresponse"; @@ -108,18 +106,18 @@ public LdapImportUsersCmd(final LdapManager ldapManager, final DomainService dom private void createCloudstackUserAccount(LdapUser user, String accountName, Domain domain) { Account account = _accountService.getActiveAccountByName(accountName, domain.getId()); if (account == null) { - s_logger.debug("No account exists with name: " + accountName + " creating the account and an user with name: " + user.getUsername() + " in the account"); + logger.debug("No account exists with name: " + accountName + " creating the account and an user with name: " + user.getUsername() + " in the account"); _accountService.createUserAccount(user.getUsername(), generatePassword(), user.getFirstname(), user.getLastname(), user.getEmail(), timezone, accountName, getAccountType(), getRoleId(), 
domain.getId(), domain.getNetworkDomain(), details, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); } else { // check if the user exists. if yes, call update UserAccount csuser = _accountService.getActiveUserAccount(user.getUsername(), domain.getId()); if (csuser == null) { - s_logger.debug("No user exists with name: " + user.getUsername() + " creating a user in the account: " + accountName); + logger.debug("No user exists with name: " + user.getUsername() + " creating a user in the account: " + accountName); _accountService.createUser(user.getUsername(), generatePassword(), user.getFirstname(), user.getLastname(), user.getEmail(), timezone, accountName, domain.getId(), UUID.randomUUID().toString(), User.Source.LDAP); } else { - s_logger.debug("Account [name=%s] and user [name=%s] already exist in CloudStack. Executing the user update."); + logger.debug("Account [name=%s] and user [name=%s] already exist in CloudStack. Executing the user update."); UpdateUserCmd updateUserCmd = new UpdateUserCmd(); updateUserCmd.setId(csuser.getId()); @@ -148,7 +146,7 @@ public void execute() } } catch (NoLdapUserMatchingQueryException ex) { users = new ArrayList(); - s_logger.info("No Ldap user matching query. " + " ::: " + ex.getMessage()); + logger.info("No Ldap user matching query. 
" + " ::: " + ex.getMessage()); } List addedUsers = new ArrayList(); @@ -158,7 +156,7 @@ public void execute() createCloudstackUserAccount(user, getAccountName(user), domain); addedUsers.add(user); } catch (InvalidParameterValueException ex) { - s_logger.error("Failed to create user with username: " + user.getUsername() + " ::: " + ex.getMessage()); + logger.error("Failed to create user with username: " + user.getUsername() + " ::: " + ex.getMessage()); } } ListResponse response = new ListResponse(); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListConfigurationCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListConfigurationCmd.java index d12ca4ab6c1c..c34d026f89b2 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListConfigurationCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListConfigurationCmd.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseListCmd; @@ -39,7 +38,6 @@ @APICommand(name = "listLdapConfigurations", responseObject = LdapConfigurationResponse.class, description = "Lists all LDAP configurations", since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LdapListConfigurationCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(LdapListConfigurationCmd.class.getName()); private static final String s_name = "ldapconfigurationresponse"; diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListUsersCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListUsersCmd.java index 0c70c4d5b308..e5d434d38108 100644 --- 
a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListUsersCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapListUsersCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseListCmd; @@ -79,7 +78,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin,RoleType.DomainAdmin}) public class LdapListUsersCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(LdapListUsersCmd.class.getName()); private static final String s_name = "ldapuserresponse"; @Inject private LdapManager _ldapManager; @@ -169,7 +167,7 @@ private List getCloudstackUsers() { } private void traceUserList() { - if(s_logger.isTraceEnabled()) { + if(logger.isTraceEnabled()) { StringBuilder users = new StringBuilder(); for (UserResponse user : cloudstackUsers) { if (users.length()> 0) { @@ -178,13 +176,13 @@ private void traceUserList() { users.append(user.getUsername()); } - s_logger.trace(String.format("checking against %d cloudstackusers: %s.", this.cloudstackUsers.size(), users.toString())); + logger.trace(String.format("checking against %d cloudstackusers: %s.", this.cloudstackUsers.size(), users.toString())); } } private List applyUserFilter(List ldapResponses) { - if(s_logger.isTraceEnabled()) { - s_logger.trace(String.format("applying filter: %s or %s.", this.getListTypeString(), this.getUserFilter())); + if(logger.isTraceEnabled()) { + logger.trace(String.format("applying filter: %s or %s.", this.getListTypeString(), this.getUserFilter())); } List responseList = getUserFilter().filter(this,ldapResponses); return responseList; @@ -218,14 +216,14 @@ boolean isACloudstackUser(final LdapUser ldapUser) { if 
(cloudstackUsers != null) { for (final UserResponse cloudstackUser : cloudstackUsers) { if (ldapUser.getUsername().equals(cloudstackUser.getUsername())) { - if(s_logger.isTraceEnabled()) { - s_logger.trace(String.format("found user %s in cloudstack", ldapUser.getUsername())); + if(logger.isTraceEnabled()) { + logger.trace(String.format("found user %s in cloudstack", ldapUser.getUsername())); } rc = true; } else { - if(s_logger.isTraceEnabled()) { - s_logger.trace(String.format("ldap user %s does not match cloudstack user %s", ldapUser.getUsername(), cloudstackUser.getUsername())); + if(logger.isTraceEnabled()) { + logger.trace(String.format("ldap user %s does not match cloudstack user %s", ldapUser.getUsername(), cloudstackUser.getUsername())); } } } @@ -234,20 +232,20 @@ boolean isACloudstackUser(final LdapUser ldapUser) { } boolean isACloudstackUser(final LdapUserResponse ldapUser) { - if(s_logger.isTraceEnabled()) { - s_logger.trace("checking response : " + ldapUser.toString()); + if(logger.isTraceEnabled()) { + logger.trace("checking response : " + ldapUser.toString()); } final List cloudstackUsers = getCloudstackUsers(); if (cloudstackUsers != null && cloudstackUsers.size() != 0) { for (final UserResponse cloudstackUser : cloudstackUsers) { if (ldapUser.getUsername().equals(cloudstackUser.getUsername())) { - if(s_logger.isTraceEnabled()) { - s_logger.trace(String.format("found user %s in cloudstack user %s", ldapUser.getUsername(), cloudstackUser.getUsername())); + if(logger.isTraceEnabled()) { + logger.trace(String.format("found user %s in cloudstack user %s", ldapUser.getUsername(), cloudstackUser.getUsername())); } return true; } else { - if(s_logger.isTraceEnabled()) { - s_logger.trace(String.format("ldap user %s does not match cloudstack user %s", ldapUser.getUsername(), cloudstackUser.getUsername())); + if(logger.isTraceEnabled()) { + logger.trace(String.format("ldap user %s does not match cloudstack user %s", ldapUser.getUsername(), 
cloudstackUser.getUsername())); } } } @@ -348,8 +346,8 @@ static UserFilter fromString(String val) { * @return unfiltered list of the input list of ldap users */ public List filterNoFilter(List input) { - if(s_logger.isTraceEnabled()) { - s_logger.trace("returning unfiltered list of ldap users"); + if(logger.isTraceEnabled()) { + logger.trace("returning unfiltered list of ldap users"); } annotateUserListWithSources(input); return input; @@ -361,8 +359,8 @@ public List filterNoFilter(List input) { * @return a list of ldap users not already in ACS */ public List filterAnyDomain(List input) { - if(s_logger.isTraceEnabled()) { - s_logger.trace("filtering existing users"); + if(logger.isTraceEnabled()) { + logger.trace("filtering existing users"); } final List ldapResponses = new ArrayList(); for (final LdapUserResponse user : input) { @@ -394,8 +392,8 @@ private boolean isNotAlreadyImportedInTheCurrentDomain(LdapUserResponse user) { * @return a list of ldap users not already in ACS */ public List filterLocalDomain(List input) { - if(s_logger.isTraceEnabled()) { - s_logger.trace("filtering local domain users"); + if(logger.isTraceEnabled()) { + logger.trace("filtering local domain users"); } final List ldapResponses = new ArrayList(); String domainId = getCurrentDomainId(); @@ -430,8 +428,8 @@ private String getCurrentDomainId() { * @return annotated list of the users of the input list, that will be automatically imported or synchronised */ public List filterPotentialImport(List input) { - if(s_logger.isTraceEnabled()) { - s_logger.trace("should be filtering potential imports!!!"); + if(logger.isTraceEnabled()) { + logger.trace("should be filtering potential imports!!!"); } // functional possibility do not add only users not yet in cloudstack but include users that would be moved if they are so in ldap? 
// this means if they are part of a account linked to an ldap group/ou diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapUserSearchCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapUserSearchCmd.java index a3c7d4f64068..b702beda1708 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapUserSearchCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LdapUserSearchCmd.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.BaseListCmd; @@ -38,7 +37,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LdapUserSearchCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(LdapUserSearchCmd.class.getName()); private static final String s_name = "ldapuserresponse"; @Inject private LdapManager _ldapManager; @@ -75,7 +73,7 @@ public void execute() { try { users = _ldapManager.searchUsers(query); } catch (final NoLdapUserMatchingQueryException e) { - s_logger.debug(e.getMessage()); + logger.debug(e.getMessage()); } final List ldapUserResponses = createLdapUserResponse(users); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java index af5420ef488c..7e2114ea00f7 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkAccountToLdapCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.ldap.LdapManager; import org.apache.cloudstack.ldap.LdapUser; import org.apache.cloudstack.ldap.NoLdapUserMatchingQueryException; 
-import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; @@ -46,7 +45,6 @@ @APICommand(name = "linkAccountToLdap", description = "link a cloudstack account to a group or OU in ldap", responseObject = LinkDomainToLdapResponse.class, since = "4.11.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin,RoleType.DomainAdmin}) public class LinkAccountToLdapCmd extends BaseCmd { - public static final Logger LOGGER = Logger.getLogger(LinkAccountToLdapCmd.class.getName()); @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, required = true, entityType = DomainResponse.class, description = "The id of the domain that is to contain the linked account.") private Long domainId; @@ -79,7 +77,7 @@ public void execute() throws ServerApiException { try { ldapUser = _ldapManager.getUser(admin, type, ldapDomain, domainId); } catch (NoLdapUserMatchingQueryException e) { - LOGGER.debug("no ldap user matching username " + admin + " in the given group/ou", e); + logger.debug("no ldap user matching username " + admin + " in the given group/ou", e); } if (ldapUser != null && !ldapUser.isDisabled()) { Account account = _accountService.getActiveAccountByName(admin, domainId); @@ -89,15 +87,15 @@ public void execute() throws ServerApiException { .createUserAccount(admin, "", ldapUser.getFirstname(), ldapUser.getLastname(), ldapUser.getEmail(), null, admin, Account.Type.DOMAIN_ADMIN, RoleType.DomainAdmin.getId(), domainId, null, null, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); response.setAdminId(String.valueOf(userAccount.getAccountId())); - LOGGER.info("created an account with name " + admin + " in the given domain " + domainId); + logger.info("created an account with name " + admin + " in the given domain " + domainId); } catch (Exception e) { - LOGGER.info("an exception occurred while creating account with name " + admin + " in 
domain " + domainId, e); + logger.info("an exception occurred while creating account with name " + admin + " in domain " + domainId, e); } } else { - LOGGER.debug("an account with name " + admin + " already exists in the domain " + domainId); + logger.debug("an account with name " + admin + " already exists in the domain " + domainId); } } else { - LOGGER.debug("ldap user with username " + admin + " is disabled in the given group/ou"); + logger.debug("ldap user with username " + admin + " is disabled in the given group/ou"); } } response.setObjectName(this.getActualCommandName()); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java index db80ff345f5b..d5187f99c995 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/api/command/LinkDomainToLdapCmd.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.ldap.LdapManager; import org.apache.cloudstack.ldap.LdapUser; import org.apache.cloudstack.ldap.NoLdapUserMatchingQueryException; -import org.apache.log4j.Logger; import com.cloud.user.Account; @@ -44,7 +43,6 @@ @APICommand(name = "linkDomainToLdap", description = "link an existing cloudstack domain to group or OU in ldap", responseObject = LinkDomainToLdapResponse.class, since = "4.6.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class LinkDomainToLdapCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(LinkDomainToLdapCmd.class.getName()); @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, required = true, entityType = DomainResponse.class, description = "The id of the domain which has to be " + "linked to LDAP.") @@ -100,7 +98,7 @@ public void execute() throws ServerApiException { try { 
ldapUser = _ldapManager.getUser(admin, type, getLdapDomain(), domainId); } catch (NoLdapUserMatchingQueryException e) { - s_logger.debug("no ldap user matching username " + admin + " in the given group/ou", e); + logger.debug("no ldap user matching username " + admin + " in the given group/ou", e); } if (ldapUser != null && !ldapUser.isDisabled()) { Account account = _accountService.getActiveAccountByName(admin, domainId); @@ -109,15 +107,15 @@ public void execute() throws ServerApiException { UserAccount userAccount = _accountService.createUserAccount(admin, "", ldapUser.getFirstname(), ldapUser.getLastname(), ldapUser.getEmail(), null, admin, Account.Type.DOMAIN_ADMIN, RoleType.DomainAdmin.getId(), domainId, null, null, UUID.randomUUID().toString(), UUID.randomUUID().toString(), User.Source.LDAP); response.setAdminId(String.valueOf(userAccount.getAccountId())); - s_logger.info("created an account with name " + admin + " in the given domain " + domainId); + logger.info("created an account with name " + admin + " in the given domain " + domainId); } catch (Exception e) { - s_logger.info("an exception occurred while creating account with name " + admin +" in domain " + domainId, e); + logger.info("an exception occurred while creating account with name " + admin +" in domain " + domainId, e); } } else { - s_logger.debug("an account with name " + admin + " already exists in the domain " + domainId); + logger.debug("an account with name " + admin + " already exists in the domain " + domainId); } } else { - s_logger.debug("ldap user with username "+admin+" is disabled in the given group/ou"); + logger.debug("ldap user with username "+admin+" is disabled in the given group/ou"); } } response.setObjectName("LinkDomainToLdap"); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java index 2413d718b621..552d5969a9e4 
100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java @@ -28,10 +28,8 @@ import javax.naming.ldap.LdapContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; public class ADLdapUserManagerImpl extends OpenLdapUserManagerImpl implements LdapUserManager { - public static final Logger s_logger = Logger.getLogger(ADLdapUserManagerImpl.class.getName()); private static final String MICROSOFT_AD_NESTED_MEMBERS_FILTER = "memberOf:1.2.840.113556.1.4.1941:"; private static final String MICROSOFT_AD_MEMBERS_FILTER = "memberOf"; @@ -77,7 +75,7 @@ String generateADGroupSearchFilter(String groupName, Long domainId) { result.append(memberOfFilter); result.append(")"); - s_logger.debug("group search filter = " + result); + logger.debug("group search filter = " + result); return result.toString(); } diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapAuthenticator.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapAuthenticator.java index 41ef9573bb2d..b85098815943 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapAuthenticator.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapAuthenticator.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.auth.UserAuthenticator; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -38,7 +37,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class LdapAuthenticator extends AdapterBase implements UserAuthenticator { - private static final Logger LOGGER = Logger.getLogger(LdapAuthenticator.class.getName()); @Inject private LdapManager _ldapManager; 
@@ -61,15 +59,15 @@ public LdapAuthenticator(final LdapManager ldapManager, final UserAccountDao use public Pair authenticate(final String username, final String password, final Long domainId, final Map requestParameters) { Pair rc = new Pair(false, null); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Retrieving ldap user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving ldap user: " + username); } // TODO not allowing an empty password is a policy we shouldn't decide on. A private cloud may well want to allow this. if (StringUtils.isNoneEmpty(username, password)) { if (_ldapManager.isLdapEnabled(domainId) || _ldapManager.isLdapEnabled()) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("LDAP is enabled in the ldapManager"); + if (logger.isTraceEnabled()) { + logger.trace("LDAP is enabled in the ldapManager"); } final UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user != null && ! User.Source.LDAP.equals(user.getSource())) { @@ -78,25 +76,25 @@ public Pair authenticate(final String use List ldapTrustMapVOs = getLdapTrustMapVOS(domainId); if(ldapTrustMapVOs != null && ldapTrustMapVOs.size() > 0) { if(ldapTrustMapVOs.size() == 1 && ldapTrustMapVOs.get(0).getAccountId() == 0) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("We have a single mapping of a domain to an ldap group or ou"); + if (logger.isTraceEnabled()) { + logger.trace("We have a single mapping of a domain to an ldap group or ou"); } rc = authenticate(username, password, domainId, user, ldapTrustMapVOs.get(0)); } else { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("we are dealing with mapping of accounts in a domain to ldap groups"); + if (logger.isTraceEnabled()) { + logger.trace("we are dealing with mapping of accounts in a domain to ldap groups"); } rc = authenticate(username, password, domainId, user, ldapTrustMapVOs); } } else { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("'this' domain (%d) is not linked to ldap 
follow normal authentication", domainId)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("'this' domain (%d) is not linked to ldap follow normal authentication", domainId)); } rc = authenticate(username, password, domainId, user); } } } else { - LOGGER.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); } return rc; @@ -175,7 +173,7 @@ Pair authenticate(String username, String } } } catch (NoLdapUserMatchingQueryException e) { - LOGGER.debug(e.getMessage()); + logger.debug(e.getMessage()); disableUserInCloudStack(userAccount); } @@ -183,7 +181,7 @@ Pair authenticate(String username, String } private void tracelist(String msg, List listToTrace) { - if (LOGGER.isTraceEnabled()) { + if (logger.isTraceEnabled()) { StringBuilder logMsg = new StringBuilder(); logMsg.append(msg); logMsg.append(':'); @@ -191,13 +189,13 @@ private void tracelist(String msg, List listToTrace) { logMsg.append(' '); logMsg.append(listMember); } - LOGGER.trace(logMsg.toString()); + logger.trace(logMsg.toString()); } } private void logAndDisable(UserAccount userAccount, String msg, boolean remove) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info(msg); + if (logger.isInfoEnabled()) { + logger.info(msg); } if(remove) { removeUserInCloudStack(userAccount); @@ -230,7 +228,7 @@ private Pair authenticate(String username final Account.Type accountType = ldapTrustMapVO.getAccountType(); processLdapUser(password, domainId, user, rc, ldapUser, accountType); } catch (NoLdapUserMatchingQueryException e) { - LOGGER.debug(e.getMessage()); + logger.debug(e.getMessage()); // no user in ldap ==>> disable user in cloudstack disableUserInCloudStack(user); } @@ -273,10 +271,10 @@ Pair authenticate(String username, String if(!ldapUser.isDisabled()) { result = _ldapManager.canAuthenticate(ldapUser.getPrincipal(), password, domainId); } else { - LOGGER.debug("user with principal "+ ldapUser.getPrincipal() + " is disabled in ldap"); + logger.debug("user 
with principal "+ ldapUser.getPrincipal() + " is disabled in ldap"); } } catch (NoLdapUserMatchingQueryException e) { - LOGGER.debug(e.getMessage()); + logger.debug(e.getMessage()); } } return processResultAndAction(user, result); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapContextFactory.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapContextFactory.java index 0161adf9fda4..e6f23ef8ab3f 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapContextFactory.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapContextFactory.java @@ -26,10 +26,11 @@ import javax.naming.ldap.LdapContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class LdapContextFactory { - private static final Logger s_logger = Logger.getLogger(LdapContextFactory.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private LdapConfiguration _ldapConfiguration; @@ -58,7 +59,7 @@ private LdapContext createInitialDirContext(final String principal, final String private LdapContext createInitialDirContext(final String principal, final String password, final String providerUrl, final boolean isSystemContext, Long domainId) throws NamingException, IOException { Hashtable environment = getEnvironment(principal, password, providerUrl, isSystemContext, domainId); - s_logger.debug("initializing ldap with provider url: " + environment.get(Context.PROVIDER_URL)); + logger.debug("initializing ldap with provider url: " + environment.get(Context.PROVIDER_URL)); return new InitialLdapContext(environment, null); } @@ -70,7 +71,7 @@ private void enableSSL(final Hashtable environment, Long domainI final boolean sslStatus = _ldapConfiguration.getSSLStatus(domainId); if (sslStatus) { - s_logger.info("LDAP 
SSL enabled."); + logger.info("LDAP SSL enabled."); environment.put(Context.SECURITY_PROTOCOL, "ssl"); System.setProperty("javax.net.ssl.trustStore", _ldapConfiguration.getTrustStore(domainId)); System.setProperty("javax.net.ssl.trustStorePassword", _ldapConfiguration.getTrustStorePassword(domainId)); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java index b5b67c0c0a53..68f5580ed1be 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapManagerImpl.java @@ -52,7 +52,6 @@ import org.apache.cloudstack.ldap.dao.LdapConfigurationDao; import org.apache.cloudstack.ldap.dao.LdapTrustMapDao; import org.apache.commons.lang.Validate; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.domain.DomainVO; @@ -65,7 +64,6 @@ @Component public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManager, LdapValidator { - private static final Logger LOGGER = Logger.getLogger(LdapManagerImpl.class.getName()); @Inject private LdapConfigurationDao _ldapConfigurationDao; @@ -106,7 +104,7 @@ public LdapManagerImpl(final LdapConfigurationDao ldapConfigurationDao, final Ld @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - LOGGER.debug("Configuring LDAP Manager"); + logger.debug("Configuring LDAP Manager"); addAccountRemovalListener(); addDomainRemovalListener(); @@ -126,7 +124,7 @@ public void onPublishMessage(String senderAddress, String subject, Object args) removeTrustmap(ldapTrustMapVO); } } catch (final Exception e) { - LOGGER.error("Caught exception while removing account linked to LDAP", e); + logger.error("Caught exception while removing account linked to 
LDAP", e); } } }); @@ -143,7 +141,7 @@ public void onPublishMessage(String senderAddress, String subject, Object args) removeTrustmap(ldapTrustMapVO); } } catch (final Exception e) { - LOGGER.error("Caught exception while removing trust-map for domain linked to LDAP", e); + logger.error("Caught exception while removing trust-map for domain linked to LDAP", e); } } }); @@ -152,7 +150,7 @@ public void onPublishMessage(String senderAddress, String subject, Object args) private void removeTrustmap(LdapTrustMapVO ldapTrustMapVO) { String msg = String.format("Removing link between LDAP: %s - type: %s and account: %s on domain: %s", ldapTrustMapVO.getName(), ldapTrustMapVO.getType().name(), ldapTrustMapVO.getAccountId(), ldapTrustMapVO.getDomainId()); - LOGGER.debug(msg); + logger.debug(msg); _ldapTrustMapDao.remove(ldapTrustMapVO.getId()); } @@ -181,10 +179,10 @@ private LdapConfigurationResponse addConfigurationInternal(final String hostname context = _ldapContextFactory.createBindContext(providerUrl,domainId); configuration = new LdapConfigurationVO(hostname, port, domainId); _ldapConfigurationDao.persist(configuration); - LOGGER.info("Added new ldap server with url: " + providerUrl + (domainId == null ? "": " for domain " + domainId)); + logger.info("Added new ldap server with url: " + providerUrl + (domainId == null ? 
"": " for domain " + domainId)); return createLdapConfigurationResponse(configuration); } catch (NamingException | IOException e) { - LOGGER.debug("NamingException while doing an LDAP bind", e); + logger.debug("NamingException while doing an LDAP bind", e); throw new InvalidParameterValueException("Unable to bind to the given LDAP server"); } finally { closeContext(context); @@ -207,13 +205,13 @@ public boolean canAuthenticate(final String principal, final String password, fi // TODO return the right account for this user final LdapContext context = _ldapContextFactory.createUserContext(principal, password, domainId); closeContext(context); - if(LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("User(%s) authenticated for domain(%s)", principal, domainId)); + if(logger.isTraceEnabled()) { + logger.trace(String.format("User(%s) authenticated for domain(%s)", principal, domainId)); } return true; } catch (NamingException | IOException e) {/* AuthenticationException is caught as NamingException */ - LOGGER.debug("Exception while doing an LDAP bind for user "+" "+principal, e); - LOGGER.info("Failed to authenticate user: " + principal + ". incorrect password."); + logger.debug("Exception while doing an LDAP bind for user "+" "+principal, e); + logger.info("Failed to authenticate user: " + principal + ". incorrect password."); return false; } } @@ -224,7 +222,7 @@ private void closeContext(final LdapContext context) { context.close(); } } catch (final NamingException e) { - LOGGER.warn(e.getMessage(), e); + logger.warn(e.getMessage(), e); } } @@ -268,7 +266,7 @@ private LdapConfigurationResponse deleteConfigurationInternal(final String hostn throw new InvalidParameterValueException("Cannot find configuration with hostname " + hostname); } else { _ldapConfigurationDao.remove(configuration.getId()); - LOGGER.info("Removed ldap server with url: " + hostname + ':' + port + (domainId == null ? 
"" : " for domain id " + domainId)); + logger.info("Removed ldap server with url: " + hostname + ':' + port + (domainId == null ? "" : " for domain id " + domainId)); return createLdapConfigurationResponse(configuration); } } @@ -300,7 +298,7 @@ public LdapUser getUser(final String username, Long domainId) throws NoLdapUserM return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(null)).getUser(escapedUsername, context, domainId); } catch (NamingException | IOException e) { - LOGGER.debug("ldap Exception: ",e); + logger.debug("ldap Exception: ",e); throw new NoLdapUserMatchingQueryException("No Ldap User found for username: "+username); } finally { closeContext(context); @@ -321,7 +319,7 @@ public LdapUser getUser(final String username, final String type, final String n LdapUserManager userManagerFactory = _ldapUserManagerFactory.getInstance(ldapProvider); return userManagerFactory.getUser(escapedUsername, type, name, context, domainId); } catch (NamingException | IOException e) { - LOGGER.debug("ldap Exception: ",e); + logger.debug("ldap Exception: ",e); throw new NoLdapUserMatchingQueryException("No Ldap User found for username: "+username + " in group: " + name + " of type: " + type); } finally { closeContext(context); @@ -335,7 +333,7 @@ public List getUsers(Long domainId) throws NoLdapUserMatchingQueryExce context = _ldapContextFactory.createBindContext(domainId); return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(domainId)).getUsers(context, domainId); } catch (NamingException | IOException e) { - LOGGER.debug("ldap Exception: ",e); + logger.debug("ldap Exception: ",e); throw new NoLdapUserMatchingQueryException("*"); } finally { closeContext(context); @@ -349,7 +347,7 @@ public List getUsersInGroup(String groupName, Long domainId) throws No context = _ldapContextFactory.createBindContext(domainId); return 
_ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(domainId)).getUsersInGroup(groupName, context, domainId); } catch (NamingException | IOException e) { - LOGGER.debug("ldap NamingException: ",e); + logger.debug("ldap NamingException: ",e); throw new NoLdapUserMatchingQueryException("groupName=" + groupName); } finally { closeContext(context); @@ -387,7 +385,7 @@ public List searchUsers(final String username) throws NoLdapUserMatchi final String escapedUsername = LdapUtils.escapeLDAPSearchFilter(username); return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(null)).getUsers("*" + escapedUsername + "*", context, null); } catch (NamingException | IOException e) { - LOGGER.debug("ldap Exception: ",e); + logger.debug("ldap Exception: ",e); throw new NoLdapUserMatchingQueryException(username); } finally { closeContext(context); @@ -416,7 +414,7 @@ private LinkDomainToLdapResponse linkDomainToLdap(Long domainId, String type, St DomainVO domain = domainDao.findById(vo.getDomainId()); String domainUuid = ""; if (domain == null) { - LOGGER.error("no domain in database for id " + vo.getDomainId()); + logger.error("no domain in database for id " + vo.getDomainId()); } else { domainUuid = domain.getUuid(); } @@ -465,7 +463,7 @@ public LinkAccountToLdapResponse linkAccountToLdap(LinkAccountToLdapCmd cmd) { DomainVO domain = domainDao.findById(vo.getDomainId()); String domainUuid = ""; if (domain == null) { - LOGGER.error("no domain in database for id " + vo.getDomainId()); + logger.error("no domain in database for id " + vo.getDomainId()); } else { domainUuid = domain.getUuid(); } @@ -484,16 +482,16 @@ private void clearOldAccountMapping(LinkAccountToLdapCmd cmd) { String msg = String.format("group %s is mapped to account %d in the current domain (%s)", cmd.getLdapDomain(), oldVo.getAccountId(), cmd.getDomainId()); if (null == oldAcount.getRemoved()) { msg += ", delete the old map before mapping a new account to the same group."; - 
LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } else { msg += ", the old map is deleted."; - LOGGER.warn(msg); + logger.warn(msg); _ldapTrustMapDao.expunge(oldVo.getId()); } } else { String msg = String.format("group %s is mapped to the current domain (%s) for autoimport and can not be used for autosync", cmd.getLdapDomain(), cmd.getDomainId()); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapUserManagerFactory.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapUserManagerFactory.java index a6217dcb5cb8..55d482a29b1c 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapUserManagerFactory.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/LdapUserManagerFactory.java @@ -18,7 +18,8 @@ */ package org.apache.cloudstack.ldap; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.BeansException; import org.springframework.beans.factory.config.AutowireCapableBeanFactory; import org.springframework.context.ApplicationContext; @@ -30,7 +31,7 @@ public class LdapUserManagerFactory implements ApplicationContextAware { - public static final Logger s_logger = Logger.getLogger(LdapUserManagerFactory.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); static Map ldapUserManagerMap = new HashMap<>(); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java index 12bda947f36c..4c125af2ea67 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java +++ 
b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java @@ -36,10 +36,11 @@ import org.apache.cloudstack.ldap.dao.LdapTrustMapDao; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class OpenLdapUserManagerImpl implements LdapUserManager { - private static final Logger LOGGER = Logger.getLogger(OpenLdapUserManagerImpl.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected LdapConfiguration _ldapConfiguration; @@ -112,8 +113,8 @@ private String generateSearchFilter(final String username, Long domainId) { result.append(")"); String returnString = result.toString(); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("constructed ldap query: " + returnString); + if (logger.isTraceEnabled()) { + logger.trace("constructed ldap query: " + returnString); } return returnString; } @@ -133,8 +134,8 @@ private List getMappedLdapGroups(Long domainId) { private String getMemberOfGroupString(String group, String memberOfAttribute) { final StringBuilder memberOfFilter = new StringBuilder(); if (null != group) { - if(LOGGER.isDebugEnabled()) { - LOGGER.debug("adding search filter for '" + group + + if(logger.isDebugEnabled()) { + logger.debug("adding search filter for '" + group + "', using '" + memberOfAttribute + "'"); } memberOfFilter.append("(" + memberOfAttribute + "="); @@ -253,7 +254,7 @@ public List getUsersInGroup(String groupName, LdapContext context, Lon try{ users.add(getUserForDn(userdn, context, domainId)); } catch (NamingException e){ - LOGGER.info("Userdn: " + userdn + " Not Found:: Exception message: " + e.getMessage()); + logger.info("Userdn: " + userdn + " Not Found:: Exception message: " + e.getMessage()); } } } @@ -292,8 +293,8 @@ public LdapUser searchUser(final String basedn, final String 
searchString, final searchControls.setReturningAttributes(_ldapConfiguration.getReturnAttributes(domainId)); NamingEnumeration results = context.search(basedn, searchString, searchControls); - if(LOGGER.isDebugEnabled()) { - LOGGER.debug("searching user(s) with filter: \"" + searchString + "\""); + if(logger.isDebugEnabled()) { + logger.debug("searching user(s) with filter: \"" + searchString + "\""); } final List users = new ArrayList(); while (results.hasMoreElements()) { @@ -342,7 +343,7 @@ public List searchUsers(final String username, final LdapContext conte } } } else { - LOGGER.info("No controls were sent from the ldap server"); + logger.info("No controls were sent from the ldap server"); } context.setRequestControls(new Control[] {new PagedResultsControl(pageSize, cookie, Control.CRITICAL)}); } while (cookie != null); diff --git a/plugins/user-authenticators/ldap/src/test/resources/log4j.xml b/plugins/user-authenticators/ldap/src/test/resources/log4j.xml index 031d2283580e..c369c454640e 100755 --- a/plugins/user-authenticators/ldap/src/test/resources/log4j.xml +++ b/plugins/user-authenticators/ldap/src/test/resources/log4j.xml @@ -19,60 +19,46 @@ under the License. 
--> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + net.sf.cglib.proxy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/plugins/user-authenticators/md5/src/main/java/org/apache/cloudstack/auth/MD5UserAuthenticator.java b/plugins/user-authenticators/md5/src/main/java/org/apache/cloudstack/auth/MD5UserAuthenticator.java index 3f3898f64649..7286f57d0488 100644 --- a/plugins/user-authenticators/md5/src/main/java/org/apache/cloudstack/auth/MD5UserAuthenticator.java +++ b/plugins/user-authenticators/md5/src/main/java/org/apache/cloudstack/auth/MD5UserAuthenticator.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.UserAccount; import com.cloud.user.dao.UserAccountDao; @@ -37,30 +36,29 @@ * */ public class MD5UserAuthenticator extends AdapterBase implements UserAuthenticator { - public static final Logger s_logger = Logger.getLogger(MD5UserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isAnyEmpty(username, password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair<>(false, null); } UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); return new Pair<>(false, null); } if (!user.getPassword().equals(encode(password))) { - s_logger.debug("Password does not match"); + 
logger.debug("Password does not match"); return new Pair<>(false, ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT); } return new Pair<>(true, null); diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java index 85730651248b..6d7123ebe8e7 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2AuthManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.oauth2.dao.OauthProviderDao; import org.apache.cloudstack.oauth2.vo.OauthProviderVO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -44,7 +43,6 @@ import java.util.Map; public class OAuth2AuthManagerImpl extends ManagerBase implements OAuth2AuthManager, Manager, Configurable { - private static final Logger s_logger = Logger.getLogger(OAuth2AuthManagerImpl.class); @Inject private UserDao _userDao; @@ -67,10 +65,10 @@ public List> getAuthCommands() { @Override public boolean start() { if (isOAuthPluginEnabled()) { - s_logger.info("OAUTH plugin loaded"); + logger.info("OAUTH plugin loaded"); initializeUserOAuth2AuthenticationProvidersMap(); } else { - s_logger.info("OAUTH plugin not enabled so not loading"); + logger.info("OAUTH plugin not enabled so not loading"); } return true; } diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java index 8484a5ef798d..1f38adfd63bc 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java +++ 
b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java @@ -27,13 +27,11 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.auth.UserAuthenticator; import org.apache.cloudstack.auth.UserOAuth2Authenticator; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.Map; public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenticator { - public static final Logger s_logger = Logger.getLogger(OAuth2UserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @@ -45,13 +43,13 @@ public class OAuth2UserAuthenticator extends AdapterBase implements UserAuthenti @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying OAuth2 auth for user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Trying OAuth2 auth for user: " + username); } final UserAccount userAccount = _userAccountDao.getUserAccount(username, domainId); if (userAccount == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not OAUTH2"); + logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not OAUTH2"); return new Pair(false, null); } else { User user = _userDao.getUser(userAccount.getId()); diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/DeleteOAuthProviderCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/DeleteOAuthProviderCmd.java index 6cd3156f68a2..28f2a6362733 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/DeleteOAuthProviderCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/DeleteOAuthProviderCmd.java @@ -19,7 +19,6 @@ 
import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.oauth2.OAuth2AuthManager; import org.apache.cloudstack.oauth2.api.response.OauthProviderResponse; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -35,7 +34,6 @@ @APICommand(name = "deleteOauthProvider", description = "Deletes the registered OAuth provider", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0") public class DeleteOAuthProviderCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(DeleteOAuthProviderCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/ListOAuthProvidersCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/ListOAuthProvidersCmd.java index 597283ae33e1..abdbf65dbb42 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/ListOAuthProvidersCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/ListOAuthProvidersCmd.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.oauth2.api.response.OauthProviderResponse; import org.apache.cloudstack.oauth2.vo.OauthProviderVO; import org.apache.commons.lang.ArrayUtils; -import org.apache.log4j.Logger; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -49,7 +48,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, since = "4.19.0") public class ListOAuthProvidersCmd extends BaseListCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(ListOAuthProvidersCmd.class.getName()); 
///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -141,7 +139,7 @@ public void setAuthenticators(List authenticators) { } } if (_oauth2mgr == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for listing OAuth providers"); + logger.error("No suitable Pluggable Authentication Manager found for listing OAuth providers"); } } } diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/OauthLoginAPIAuthenticatorCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/OauthLoginAPIAuthenticatorCmd.java index 928fa76780a3..f9a1d10d3526 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/OauthLoginAPIAuthenticatorCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/OauthLoginAPIAuthenticatorCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.response.LoginCmdResponse; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.jetbrains.annotations.Nullable; import javax.inject.Inject; @@ -54,8 +53,6 @@ requestHasSensitiveInfo = true, responseObject = LoginCmdResponse.class, entityType = {}, since = "4.19.0") public class OauthLoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(OauthLoginAPIAuthenticatorCmd.class.getName()); - ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @@ -169,8 +166,8 @@ private String doOauthAuthentication(HttpSession session, Long domainId, String "failed to authenticate user, check if username/password are correct"); auditTrailSb.append(" " + ApiErrorCode.ACCOUNT_ERROR + " " + msg); 
serializedResponse = _apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), msg, params, responseType); - if (s_logger.isTraceEnabled()) { - s_logger.trace(msg); + if (logger.isTraceEnabled()) { + logger.trace(msg); } } @@ -194,7 +191,7 @@ protected Long getDomainIdFromParams(Map params, StringBuilder } auditTrailSb.append(" domainid=" + domainId);// building the params for POST call } catch (final NumberFormatException e) { - s_logger.warn("Invalid domain id entered by user"); + logger.warn("Invalid domain id entered by user"); auditTrailSb.append(" " + HttpServletResponse.SC_UNAUTHORIZED + " " + "Invalid domain id entered, please enter a valid one"); throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, _apiServer.getSerializedApiError(HttpServletResponse.SC_UNAUTHORIZED, "Invalid domain id entered, please enter a valid one", params, diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/UpdateOAuthProviderCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/UpdateOAuthProviderCmd.java index b38423ffd485..1c79b7b144c8 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/UpdateOAuthProviderCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/UpdateOAuthProviderCmd.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.oauth2.OAuth2AuthManager; import org.apache.cloudstack.oauth2.api.response.OauthProviderResponse; import org.apache.cloudstack.oauth2.vo.OauthProviderVO; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -38,7 +37,6 @@ @APICommand(name = "updateOauthProvider", description = "Updates the registered OAuth provider details", responseObject = OauthProviderResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.19.0") public 
final class UpdateOAuthProviderCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(UpdateOAuthProviderCmd.class.getName()); ///////////////////////////////////////////////////// diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java index 5dbeef10dcb4..bd49f87d6273 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.oauth2.OAuth2AuthManager; import org.apache.cloudstack.oauth2.api.response.OauthProviderResponse; import org.apache.commons.lang.ArrayUtils; -import org.apache.log4j.Logger; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -46,7 +45,6 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, since = "4.19.0") public class VerifyOAuthCodeAndGetUserCmd extends BaseListCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(VerifyOAuthCodeAndGetUserCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -124,7 +122,7 @@ public void setAuthenticators(List authenticators) { } } if (_oauth2mgr == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for listing OAuth providers"); + logger.error("No suitable Pluggable Authentication Manager found for listing OAuth providers"); } } } diff --git 
a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/google/GoogleOAuth2Provider.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/google/GoogleOAuth2Provider.java index aa0fc93776dd..42ed1451ccd5 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/google/GoogleOAuth2Provider.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/google/GoogleOAuth2Provider.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.oauth2.dao.OauthProviderDao; import org.apache.cloudstack.oauth2.vo.OauthProviderVO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.io.IOException; @@ -40,7 +39,6 @@ import java.util.List; public class GoogleOAuth2Provider extends AdapterBase implements UserOAuth2Authenticator { - private static final Logger s_logger = Logger.getLogger(GoogleOAuth2Provider.class); protected String accessToken = null; protected String refreshToken = null; diff --git a/plugins/user-authenticators/pbkdf2/src/main/java/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java b/plugins/user-authenticators/pbkdf2/src/main/java/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java index 3c2521f34306..edb7d338db91 100644 --- a/plugins/user-authenticators/pbkdf2/src/main/java/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java +++ b/plugins/user-authenticators/pbkdf2/src/main/java/org/apache/cloudstack/server/auth/PBKDF2UserAuthenticator.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.auth.UserAuthenticator; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.bouncycastle.crypto.PBEParametersGenerator; import org.bouncycastle.crypto.generators.PKCS5S2ParametersGenerator; import org.bouncycastle.crypto.params.KeyParameter; @@ -41,7 +40,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class 
PBKDF2UserAuthenticator extends AdapterBase implements UserAuthenticator { - public static final Logger s_logger = Logger.getLogger(PBKDF2UserAuthenticator.class); private static final int s_saltlen = 64; private static final int s_rounds = 100000; private static final int s_keylen = 512; @@ -51,12 +49,12 @@ public class PBKDF2UserAuthenticator extends AdapterBase implements UserAuthenti @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isAnyEmpty(username, password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } @@ -65,7 +63,7 @@ public Pair authenticat if (user != null) { isValidUser = true; } else { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); } byte[] salt = new byte[0]; @@ -74,7 +72,7 @@ public Pair authenticat if (isValidUser) { String[] storedPassword = user.getPassword().split(":"); if ((storedPassword.length != 3) || (!StringUtils.isNumeric(storedPassword[2]))) { - s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); + logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); isValidUser = false; } else { // Encoding format = :: @@ -114,7 +112,7 @@ public String encode(String password) } catch (UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable to hash password", e); } catch (InvalidKeySpecException e) { - s_logger.error("Exception in EncryptUtil.createKey ", e); + logger.error("Exception in EncryptUtil.createKey ", e); throw new CloudRuntimeException("Unable to hash 
password", e); } } diff --git a/plugins/user-authenticators/plain-text/src/main/java/org/apache/cloudstack/auth/PlainTextUserAuthenticator.java b/plugins/user-authenticators/plain-text/src/main/java/org/apache/cloudstack/auth/PlainTextUserAuthenticator.java index f38e88b76dbd..4e3d402222e7 100644 --- a/plugins/user-authenticators/plain-text/src/main/java/org/apache/cloudstack/auth/PlainTextUserAuthenticator.java +++ b/plugins/user-authenticators/plain-text/src/main/java/org/apache/cloudstack/auth/PlainTextUserAuthenticator.java @@ -20,7 +20,6 @@ import javax.inject.Inject; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.UserAccount; import com.cloud.user.dao.UserAccountDao; @@ -28,30 +27,29 @@ import com.cloud.utils.component.AdapterBase; public class PlainTextUserAuthenticator extends AdapterBase implements UserAuthenticator { - public static final Logger s_logger = Logger.getLogger(PlainTextUserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isAnyEmpty(username, password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair<>(false, null); } UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user == null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); return new Pair<>(false, null); } if (!user.getPassword().equals(password)) { - s_logger.debug("Password does not match"); + logger.debug("Password does not match"); return new Pair<>(false, 
ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT); } return new Pair<>(true, null); diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/AuthorizeSAMLSSOCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/AuthorizeSAMLSSOCmd.java index 9a7dadcdb8a9..c5f48d61c6fb 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/AuthorizeSAMLSSOCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/AuthorizeSAMLSSOCmd.java @@ -31,13 +31,11 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.saml.SAML2AuthManager; -import org.apache.log4j.Logger; import javax.inject.Inject; @APICommand(name = "authorizeSamlSso", description = "Allow or disallow a user to use SAML SSO", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class AuthorizeSAMLSSOCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(AuthorizeSAMLSSOCmd.class.getName()); @Inject diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/GetServiceProviderMetaDataCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/GetServiceProviderMetaDataCmd.java index e462e33cbfeb..50b075b44489 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/GetServiceProviderMetaDataCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/GetServiceProviderMetaDataCmd.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.saml.SAML2AuthManager; import org.apache.cloudstack.saml.SAMLProviderMetadata; import org.apache.cloudstack.utils.security.ParserUtils; -import org.apache.log4j.Logger; import org.opensaml.Configuration; import 
org.opensaml.DefaultBootstrap; import org.opensaml.common.xml.SAMLConstants; @@ -95,7 +94,6 @@ @APICommand(name = "getSPMetadata", description = "Returns SAML2 CloudStack Service Provider MetaData", responseObject = SAMLMetaDataResponse.class, entityType = {}) public class GetServiceProviderMetaDataCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(GetServiceProviderMetaDataCmd.class.getName()); private static final String s_name = "spmetadataresponse"; @Inject @@ -130,7 +128,7 @@ public String authenticate(String command, Map params, HttpSes try { DefaultBootstrap.bootstrap(); } catch (ConfigurationException | FactoryConfigurationError e) { - s_logger.error("OpenSAML Bootstrapping error: " + e.getMessage()); + logger.error("OpenSAML Bootstrapping error: " + e.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, _apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), "OpenSAML Bootstrapping error while creating SP MetaData", params, responseType)); @@ -167,7 +165,7 @@ public String authenticate(String command, Map params, HttpSes spSSODescriptor.getKeyDescriptors().add(signKeyDescriptor); spSSODescriptor.getKeyDescriptors().add(encKeyDescriptor); } catch (SecurityException e) { - s_logger.warn("Unable to add SP X509 descriptors:" + e.getMessage()); + logger.warn("Unable to add SP X509 descriptors:" + e.getMessage()); } NameIDFormat nameIDFormat = new NameIDFormatBuilder().buildObject(); @@ -281,7 +279,7 @@ public void setAuthenticators(List authenticators) { } } if (_samlAuthManager == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for SAML2 getSPMetadata Cmd"); + logger.error("No suitable Pluggable Authentication Manager found for SAML2 getSPMetadata Cmd"); } } } diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListAndSwitchSAMLAccountCmd.java 
b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListAndSwitchSAMLAccountCmd.java index 25f056adf686..3e6b093abe13 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListAndSwitchSAMLAccountCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListAndSwitchSAMLAccountCmd.java @@ -46,7 +46,6 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.saml.SAML2AuthManager; import org.apache.cloudstack.saml.SAMLUtils; -import org.apache.log4j.Logger; import com.cloud.api.response.ApiResponseSerializer; import com.cloud.domain.Domain; @@ -62,7 +61,6 @@ @APICommand(name = "listAndSwitchSamlAccount", description = "Lists and switches to other SAML accounts owned by the SAML user", responseObject = SuccessResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListAndSwitchSAMLAccountCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(ListAndSwitchSAMLAccountCmd.class.getName()); @Inject ApiServerService _apiServer; @@ -155,7 +153,7 @@ public String authenticate(final String command, final Map par return ApiResponseSerializer.toSerializedString(loginResponse, responseType); } } catch (CloudAuthenticationException | IOException exception) { - s_logger.debug("Failed to switch to request SAML user account due to: " + exception.getMessage()); + logger.debug("Failed to switch to request SAML user account due to: " + exception.getMessage()); } } else { List switchableAccounts = _userAccountDao.getAllUsersByNameAndEntity(currentUserAccount.getUsername(), currentUserAccount.getExternalEntity()); @@ -198,7 +196,7 @@ public void setAuthenticators(List authenticators) { } } if (_samlAuthManager == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for SAML2 listAndSwitchSamlAccount Cmd"); + logger.error("No suitable 
Pluggable Authentication Manager found for SAML2 listAndSwitchSamlAccount Cmd"); } } diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListIdpsCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListIdpsCmd.java index b61eae4382a9..09e5f1b6557e 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListIdpsCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListIdpsCmd.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.saml.SAML2AuthManager; import org.apache.cloudstack.saml.SAMLProviderMetadata; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.servlet.http.HttpServletRequest; @@ -43,7 +42,6 @@ @APICommand(name = "listIdps", description = "Returns list of discovered SAML Identity Providers", responseObject = IdpResponse.class, entityType = {}) public class ListIdpsCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(ListIdpsCmd.class.getName()); @Inject ApiServerService _apiServer; @@ -102,7 +100,7 @@ public void setAuthenticators(List authenticators) { } } if (_samlAuthManager == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for SAML2 Login Cmd"); + logger.error("No suitable Pluggable Authentication Manager found for SAML2 Login Cmd"); } } } diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListSamlAuthorizationCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListSamlAuthorizationCmd.java index db08ae0cce85..d400fadf2616 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListSamlAuthorizationCmd.java +++ 
b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/ListSamlAuthorizationCmd.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.SamlAuthorizationResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.ArrayList; @@ -37,7 +36,6 @@ @APICommand(name = "listSamlAuthorization", description = "Lists authorized users who can used SAML SSO", responseObject = SamlAuthorizationResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListSamlAuthorizationCmd extends BaseListCmd { - public static final Logger s_logger = Logger.getLogger(ListSamlAuthorizationCmd.class.getName()); private static final String s_name = "listsamlauthorizationsresponse"; @Inject diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java index 6bb3e788a95c..fb4f4cc00a52 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LoginAPIAuthenticatorCmd.java @@ -47,7 +47,6 @@ import org.apache.cloudstack.saml.SAMLTokenVO; import org.apache.cloudstack.saml.SAMLUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.opensaml.DefaultBootstrap; import org.opensaml.saml2.core.Assertion; import org.opensaml.saml2.core.EncryptedAssertion; @@ -81,7 +80,6 @@ @APICommand(name = "samlSso", description = "SP initiated SAML Single Sign On", requestHasSensitiveInfo = true, responseObject = LoginCmdResponse.class, entityType = {}) public class SAML2LoginAPIAuthenticatorCmd extends BaseCmd implements 
APIAuthenticator, Configurable { - public static final Logger s_logger = Logger.getLogger(SAML2LoginAPIAuthenticatorCmd.class.getName()); private static final String s_name = "loginresponse"; ///////////////////////////////////////////////////// @@ -139,7 +137,7 @@ public Response processSAMLResponse(String responseMessage) { responseObject = SAMLUtils.decodeSAMLResponse(responseMessage); } catch (ConfigurationException | FactoryConfigurationError | ParserConfigurationException | SAXException | IOException | UnmarshallingException e) { - s_logger.error("SAMLResponse processing error: " + e.getMessage()); + logger.error("SAMLResponse processing error: " + e.getMessage()); } return responseObject; } @@ -183,7 +181,7 @@ public String authenticate(final String command, final Map par } String authnId = SAMLUtils.generateSecureRandomId(); samlAuthManager.saveToken(authnId, domainPath, idpMetadata.getEntityId()); - s_logger.debug("Sending SAMLRequest id=" + authnId); + logger.debug("Sending SAMLRequest id=" + authnId); String redirectUrl = SAMLUtils.buildAuthnRequestUrl(authnId, spMetadata, idpMetadata, SAML2AuthManager.SAMLSignatureAlgorithm.value()); resp.sendRedirect(redirectUrl); return ""; @@ -207,7 +205,7 @@ public String authenticate(final String command, final Map par SAMLProviderMetadata idpMetadata = samlAuthManager.getIdPMetadata(issuer.getValue()); String responseToId = processedSAMLResponse.getInResponseTo(); - s_logger.debug("Received SAMLResponse in response to id=" + responseToId); + logger.debug("Received SAMLResponse in response to id=" + responseToId); SAMLTokenVO token = samlAuthManager.getToken(responseToId); if (token != null) { if (!(token.getEntity().equalsIgnoreCase(issuer.getValue()))) { @@ -232,7 +230,7 @@ public String authenticate(final String command, final Map par try { validator.validate(sig); } catch (ValidationException e) { - s_logger.error("SAML Response's signature failed to be validated by IDP signing key:" + e.getMessage()); + 
logger.error("SAML Response's signature failed to be validated by IDP signing key:" + e.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), "SAML Response's signature failed to be validated by IDP signing key", params, responseType)); @@ -266,7 +264,7 @@ public String authenticate(final String command, final Map par try { assertion = decrypter.decrypt(encryptedAssertion); } catch (DecryptionException e) { - s_logger.warn("SAML EncryptedAssertion error: " + e.toString()); + logger.warn("SAML EncryptedAssertion error: " + e.toString()); } if (assertion == null) { continue; @@ -279,7 +277,7 @@ public String authenticate(final String command, final Map par try { validator.validate(encSig); } catch (ValidationException e) { - s_logger.error("SAML Response's signature failed to be validated by IDP signing key:" + e.getMessage()); + logger.error("SAML Response's signature failed to be validated by IDP signing key:" + e.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), "SAML Response's signature failed to be validated by IDP signing key", params, responseType)); @@ -324,7 +322,7 @@ public String authenticate(final String command, final Map par return ApiResponseSerializer.toSerializedString(loginResponse, responseType); } } catch (CloudAuthenticationException | IOException exception) { - s_logger.debug("SAML Login failed to log in the user due to: " + exception.getMessage()); + logger.debug("SAML Login failed to log in the user due to: " + exception.getMessage()); } } } catch (IOException e) { @@ -367,7 +365,7 @@ public void setAuthenticators(List authenticators) { } } if (samlAuthManager == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for SAML2 Login Cmd"); + logger.error("No suitable Pluggable Authentication Manager found for SAML2 Login Cmd"); } } diff 
--git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LogoutAPIAuthenticatorCmd.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LogoutAPIAuthenticatorCmd.java index ccdc4b6bd5d8..ca46bef4b5a3 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LogoutAPIAuthenticatorCmd.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/api/command/SAML2LogoutAPIAuthenticatorCmd.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.saml.SAMLPluginConstants; import org.apache.cloudstack.saml.SAMLProviderMetadata; import org.apache.cloudstack.saml.SAMLUtils; -import org.apache.log4j.Logger; import org.opensaml.DefaultBootstrap; import org.opensaml.saml2.core.LogoutRequest; import org.opensaml.saml2.core.Response; @@ -54,7 +53,6 @@ @APICommand(name = "samlSlo", description = "SAML Global Log Out API", responseObject = LogoutCmdResponse.class, entityType = {}) public class SAML2LogoutAPIAuthenticatorCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(SAML2LogoutAPIAuthenticatorCmd.class.getName()); private static final String s_name = "logoutresponse"; @Inject @@ -94,7 +92,7 @@ public String authenticate(String command, Map params, HttpSes try { resp.sendRedirect(SAML2AuthManager.SAMLCloudStackRedirectionUrl.value()); } catch (IOException ignored) { - s_logger.info("[ignored] sending redirected failed.", ignored); + logger.info("[ignored] sending redirected failed.", ignored); } return responseString; } @@ -102,7 +100,7 @@ public String authenticate(String command, Map params, HttpSes try { DefaultBootstrap.bootstrap(); } catch (ConfigurationException | FactoryConfigurationError e) { - s_logger.error("OpenSAML Bootstrapping error: " + e.getMessage()); + logger.error("OpenSAML Bootstrapping error: " + e.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, 
_apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), "OpenSAML Bootstrapping error while creating SP MetaData", params, responseType)); @@ -119,12 +117,12 @@ public String authenticate(String command, Map params, HttpSes params, responseType)); } } catch (ConfigurationException | FactoryConfigurationError | ParserConfigurationException | SAXException | IOException | UnmarshallingException e) { - s_logger.error("SAMLResponse processing error: " + e.getMessage()); + logger.error("SAMLResponse processing error: " + e.getMessage()); } try { resp.sendRedirect(SAML2AuthManager.SAMLCloudStackRedirectionUrl.value()); } catch (IOException ignored) { - s_logger.info("[ignored] second redirected sending failed.", ignored); + logger.info("[ignored] second redirected sending failed.", ignored); } return responseString; } @@ -136,7 +134,7 @@ public String authenticate(String command, Map params, HttpSes try { resp.sendRedirect(SAML2AuthManager.SAMLCloudStackRedirectionUrl.value()); } catch (IOException ignored) { - s_logger.info("[ignored] final redirected failed.", ignored); + logger.info("[ignored] final redirected failed.", ignored); } return responseString; } @@ -146,7 +144,7 @@ public String authenticate(String command, Map params, HttpSes String redirectUrl = idpMetadata.getSloUrl() + "?SAMLRequest=" + SAMLUtils.encodeSAMLRequest(logoutRequest); resp.sendRedirect(redirectUrl); } catch (MarshallingException | IOException e) { - s_logger.error("SAML SLO error: " + e.getMessage()); + logger.error("SAML SLO error: " + e.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, _apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), "SAML Single Logout Error", params, responseType)); @@ -167,7 +165,7 @@ public void setAuthenticators(List authenticators) { } } if (_samlAuthManager == null) { - s_logger.error("No suitable Pluggable Authentication Manager found for SAML2 Login Cmd"); + logger.error("No suitable Pluggable 
Authentication Manager found for SAML2 Login Cmd"); } } } diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java index ba85b151eea9..0e8790d65586 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2AuthManagerImpl.java @@ -57,7 +57,6 @@ import org.apache.cloudstack.utils.security.CertUtils; import org.apache.commons.codec.binary.Base64; import org.apache.commons.httpclient.HttpClient; -import org.apache.log4j.Logger; import org.bouncycastle.operator.OperatorCreationException; import org.opensaml.DefaultBootstrap; import org.opensaml.common.xml.SAMLConstants; @@ -92,7 +91,6 @@ @Component public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManager, Configurable { - private static final Logger s_logger = Logger.getLogger(SAML2AuthManagerImpl.class); private SAMLProviderMetadata _spMetadata = new SAMLProviderMetadata(); private Map _idpMetadataMap = new HashMap(); @@ -123,10 +121,10 @@ public String getSAMLIdentityProviderMetadataURL(){ @Override public boolean start() { if (isSAMLPluginEnabled()) { - s_logger.info("SAML auth plugin loaded"); + logger.info("SAML auth plugin loaded"); return setup(); } else { - s_logger.info("SAML auth plugin not enabled so not loading"); + logger.info("SAML auth plugin not enabled so not loading"); return super.start(); } } @@ -148,9 +146,9 @@ protected boolean initSP() { SAMLUtils.encodePrivateKey(keyPair.getPrivate()), SAMLUtils.encodePublicKey(keyPair.getPublic()), "samlsp-keypair"); keyStoreVO = _ksDao.findByName(SAMLPluginConstants.SAMLSP_KEYPAIR); - s_logger.info("No SAML keystore found, created and saved a new Service Provider keypair"); + logger.info("No SAML keystore found, created and saved a new 
Service Provider keypair"); } catch (final NoSuchProviderException | NoSuchAlgorithmException e) { - s_logger.error("Unable to create and save SAML keypair, due to: ", e); + logger.error("Unable to create and save SAML keypair, due to: ", e); } } @@ -179,7 +177,7 @@ protected boolean initSP() { _ksDao.save(SAMLPluginConstants.SAMLSP_X509CERT, Base64.encodeBase64String(bos.toByteArray()), "", "samlsp-x509cert"); bos.close(); } catch (final NoSuchAlgorithmException | NoSuchProviderException | CertificateException | SignatureException | InvalidKeyException | IOException | OperatorCreationException e) { - s_logger.error("SAML plugin won't be able to use X509 signed authentication", e); + logger.error("SAML plugin won't be able to use X509 signed authentication", e); } } else { try { @@ -188,7 +186,7 @@ protected boolean initSP() { spX509Key = (X509Certificate) si.readObject(); bi.close(); } catch (IOException | ClassNotFoundException ignored) { - s_logger.error("SAML Plugin won't be able to use X509 signed authentication. Failed to load X509 Certificate from Database."); + logger.error("SAML Plugin won't be able to use X509 signed authentication. 
Failed to load X509 Certificate from Database."); } } } @@ -215,7 +213,7 @@ protected boolean initSP() { private void addIdpToMap(EntityDescriptor descriptor, Map idpMap) { SAMLProviderMetadata idpMetadata = new SAMLProviderMetadata(); idpMetadata.setEntityId(descriptor.getEntityID()); - s_logger.debug("Adding IdP to the list of discovered IdPs: " + descriptor.getEntityID()); + logger.debug("Adding IdP to the list of discovered IdPs: " + descriptor.getEntityID()); if (descriptor.getOrganization() != null) { if (descriptor.getOrganization().getDisplayNames() != null) { for (OrganizationDisplayName orgName : descriptor.getOrganization().getDisplayNames()) { @@ -289,21 +287,21 @@ private void addIdpToMap(EntityDescriptor descriptor, Map metadataMap = new HashMap(); try { discoverAndAddIdp(_idpMetaDataProvider.getMetadata(), metadataMap); _idpMetadataMap = metadataMap; expireTokens(); - s_logger.debug("Finished refreshing SAML Metadata and expiring old auth tokens"); + logger.debug("Finished refreshing SAML Metadata and expiring old auth tokens"); } catch (MetadataProviderException e) { - s_logger.warn("SAML Metadata Refresh task failed with exception: " + e.getMessage()); + logger.warn("SAML Metadata Refresh task failed with exception: " + e.getMessage()); } } @@ -363,7 +361,7 @@ public void run() { private boolean setup() { if (!initSP()) { - s_logger.error("SAML Plugin failed to initialize, please fix the configuration and restart management server"); + logger.error("SAML Plugin failed to initialize, please fix the configuration and restart management server"); return false; } _timer = new Timer(); @@ -379,11 +377,11 @@ private boolean setup() { } else { File metadataFile = PropertiesUtil.findConfigFile(idpMetaDataUrl); if (metadataFile == null) { - s_logger.error("Provided Metadata is not a URL, Unable to locate metadata file from local path: " + idpMetaDataUrl); + logger.error("Provided Metadata is not a URL, Unable to locate metadata file from local path: " + 
idpMetaDataUrl); return false; } else{ - s_logger.debug("Provided Metadata is not a URL, trying to read metadata file from local path: " + metadataFile.getAbsolutePath()); + logger.debug("Provided Metadata is not a URL, trying to read metadata file from local path: " + metadataFile.getAbsolutePath()); _idpMetaDataProvider = new FilesystemMetadataProvider(_timer, metadataFile); } } @@ -393,14 +391,14 @@ private boolean setup() { _timer.scheduleAtFixedRate(new MetadataRefreshTask(), 0, _refreshInterval * 1000); } catch (MetadataProviderException e) { - s_logger.error("Unable to read SAML2 IDP MetaData URL, error:" + e.getMessage()); - s_logger.error("SAML2 Authentication may be unavailable"); + logger.error("Unable to read SAML2 IDP MetaData URL, error:" + e.getMessage()); + logger.error("SAML2 Authentication may be unavailable"); return false; } catch (ConfigurationException | FactoryConfigurationError e) { - s_logger.error("OpenSAML bootstrapping failed: error: " + e.getMessage()); + logger.error("OpenSAML bootstrapping failed: error: " + e.getMessage()); return false; } catch (NullPointerException e) { - s_logger.error("Unable to setup SAML Auth Plugin due to NullPointerException" + + logger.error("Unable to setup SAML Auth Plugin due to NullPointerException" + " please check the SAML global settings: " + e.getMessage()); return false; } @@ -478,7 +476,7 @@ public void saveToken(String authnId, String domainPath, String entity) { if (_samlTokenDao.findByUuid(authnId) == null) { _samlTokenDao.persist(token); } else { - s_logger.warn("Duplicate SAML token for entity=" + entity + " token id=" + authnId + " domain=" + domainPath); + logger.warn("Duplicate SAML token for entity=" + entity + " token id=" + authnId + " domain=" + domainPath); } } diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2UserAuthenticator.java b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2UserAuthenticator.java index 
0a33bc111d56..6f9854ad17d2 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2UserAuthenticator.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAML2UserAuthenticator.java @@ -20,7 +20,6 @@ import org.apache.cloudstack.auth.UserAuthenticator; import org.apache.cxf.common.util.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.User; import com.cloud.user.UserAccount; @@ -30,7 +29,6 @@ import com.cloud.utils.component.AdapterBase; public class SAML2UserAuthenticator extends AdapterBase implements UserAuthenticator { - public static final Logger s_logger = Logger.getLogger(SAML2UserAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @@ -39,18 +37,18 @@ public class SAML2UserAuthenticator extends AdapterBase implements UserAuthentic @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying SAML2 auth for user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Trying SAML2 auth for user: " + username); } if (StringUtils.isEmpty(username) || StringUtils.isEmpty(password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair(false, null); } final UserAccount userAccount = _userAccountDao.getUserAccount(username, domainId); if (userAccount == null || userAccount.getSource() != User.Source.SAML2) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not SAML2"); + logger.debug("Unable to find user with " + username + " in domain " + domainId + ", or user source is not SAML2"); return new Pair(false, null); } else { User user = _userDao.getUser(userAccount.getId()); diff --git a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAMLUtils.java 
b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAMLUtils.java index f10bc891368e..7ffe07a8609f 100644 --- a/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAMLUtils.java +++ b/plugins/user-authenticators/saml2/src/main/java/org/apache/cloudstack/saml/SAMLUtils.java @@ -61,7 +61,8 @@ import org.apache.cloudstack.utils.security.CertUtils; import org.apache.cloudstack.utils.security.ParserUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.bouncycastle.operator.OperatorCreationException; import org.joda.time.DateTime; import org.opensaml.Configuration; @@ -104,7 +105,7 @@ import com.cloud.utils.HttpUtils; public class SAMLUtils { - public static final Logger s_logger = Logger.getLogger(SAMLUtils.class); + protected static Logger LOGGER = LogManager.getLogger(SAMLUtils.class); static final String charset = "abcdefghijklmnopqrstuvwxyz"; @@ -124,7 +125,7 @@ public static String getValueFromAttributeStatements(final List 0) { String value = attribute.getAttributeValues().get(0).getDOM().getTextContent(); - s_logger.debug("SAML attribute name: " + attribute.getName() + " friendly-name:" + attribute.getFriendlyName() + " value:" + value); + LOGGER.debug("SAML attribute name: " + attribute.getName() + " friendly-name:" + attribute.getFriendlyName() + " value:" + value); if (attributeKey.equals(attribute.getName()) || attributeKey.equals(attribute.getFriendlyName())) { return value; } @@ -159,7 +160,7 @@ public static String buildAuthnRequestUrl(final String authnId, final SAMLProvid String appendOperator = idpMetadata.getSsoUrl().contains("?") ? 
"&" : "?"; redirectUrl = idpMetadata.getSsoUrl() + appendOperator + SAMLUtils.generateSAMLRequestSignature("SAMLRequest=" + SAMLUtils.encodeSAMLRequest(authnRequest), privateKey, signatureAlgorithm); } catch (ConfigurationException | FactoryConfigurationError | MarshallingException | IOException | NoSuchAlgorithmException | InvalidKeyException | java.security.SignatureException e) { - s_logger.error("SAML AuthnRequest message building error: " + e.getMessage()); + LOGGER.error("SAML AuthnRequest message building error: " + e.getMessage()); } return redirectUrl; } @@ -311,7 +312,7 @@ public static String encodePublicKey(PublicKey key) { X509EncodedKeySpec spec = keyFactory.getKeySpec(key, X509EncodedKeySpec.class); return new String(org.bouncycastle.util.encoders.Base64.encode(spec.getEncoded()), Charset.forName("UTF-8")); } catch (InvalidKeySpecException e) { - s_logger.error("Unable to get KeyFactory:" + e.getMessage()); + LOGGER.error("Unable to get KeyFactory:" + e.getMessage()); } return null; } @@ -329,7 +330,7 @@ public static String encodePrivateKey(PrivateKey key) { PKCS8EncodedKeySpec.class); return new String(org.bouncycastle.util.encoders.Base64.encode(spec.getEncoded()), Charset.forName("UTF-8")); } catch (InvalidKeySpecException e) { - s_logger.error("Unable to get KeyFactory:" + e.getMessage()); + LOGGER.error("Unable to get KeyFactory:" + e.getMessage()); } return null; } @@ -348,7 +349,7 @@ public static PublicKey decodePublicKey(String publicKey) { try { return keyFactory.generatePublic(x509KeySpec); } catch (InvalidKeySpecException e) { - s_logger.error("Unable to create PublicKey from PublicKey string:" + e.getMessage()); + LOGGER.error("Unable to create PublicKey from PublicKey string:" + e.getMessage()); } return null; } @@ -367,7 +368,7 @@ public static PrivateKey decodePrivateKey(String privateKey) { try { return keyFactory.generatePrivate(pkscs8KeySpec); } catch (InvalidKeySpecException e) { - s_logger.error("Unable to create PrivateKey from 
privateKey string:" + e.getMessage()); + LOGGER.error("Unable to create PrivateKey from privateKey string:" + e.getMessage()); } return null; } diff --git a/plugins/user-authenticators/sha256salted/src/main/java/org/apache/cloudstack/auth/SHA256SaltedUserAuthenticator.java b/plugins/user-authenticators/sha256salted/src/main/java/org/apache/cloudstack/auth/SHA256SaltedUserAuthenticator.java index c6bdbe672fa0..0dbdf26f438e 100644 --- a/plugins/user-authenticators/sha256salted/src/main/java/org/apache/cloudstack/auth/SHA256SaltedUserAuthenticator.java +++ b/plugins/user-authenticators/sha256salted/src/main/java/org/apache/cloudstack/auth/SHA256SaltedUserAuthenticator.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.bouncycastle.util.encoders.Base64; import com.cloud.user.UserAccount; @@ -35,7 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SHA256SaltedUserAuthenticator extends AdapterBase implements UserAuthenticator { - public static final Logger s_logger = Logger.getLogger(SHA256SaltedUserAuthenticator.class); private static final String s_defaultPassword = "000000000000000000000000000="; private static final String s_defaultSalt = "0000000000000000000000000000000="; @Inject @@ -47,19 +45,19 @@ public class SHA256SaltedUserAuthenticator extends AdapterBase implements UserAu */ @Override public Pair authenticate(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Retrieving user: " + username); + if (logger.isDebugEnabled()) { + logger.debug("Retrieving user: " + username); } if (StringUtils.isAnyEmpty(username, password)) { - s_logger.debug("Username or Password cannot be empty"); + logger.debug("Username or Password cannot be empty"); return new Pair<>(false, null); } boolean realUser = true; UserAccount user = _userAccountDao.getUserAccount(username, domainId); if (user 
== null) { - s_logger.debug("Unable to find user with " + username + " in domain " + domainId); + logger.debug("Unable to find user with " + username + " in domain " + domainId); realUser = false; } /* Fake Data */ @@ -68,7 +66,7 @@ public Pair authenticate(String username, if (realUser) { String[] storedPassword = user.getPassword().split(":"); if (storedPassword.length != 2) { - s_logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); + logger.warn("The stored password for " + username + " isn't in the right format for this authenticator"); realUser = false; } else { realPassword = storedPassword[1]; diff --git a/plugins/user-two-factor-authenticators/static-pin/src/main/java/org/apache/cloudstack/auth/StaticPinUserTwoFactorAuthenticator.java b/plugins/user-two-factor-authenticators/static-pin/src/main/java/org/apache/cloudstack/auth/StaticPinUserTwoFactorAuthenticator.java index dd1b1580c351..b781f3265209 100644 --- a/plugins/user-two-factor-authenticators/static-pin/src/main/java/org/apache/cloudstack/auth/StaticPinUserTwoFactorAuthenticator.java +++ b/plugins/user-two-factor-authenticators/static-pin/src/main/java/org/apache/cloudstack/auth/StaticPinUserTwoFactorAuthenticator.java @@ -21,7 +21,6 @@ import com.cloud.user.UserAccount; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.dao.UserAccountDao; import com.cloud.utils.component.AdapterBase; @@ -29,7 +28,6 @@ import java.security.SecureRandom; public class StaticPinUserTwoFactorAuthenticator extends AdapterBase implements UserTwoFactorAuthenticator { - public static final Logger s_logger = Logger.getLogger(StaticPinUserTwoFactorAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @@ -48,7 +46,7 @@ public String getDescription() { public void check2FA(String code, UserAccount userAccount) throws CloudTwoFactorAuthenticationException { 
String expectedCode = getStaticPin(userAccount); if (expectedCode.equals(code)) { - s_logger.info("2FA matches user's input"); + logger.info("2FA matches user's input"); return; } throw new CloudTwoFactorAuthenticationException("two-factor authentication code provided is invalid"); diff --git a/plugins/user-two-factor-authenticators/totp/src/main/java/org/apache/cloudstack/auth/TotpUserTwoFactorAuthenticator.java b/plugins/user-two-factor-authenticators/totp/src/main/java/org/apache/cloudstack/auth/TotpUserTwoFactorAuthenticator.java index bb6939ad14f4..c7c4997f1894 100644 --- a/plugins/user-two-factor-authenticators/totp/src/main/java/org/apache/cloudstack/auth/TotpUserTwoFactorAuthenticator.java +++ b/plugins/user-two-factor-authenticators/totp/src/main/java/org/apache/cloudstack/auth/TotpUserTwoFactorAuthenticator.java @@ -26,7 +26,6 @@ import org.apache.commons.codec.binary.Base32; import org.apache.commons.codec.binary.Hex; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.user.dao.UserAccountDao; import com.cloud.utils.component.AdapterBase; @@ -34,7 +33,6 @@ import java.security.SecureRandom; public class TotpUserTwoFactorAuthenticator extends AdapterBase implements UserTwoFactorAuthenticator { - public static final Logger s_logger = Logger.getLogger(TotpUserTwoFactorAuthenticator.class); @Inject private UserAccountDao _userAccountDao; @@ -53,7 +51,7 @@ public String getDescription() { public void check2FA(String code, UserAccount userAccount) throws CloudTwoFactorAuthenticationException { String expectedCode = get2FACode(get2FAKey(userAccount)); if (expectedCode.equals(code)) { - s_logger.info("2FA matches user's input"); + logger.info("2FA matches user's input"); return; } throw new CloudTwoFactorAuthenticationException("two-factor authentication code provided is invalid"); diff --git a/pom.xml b/pom.xml index e6845f387179..3030302c24e7 100644 --- a/pom.xml +++ b/pom.xml @@ -81,9 +81,9 @@ 4.4.1 + 2.19.0 1.2.25 
1.2.17 - 1.1.1 1.15 @@ -436,6 +436,21 @@ jstl ${cs.jstl.version} + + org.apache.logging.log4j + log4j-core + ${cs.log4j.version} + + + org.apache.logging.log4j + log4j-api + ${cs.log4j.version} + + + ch.qos.reload4j + reload4j + ${cs.reload4j.version} + log4j apache-log4j-extras @@ -447,11 +462,6 @@ - - ch.qos.reload4j - reload4j - ${cs.reload4j.version} - mysql mysql-connector-java diff --git a/server/conf/log4j-cloud.xml.in b/server/conf/log4j-cloud.xml.in index b75a4799942d..d466f7068b71 100755 --- a/server/conf/log4j-cloud.xml.in +++ b/server/conf/log4j-cloud.xml.in @@ -17,143 +17,105 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/server/src/main/java/com/cloud/acl/DomainChecker.java b/server/src/main/java/com/cloud/acl/DomainChecker.java index a8c9ab84f7ec..729c7a9e43a8 100644 --- a/server/src/main/java/com/cloud/acl/DomainChecker.java +++ b/server/src/main/java/com/cloud/acl/DomainChecker.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.query.QueryService; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; @@ -99,7 +98,6 @@ public class DomainChecker extends AdapterBase implements SecurityChecker { @Inject private AccountService accountService; - public static final Logger s_logger = 
Logger.getLogger(DomainChecker.class.getName()); protected DomainChecker() { super(); } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 862e8acc9e0b..1e92a80a2805 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -27,7 +27,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.manager.allocator.HostAllocator; @@ -70,7 +69,6 @@ */ @Component public class FirstFitAllocator extends AdapterBase implements HostAllocator { - private static final Logger s_logger = Logger.getLogger(FirstFitAllocator.class); @Inject protected HostDao _hostDao = null; @Inject @@ -124,7 +122,7 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla isVMDeployedWithUefi = true; } } - s_logger.info(" Guest VM is requested with Custom[UEFI] Boot Type "+ isVMDeployedWithUefi); + logger.info(" Guest VM is requested with Custom[UEFI] Boot Type "+ isVMDeployedWithUefi); if (type == Host.Type.Storage) { @@ -132,8 +130,8 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla return new ArrayList(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); } String hostTagOnOffering = offering.getHostTag(); @@ -147,8 +145,8 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla List hostsMatchingUefiTag = new ArrayList(); if(isVMDeployedWithUefi){ hostsMatchingUefiTag = 
_hostDao.listByHostCapability(type, clusterId, podId, dcId, Host.HOST_UEFI_ENABLE); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagUefi + "' are:" + hostsMatchingUefiTag); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagUefi + "' are:" + hostsMatchingUefiTag); } } @@ -163,28 +161,28 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla List hostsMatchingOfferingTag = new ArrayList(); List hostsMatchingTemplateTag = new ArrayList(); if (hasSvcOfferingTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); } hostsMatchingOfferingTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag); } } if (hasTemplateTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); } hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsMatchingTemplateTag); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsMatchingTemplateTag); } } if (hasSvcOfferingTag && hasTemplateTag) { hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + 
hostsMatchingOfferingTag.size() + " Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag); + if (logger.isDebugEnabled()) { + logger.debug("Found " + hostsMatchingOfferingTag.size() + " Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag); } clusterHosts = hostsMatchingOfferingTag; @@ -206,7 +204,7 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla if (clusterHosts.isEmpty()) { - s_logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTagOnOffering)); + logger.error(String.format("No suitable host found for vm [%s] with tags [%s].", vmProfile, hostTagOnOffering)); throw new CloudRuntimeException(String.format("No suitable host found for vm [%s].", vmProfile)); } // add all hosts that we are not considering to the avoid list @@ -250,25 +248,25 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla hostsCopy.retainAll(_resourceMgr.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId)); } else { if (hasSvcOfferingTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); } hostsCopy.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsCopy); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsCopy); } } if (hasTemplateTag) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); + if (logger.isDebugEnabled()) { + logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); } hostsCopy.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, 
hostTagOnTemplate)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsCopy); + if (logger.isDebugEnabled()) { + logger.debug("Hosts with tag '" + hostTagOnTemplate + "' are:" + hostsCopy); } } } @@ -294,20 +292,20 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V hosts = reorderHostsByCapacity(plan, hosts); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: " + hosts); + if (logger.isDebugEnabled()) { + logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: " + hosts); } // We will try to reorder the host lists such that we give priority to hosts that have // the minimums to support a VM's requirements hosts = prioritizeHosts(template, offering, hosts); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + hosts.size() + " hosts for allocation after prioritization: " + hosts); + if (logger.isDebugEnabled()) { + logger.debug("Found " + hosts.size() + " hosts for allocation after prioritization: " + hosts); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize() + " MB"); + if (logger.isDebugEnabled()) { + logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize() + " MB"); } long serviceOfferingId = offering.getId(); @@ -319,16 +317,16 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V break; } if (avoid.shouldAvoid(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " is in avoid set, skipping this and trying other available 
hosts"); } continue; } //find number of guest VMs occupying capacity on this host. if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + + if (logger.isDebugEnabled()) { + logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); } avoid.addHost(host.getId()); @@ -339,27 +337,27 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.pciDevice.toString()); if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - s_logger.info("Host name: " + host.getName() + ", hostId: "+ host.getId() +" does not have required GPU devices available"); + logger.info("Host name: " + host.getName() + ", hostId: "+ host.getId() +" does not have required GPU devices available"); avoid.addHost(host.getId()); continue; } } Pair cpuCapabilityAndCapacity = _capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(host, offering, considerReservedCapacity); if (cpuCapabilityAndCapacity.first() && cpuCapabilityAndCapacity.second()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a suitable host, adding to list: " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found a suitable host, adding to list: " + host.getId()); } suitableHosts.add(host); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" 
+ cpuCapabilityAndCapacity.second()); + if (logger.isDebugEnabled()) { + logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity?" + cpuCapabilityAndCapacity.second()); } avoid.addHost(host.getId()); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host Allocator returning " + suitableHosts.size() + " suitable hosts"); + if (logger.isDebugEnabled()) { + logger.debug("Host Allocator returning " + suitableHosts.size() + " suitable hosts"); } return suitableHosts; @@ -376,8 +374,8 @@ private List reorderHostsByCapacity(DeploymentPlan plan, List hostIdsByFreeCapacity = _capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of hosts in descending order of free capacity in the cluster: "+ hostIdsByFreeCapacity); + if (logger.isDebugEnabled()) { + logger.debug("List of hosts in descending order of free capacity in the cluster: "+ hostIdsByFreeCapacity); } //now filter the given list of Hosts by this ordered list @@ -406,8 +404,8 @@ private List reorderHostsByNumberOfVms(DeploymentPlan plan, List Long clusterId = plan.getClusterId(); List hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of hosts in ascending order of number of VMs: " + hostIdsByVmCount); + if (logger.isDebugEnabled()) { + logger.debug("List of hosts in ascending order of number of VMs: " + hostIdsByVmCount); } //now filter the given list of Hosts by this ordered list @@ -459,9 +457,9 @@ protected List prioritizeHosts(VMTemplateVO template, ServiceOff hostsToCheck.addAll(hosts); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (noHvmHosts.size() > 0) { - s_logger.debug("Not considering hosts: " + noHvmHosts + " to deploy template: " + template + " as they are not HVM enabled"); + logger.debug("Not considering 
hosts: " + noHvmHosts + " to deploy template: " + template + " as they are not HVM enabled"); } } // If a host is tagged with the same guest OS category as the template, move it to a high priority list diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitRoutingAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitRoutingAllocator.java index 4f2f391494d9..8b7c2b38a445 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitRoutingAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitRoutingAllocator.java @@ -19,20 +19,18 @@ import java.util.ArrayList; import java.util.List; - -import org.apache.log4j.NDC; - import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.vm.VirtualMachineProfile; +import org.apache.logging.log4j.ThreadContext; public class FirstFitRoutingAllocator extends FirstFitAllocator { @Override public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) { try { - NDC.push("FirstFitRoutingAllocator"); + ThreadContext.push("FirstFitRoutingAllocator"); if (type != Host.Type.Routing) { // FirstFitRoutingAllocator is to find space on routing capable hosts only return new ArrayList(); @@ -40,7 +38,7 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla //all hosts should be of type routing anyway. 
return super.allocateTo(vmProfile, plan, type, avoid, returnUpTo); } finally { - NDC.pop(); + ThreadContext.pop(); } } } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index be6f4012f8f8..286bef7d39a5 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -54,7 +53,6 @@ @Component public class RecreateHostAllocator extends FirstFitRoutingAllocator { - private final static Logger s_logger = Logger.getLogger(RecreateHostAllocator.class); @Inject HostPodDao _podDao; @@ -79,10 +77,10 @@ public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type return hosts; } - s_logger.debug("First fit was unable to find a host"); + logger.debug("First fit was unable to find a host"); VirtualMachine.Type vmType = vm.getType(); if (vmType == VirtualMachine.Type.User) { - s_logger.debug("vm is not a system vm so let's just return empty list"); + logger.debug("vm is not a system vm so let's just return empty list"); return new ArrayList(); } @@ -91,11 +89,11 @@ public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type //getting rid of direct.attached.untagged.vlan.enabled config param: Bug 7204 //basic network type for zone maps to direct untagged case if (dc.getNetworkType().equals(NetworkType.Basic)) { - s_logger.debug("Direct Networking mode so we can only allow the host to be allocated in the same pod due to public ip address cannot change"); + logger.debug("Direct Networking mode so we can only allow the host to be allocated in the 
same pod due to public ip address cannot change"); List vols = _volsDao.findByInstance(vm.getId()); VolumeVO vol = vols.get(0); long podId = vol.getPodId(); - s_logger.debug("Pod id determined from volume " + vol.getId() + " is " + podId); + logger.debug("Pod id determined from volume " + vol.getId() + " is " + podId); Iterator it = pcs.iterator(); while (it.hasNext()) { PodCluster pc = it.next(); @@ -116,7 +114,7 @@ public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type } for (Pair pcId : avoidPcs) { - s_logger.debug("Removing " + pcId + " from the list of available pods"); + logger.debug("Removing " + pcId + " from the list of available pods"); pcs.remove(new PodCluster(new HostPodVO(pcId.first()), pcId.second() != null ? new ClusterVO(pcId.second()) : null)); } @@ -130,7 +128,7 @@ public List allocateTo(VirtualMachineProfile vm, DeploymentPlan plan, Type } - s_logger.debug("Unable to find any available pods at all!"); + logger.debug("Unable to find any available pods at all!"); return new ArrayList(); } diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java index 224514e856d8..f710e5bc8460 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/UserConcentratedAllocator.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -60,7 +59,6 @@ import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class UserConcentratedAllocator extends AdapterBase implements PodAllocator { - private final static Logger s_logger = Logger.getLogger(UserConcentratedAllocator.class); @Inject UserVmDao _vmDao; @@ -89,7 +87,7 @@ public Pair 
allocateTo(VirtualMachineTemplate template, ServiceOfferi List podsInZone = _podDao.listByDataCenterId(zoneId); if (podsInZone.size() == 0) { - s_logger.debug("No pods found in zone " + zone.getName()); + logger.debug("No pods found in zone " + zone.getName()); return null; } @@ -112,8 +110,8 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi dataCenterAndPodHasEnoughCapacity(zoneId, podId, (offering.getRamSize()) * 1024L * 1024L, Capacity.CAPACITY_TYPE_MEMORY, hostCandiates); if (!enoughCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Not enough RAM available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); } continue; } @@ -122,8 +120,8 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi enoughCapacity = dataCenterAndPodHasEnoughCapacity(zoneId, podId, ((long)offering.getCpu() * offering.getSpeed()), Capacity.CAPACITY_TYPE_CPU, hostCandiates); if (!enoughCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); + if (logger.isDebugEnabled()) { + logger.debug("Not enough cpu available in zone/pod to allocate storage for user VM (zone: " + zoneId + ", pod: " + podId + ")"); } continue; } @@ -147,13 +145,13 @@ public Pair allocateTo(VirtualMachineTemplate template, ServiceOfferi } if (availablePods.size() == 0) { - s_logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName()); + logger.debug("There are no pods with enough memory/CPU capacity in zone " + zone.getName()); return null; } else { // Return a random pod int next = _rand.nextInt(availablePods.size()); HostPodVO selectedPod = availablePods.get(next); - s_logger.debug("Found pod " + 
selectedPod.getName() + " in zone " + zone.getName()); + logger.debug("Found pod " + selectedPod.getName() + " in zone " + zone.getName()); return new Pair(selectedPod, podHostCandidates.get(selectedPod.getId())); } } @@ -165,9 +163,9 @@ private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, sc.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType); sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dataCenterId); sc.addAnd("podId", SearchCriteria.Op.EQ, podId); - s_logger.trace("Executing search"); + logger.trace("Executing search"); capacities = _capacityDao.search(sc, null); - s_logger.trace("Done with a search"); + logger.trace("Done with a search"); boolean enoughCapacity = false; if (capacities != null) { @@ -196,8 +194,8 @@ private boolean dataCenterAndPodHasEnoughCapacity(long dataCenterId, long podId, private boolean skipCalculation(VMInstanceVO vm) { if (vm.getState() == State.Expunging) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName()); } return true; } @@ -217,8 +215,8 @@ private boolean skipCalculation(VMInstanceVO vm) { long millisecondsSinceLastUpdate = DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime(); if (millisecondsSinceLastUpdate > secondsToSkipVMs * 1000L) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip counting " + vm.getState().toString() + " vm " + vm.getInstanceName() + " in capacity allocation as it has been " + + if (logger.isDebugEnabled()) { + logger.debug("Skip counting " + vm.getState().toString() + " vm " + vm.getInstanceName() + " in capacity allocation as it has been " + vm.getState().toString().toLowerCase() + " for " + millisecondsSinceLastUpdate / 60000 + " minutes"); } return true; @@ -262,15 +260,15 @@ private long calcHostAllocatedCpuMemoryCapacity(long hostId, short capacityType) if 
(capacityType == Capacity.CAPACITY_TYPE_MEMORY) { usedCapacity += so.getRamSize() * 1024L * 1024L; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Counting memory capacity used by vm: " + vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " + + if (logger.isDebugEnabled()) { + logger.debug("Counting memory capacity used by vm: " + vm.getId() + ", size: " + so.getRamSize() + "MB, host: " + hostId + ", currently counted: " + toHumanReadableSize(usedCapacity) + " Bytes"); } } else if (capacityType == Capacity.CAPACITY_TYPE_CPU) { usedCapacity += so.getCpu() * so.getSpeed(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Counting cpu capacity used by vm: " + vm.getId() + ", cpu: " + so.getCpu() + ", speed: " + so.getSpeed() + ", currently counted: " + + if (logger.isDebugEnabled()) { + logger.debug("Counting cpu capacity used by vm: " + vm.getId() + ", cpu: " + so.getCpu() + ", speed: " + so.getSpeed() + ", currently counted: " + usedCapacity + " Bytes"); } } diff --git a/server/src/main/java/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java b/server/src/main/java/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java index 56bff0df6f98..e7e984e112cf 100644 --- a/server/src/main/java/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java +++ b/server/src/main/java/com/cloud/agent/manager/authn/impl/BasicAgentAuthManager.java @@ -21,7 +21,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -37,7 +36,6 @@ @Component public class BasicAgentAuthManager extends AdapterBase implements AgentAuthorizer, StartupCommandProcessor { - private static final Logger s_logger = Logger.getLogger(BasicAgentAuthManager.class); @Inject HostDao _hostDao = null; @Inject @@ -52,7 +50,7 @@ public boolean processInitialConnect(StartupCommand[] 
cmd) throws ConnectionExce } catch (AgentAuthnException e) { throw new ConnectionException(true, "Failed to authenticate/authorize", e); } - s_logger.debug("Authorized agent with guid " + cmd[0].getGuid()); + logger.debug("Authorized agent with guid " + cmd[0].getGuid()); return false;//so that the next host creator can process it } diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index f550d80b51a7..8460ac0d33f6 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -45,7 +45,8 @@ import org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.math.NumberUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.alert.dao.AlertDao; import com.cloud.api.ApiDBUtils; @@ -84,7 +85,7 @@ import com.cloud.utils.db.SearchCriteria; public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable { - protected Logger logger = Logger.getLogger(AlertManagerImpl.class.getName()); + protected Logger logger = LogManager.getLogger(AlertManagerImpl.class.getName()); private static final long INITIAL_CAPACITY_CHECK_DELAY = 30L * 1000L; // Thirty seconds expressed in milliseconds. 
diff --git a/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java b/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java index 4d5246bf91ba..cc993445c231 100644 --- a/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java +++ b/server/src/main/java/com/cloud/alert/ClusterAlertAdapter.java @@ -21,7 +21,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.cluster.ClusterManager; @@ -36,7 +35,6 @@ @Component public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { - private static final Logger s_logger = Logger.getLogger(ClusterAlertAdapter.class); @Inject private AlertManager _alertMgr; @@ -44,8 +42,8 @@ public class ClusterAlertAdapter extends AdapterBase implements AlertAdapter { private ManagementServerHostDao _mshostDao; public void onClusterAlert(Object sender, EventArgs args) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Receive cluster alert, EventArgs: " + args.getClass().getName()); + if (logger.isDebugEnabled()) { + logger.debug("Receive cluster alert, EventArgs: " + args.getClass().getName()); } if (args instanceof ClusterNodeJoinEventArgs) { @@ -53,21 +51,21 @@ public void onClusterAlert(Object sender, EventArgs args) { } else if (args instanceof ClusterNodeLeftEventArgs) { onClusterNodeLeft(sender, (ClusterNodeLeftEventArgs)args); } else { - s_logger.error("Unrecognized cluster alert event"); + logger.error("Unrecognized cluster alert event"); } } private void onClusterNodeJoined(Object sender, ClusterNodeJoinEventArgs args) { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getJoinedNodes()) { - s_logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node join alert, joined node: " + mshost.getServiceIP() + ", msid: " + 
mshost.getMsid()); } } for (ManagementServerHostVO mshost : args.getJoinedNodes()) { if (mshost.getId() == args.getSelf().longValue()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Management server node " + mshost.getServiceIP() + " is up, send alert"); + if (logger.isDebugEnabled()) { + logger.debug("Management server node " + mshost.getServiceIP() + " is up, send alert"); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is up", ""); @@ -78,23 +76,23 @@ private void onClusterNodeJoined(Object sender, ClusterNodeJoinEventArgs args) { private void onClusterNodeLeft(Object sender, ClusterNodeLeftEventArgs args) { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { for (ManagementServerHostVO mshost : args.getLeftNodes()) { - s_logger.debug("Handle cluster node left alert, leaving node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); + logger.debug("Handle cluster node left alert, leaving node: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid()); } } for (ManagementServerHostVO mshost : args.getLeftNodes()) { if (mshost.getId() != args.getSelf().longValue()) { if (_mshostDao.increaseAlertCount(mshost.getId()) > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, send alert"); + if (logger.isDebugEnabled()) { + logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, send alert"); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management server node " + mshost.getServiceIP() + " is down", ""); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, but alert has already been set"); + if (logger.isDebugEnabled()) { + logger.debug("Detected management server node " + mshost.getServiceIP() + " is down, but alert 
has already been set"); } } } @@ -104,8 +102,8 @@ private void onClusterNodeLeft(Object sender, ClusterNodeLeftEventArgs args) { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring cluster alert manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring cluster alert manager : " + name); } try { diff --git a/server/src/main/java/com/cloud/alert/ConsoleProxyAlertAdapter.java b/server/src/main/java/com/cloud/alert/ConsoleProxyAlertAdapter.java index cdcf68b10fdc..22a37a7e3d19 100644 --- a/server/src/main/java/com/cloud/alert/ConsoleProxyAlertAdapter.java +++ b/server/src/main/java/com/cloud/alert/ConsoleProxyAlertAdapter.java @@ -22,7 +22,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.alert.AlertService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.consoleproxy.ConsoleProxyAlertEventArgs; @@ -38,7 +37,6 @@ @Component public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapter { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyAlertAdapter.class); @Inject private AlertManager _alertMgr; @@ -48,8 +46,8 @@ public class ConsoleProxyAlertAdapter extends AdapterBase implements AlertAdapte private ConsoleProxyDao _consoleProxyDao; public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { - if (s_logger.isDebugEnabled()) - s_logger.debug("received console proxy alert"); + if (logger.isDebugEnabled()) + logger.debug("received console proxy alert"); DataCenterVO dc = _dcDao.findById(args.getZoneId()); ConsoleProxyVO proxy = args.getProxy(); @@ -82,15 +80,15 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { switch (args.getType()) { case ConsoleProxyAlertEventArgs.PROXY_CREATED: - if (s_logger.isDebugEnabled()) { - s_logger.debug("New console proxy created, " + 
zoneProxyPublicAndPrivateIp); + if (logger.isDebugEnabled()) { + logger.debug("New console proxy created, " + zoneProxyPublicAndPrivateIp); } break; case ConsoleProxyAlertEventArgs.PROXY_UP: message = "Console proxy up in " + zoneProxyPublicAndPrivateIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxyPodIdToDeployIn, message, "Console proxy up " + zone); @@ -98,8 +96,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { case ConsoleProxyAlertEventArgs.PROXY_DOWN: message = "Console proxy is down in " + zoneProxyPublicAndPrivateIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxyPodIdToDeployIn, message, "Console proxy down " + zone); @@ -107,8 +105,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { case ConsoleProxyAlertEventArgs.PROXY_REBOOTED: message = "Console proxy is rebooted in " + zoneProxyPublicAndPrivateIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxyPodIdToDeployIn, message, "Console proxy rebooted " + zone); @@ -116,8 +114,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { case ConsoleProxyAlertEventArgs.PROXY_CREATE_FAILURE: message = String.format("Console proxy creation failure. 
Zone [%s].", dc.getName()); - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), null, message + errorDetails, "Console proxy creation failure " + zone); @@ -125,8 +123,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { case ConsoleProxyAlertEventArgs.PROXY_START_FAILURE: message = "Console proxy startup failure in " + zoneProxyPublicAndPrivateIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxyPodIdToDeployIn, message + errorDetails, @@ -134,8 +132,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { break; case ConsoleProxyAlertEventArgs.PROXY_FIREWALL_ALERT: - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console proxy firewall alert, " + zoneProxyPublicAndPrivateIp); + if (logger.isDebugEnabled()) { + logger.debug("Console proxy firewall alert, " + zoneProxyPublicAndPrivateIp); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_CONSOLE_PROXY, args.getZoneId(), proxyPodIdToDeployIn, "Failed to open console proxy firewall port. " + @@ -144,8 +142,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { case ConsoleProxyAlertEventArgs.PROXY_STORAGE_ALERT: message = zoneProxyPublicAndPrivateIp + ", message: " + args.getMessage(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console proxy storage alert, " + message); + if (logger.isDebugEnabled()) { + logger.debug("Console proxy storage alert, " + message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_STORAGE_MISC, args.getZoneId(), proxyPodIdToDeployIn, "Console proxy storage issue. 
" + message, "Console proxy alert " + zone); @@ -156,8 +154,8 @@ public void onProxyAlert(Object sender, ConsoleProxyAlertEventArgs args) { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) - s_logger.info("Start configuring console proxy alert manager : " + name); + if (logger.isInfoEnabled()) + logger.info("Start configuring console proxy alert manager : " + name); try { SubscriptionMgr.getInstance().subscribe(ConsoleProxyManager.ALERT_SUBJECT, this, "onProxyAlert"); diff --git a/server/src/main/java/com/cloud/alert/SecondaryStorageVmAlertAdapter.java b/server/src/main/java/com/cloud/alert/SecondaryStorageVmAlertAdapter.java index c7d7c5c4fefa..8678765b9784 100644 --- a/server/src/main/java/com/cloud/alert/SecondaryStorageVmAlertAdapter.java +++ b/server/src/main/java/com/cloud/alert/SecondaryStorageVmAlertAdapter.java @@ -22,7 +22,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.alert.AlertService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterVO; @@ -38,7 +37,6 @@ @Component public class SecondaryStorageVmAlertAdapter extends AdapterBase implements AlertAdapter { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageVmAlertAdapter.class); @Inject private AlertManager _alertMgr; @@ -48,8 +46,8 @@ public class SecondaryStorageVmAlertAdapter extends AdapterBase implements Alert private SecondaryStorageVmDao _ssvmDao; public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { - if (s_logger.isDebugEnabled()) - s_logger.debug("received secondary storage vm alert"); + if (logger.isDebugEnabled()) + logger.debug("received secondary storage vm alert"); DataCenterVO dc = _dcDao.findById(args.getZoneId()); SecondaryStorageVmVO secStorageVm = args.getSecStorageVm(); @@ -79,15 +77,15 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { switch 
(args.getType()) { case SecStorageVmAlertEventArgs.SSVM_CREATED: - if (s_logger.isDebugEnabled()) { - s_logger.debug("New secondary storage vm created in " + zoneSecStorageVmPrivateAndPublicIp); + if (logger.isDebugEnabled()) { + logger.debug("New secondary storage vm created in " + zoneSecStorageVmPrivateAndPublicIp); } break; case SecStorageVmAlertEventArgs.SSVM_UP: message = "Secondary Storage Vm is up in " + zoneSecStorageVmPrivateAndPublicIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVmPodIdToDeployIn, message, "Secondary Storage Vm up " + zone); @@ -95,8 +93,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { case SecStorageVmAlertEventArgs.SSVM_DOWN: message = "Secondary Storage Vm is down in " + zoneSecStorageVmPrivateAndPublicIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVmPodIdToDeployIn, message, "Secondary Storage Vm down " + zone); @@ -104,8 +102,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { case SecStorageVmAlertEventArgs.SSVM_REBOOTED: message = "Secondary Storage Vm rebooted in " + zoneSecStorageVmPrivateAndPublicIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVmPodIdToDeployIn, message, "Secondary Storage Vm rebooted " + zone); @@ -113,8 +111,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { case SecStorageVmAlertEventArgs.SSVM_CREATE_FAILURE: message = String.format("Secondary Storage Vm creation failure in zone [%s].", dc.getName()); - if (s_logger.isDebugEnabled()) 
{ - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), null, message + errorDetails, @@ -123,8 +121,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { case SecStorageVmAlertEventArgs.SSVM_START_FAILURE: message = "Secondary Storage Vm startup failure in " + zoneSecStorageVmPrivateAndPublicIp; - if (s_logger.isDebugEnabled()) { - s_logger.debug(message); + if (logger.isDebugEnabled()) { + logger.debug(message); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVmPodIdToDeployIn, message + errorDetails, @@ -132,8 +130,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { break; case SecStorageVmAlertEventArgs.SSVM_FIREWALL_ALERT: - if (s_logger.isDebugEnabled()) { - s_logger.debug("Secondary Storage Vm firewall alert, " + zoneSecStorageVmPrivateAndPublicIp); + if (logger.isDebugEnabled()) { + logger.debug("Secondary Storage Vm firewall alert, " + zoneSecStorageVmPrivateAndPublicIp); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_SSVM, args.getZoneId(), secStorageVmPodIdToDeployIn, "Failed to open secondary storage vm firewall port. 
" @@ -141,8 +139,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { break; case SecStorageVmAlertEventArgs.SSVM_STORAGE_ALERT: - if (s_logger.isDebugEnabled()) { - s_logger.debug("Secondary Storage Vm storage alert, " + zoneSecStorageVmPrivateAndPublicIp + ", message: " + args.getMessage()); + if (logger.isDebugEnabled()) { + logger.debug("Secondary Storage Vm storage alert, " + zoneSecStorageVmPrivateAndPublicIp + ", message: " + args.getMessage()); } _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_STORAGE_MISC, args.getZoneId(), secStorageVmPodIdToDeployIn, @@ -154,8 +152,8 @@ public void onSSVMAlert(Object sender, SecStorageVmAlertEventArgs args) { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) - s_logger.info("Start configuring secondary storage vm alert manager : " + name); + if (logger.isInfoEnabled()) + logger.info("Start configuring secondary storage vm alert manager : " + name); try { SubscriptionMgr.getInstance().subscribe(SecondaryStorageVmManager.ALERT_SUBJECT, this, "onSSVMAlert"); diff --git a/server/src/main/java/com/cloud/api/ApiAsyncJobDispatcher.java b/server/src/main/java/com/cloud/api/ApiAsyncJobDispatcher.java index e09e95e2ce68..e70a6b4da639 100644 --- a/server/src/main/java/com/cloud/api/ApiAsyncJobDispatcher.java +++ b/server/src/main/java/com/cloud/api/ApiAsyncJobDispatcher.java @@ -32,7 +32,6 @@ import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.jobs.JobInfo; -import org.apache.log4j.Logger; import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; @@ -44,7 +43,6 @@ import com.google.gson.reflect.TypeToken; public class ApiAsyncJobDispatcher extends AdapterBase implements AsyncJobDispatcher { - private static final Logger s_logger = Logger.getLogger(ApiAsyncJobDispatcher.class); @Inject private 
ApiDispatcher _dispatcher; @@ -122,7 +120,7 @@ public void runJob(final AsyncJob job) { String errorMsg = null; int errorCode = ApiErrorCode.INTERNAL_ERROR.getHttpCode(); if (!(e instanceof ServerApiException)) { - s_logger.error("Unexpected exception while executing " + job.getCmd(), e); + logger.error("Unexpected exception while executing " + job.getCmd(), e); errorMsg = e.getMessage(); } else { ServerApiException sApiEx = (ServerApiException)e; diff --git a/server/src/main/java/com/cloud/api/ApiDispatcher.java b/server/src/main/java/com/cloud/api/ApiDispatcher.java index 09a7a92a4a16..d8eb26ea0a71 100644 --- a/server/src/main/java/com/cloud/api/ApiDispatcher.java +++ b/server/src/main/java/com/cloud/api/ApiDispatcher.java @@ -36,7 +36,8 @@ import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.AsyncJobManagerImpl; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.api.dispatch.DispatchChain; import com.cloud.api.dispatch.DispatchChainFactory; @@ -48,7 +49,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ApiDispatcher { - private static final Logger s_logger = Logger.getLogger(ApiDispatcher.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); Long _createSnapshotQueueSizeLimit; Long migrateQueueSizeLimit; @@ -157,7 +158,7 @@ public void dispatch(final BaseCmd cmd, final Map params, final return; } } else { - s_logger.trace("The queue size is unlimited, skipping the synchronizing"); + logger.trace("The queue size is unlimited, skipping the synchronizing"); } } } diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index e2b72f6175c7..2d2960af3f7b 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ 
b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -219,7 +219,8 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.VgpuTypesInfo; import com.cloud.api.query.ViewResponseHelper; @@ -420,7 +421,7 @@ public class ApiResponseHelper implements ResponseGenerator { - private static final Logger s_logger = Logger.getLogger(ApiResponseHelper.class); + protected Logger logger = LogManager.getLogger(ApiResponseHelper.class); private static final DecimalFormat s_percentFormat = new DecimalFormat("##.##"); @Inject @@ -695,7 +696,7 @@ public SnapshotResponse createSnapshotResponse(Snapshot snapshot) { } if (snapshotInfo == null) { - s_logger.debug("Unable to find info for image store snapshot with uuid " + snapshot.getUuid()); + logger.debug("Unable to find info for image store snapshot with uuid " + snapshot.getUuid()); snapshotResponse.setRevertable(false); } else { snapshotResponse.setRevertable(snapshotInfo.isRevertable()); @@ -1133,7 +1134,7 @@ private void setVpcIdInResponse(Long vpcId, Consumer vpcUuidSetter, Cons _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, false, vpc); vpcUuidSetter.accept(vpc.getUuid()); } catch (PermissionDeniedException e) { - s_logger.debug("Not setting the vpcId to the response because the caller does not have access to the VPC"); + logger.debug("Not setting the vpcId to the response because the caller does not have access to the VPC"); } vpcNameSetter.accept(vpc.getName()); } @@ -2131,7 +2132,7 @@ public TemplatePermissionsResponse createTemplatePermissionsResponse(ResponseVie for (String accountName : accountNames) { Account account = ApiDBUtils.findAccountByNameDomain(accountName, templateOwner.getDomainId()); if (account == null) { - s_logger.error("Missing Account 
" + accountName + " in domain " + templateOwner.getDomainId()); + logger.error("Missing Account " + accountName + " in domain " + templateOwner.getDomainId()); continue; } @@ -2898,7 +2899,7 @@ public static void populateOwner(ControlledViewEntityResponse response, Controll private void populateAccount(ControlledEntityResponse response, long accountId) { Account account = ApiDBUtils.findAccountById(accountId); if (account == null) { - s_logger.debug("Unable to find account with id: " + accountId); + logger.debug("Unable to find account with id: " + accountId); } else if (account.getType() == Account.Type.PROJECT) { // find the project Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId()); @@ -2907,7 +2908,7 @@ private void populateAccount(ControlledEntityResponse response, long accountId) response.setProjectName(project.getName()); response.setAccountName(account.getAccountName()); } else { - s_logger.debug("Unable to find project with id: " + account.getId()); + logger.debug("Unable to find project with id: " + account.getId()); } } else { response.setAccountName(account.getAccountName()); @@ -3827,7 +3828,7 @@ public Map> getUsageResourceTags() try { return _resourceTagDao.listTags(); } catch(Exception ex) { - s_logger.warn("Failed to get resource details for Usage data due to exception : ", ex); + logger.warn("Failed to get resource details for Usage data due to exception : ", ex); } return null; } @@ -5002,7 +5003,7 @@ protected void handleCertificateResponse(String certStr, DirectDownloadCertifica response.setValidity(String.format("From: [%s] - To: [%s]", certificate.getNotBefore(), certificate.getNotAfter())); } } catch (CertificateException e) { - s_logger.error("Error parsing direct download certificate: " + certStr, e); + logger.error("Error parsing direct download certificate: " + certStr, e); } } diff --git a/server/src/main/java/com/cloud/api/ApiSerializerHelper.java b/server/src/main/java/com/cloud/api/ApiSerializerHelper.java 
index 78a82cebe153..d12fbf8fa346 100644 --- a/server/src/main/java/com/cloud/api/ApiSerializerHelper.java +++ b/server/src/main/java/com/cloud/api/ApiSerializerHelper.java @@ -19,7 +19,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; @@ -28,7 +29,7 @@ import org.apache.cloudstack.api.ResponseObject; public class ApiSerializerHelper { - public static final Logger s_logger = Logger.getLogger(ApiSerializerHelper.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(ApiSerializerHelper.class); private static String token = "/"; public static String toSerializedString(Object result) { @@ -80,7 +81,7 @@ public static Object fromSerializedString(String result) { } return null; } catch (RuntimeException e) { - s_logger.error("Caught runtime exception when doing GSON deserialization on: " + result); + LOGGER.error("Caught runtime exception when doing GSON deserialization on: " + result); throw e; } } @@ -101,7 +102,7 @@ public static Map fromSerializedStringToMap(String result) { } } } catch (RuntimeException | JsonProcessingException e) { - s_logger.error("Caught runtime exception when doing GSON deserialization to map on: " + result, e); + LOGGER.error("Caught runtime exception when doing GSON deserialization to map on: " + result, e); } return objParams; diff --git a/server/src/main/java/com/cloud/api/ApiServer.java b/server/src/main/java/com/cloud/api/ApiServer.java index b602ed2edbc1..9ec7cabdb7c1 100644 --- a/server/src/main/java/com/cloud/api/ApiServer.java +++ b/server/src/main/java/com/cloud/api/ApiServer.java @@ -132,7 +132,8 @@ import org.apache.http.protocol.ResponseContent; import org.apache.http.protocol.ResponseDate; import org.apache.http.protocol.ResponseServer; -import org.apache.log4j.Logger; +import 
org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; @@ -185,8 +186,6 @@ @Component public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiServerService, Configurable { - private static final Logger s_logger = Logger.getLogger(ApiServer.class.getName()); - private static final Logger s_accessLogger = Logger.getLogger("apiserver." + ApiServer.class.getName()); private static final String SANITIZATION_REGEX = "[\n\r]"; @@ -300,8 +299,8 @@ public void handleAsyncJobPublishEvent(String subject, String senderAddress, Obj AsyncJob job = eventInfo.first(); String jobEvent = eventInfo.second(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Handle asyjob publish event " + jobEvent); + if (logger.isTraceEnabled()) + logger.trace("Handle asyjob publish event " + jobEvent); EventBus eventBus = null; try { @@ -329,11 +328,11 @@ public void handleAsyncJobPublishEvent(String subject, String senderAddress, Obj if (eventTypeObj != null) { cmdEventType = eventTypeObj; - if (s_logger.isDebugEnabled()) - s_logger.debug("Retrieved cmdEventType from job info: " + cmdEventType); + if (logger.isDebugEnabled()) + logger.debug("Retrieved cmdEventType from job info: " + cmdEventType); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Unable to locate cmdEventType marker in job info. publish as unknown event"); + if (logger.isDebugEnabled()) + logger.debug("Unable to locate cmdEventType marker in job info. 
publish as unknown event"); } String contextDetails = cmdInfo.get("ctxDetails"); if(contextDetails != null) { @@ -374,7 +373,7 @@ public void handleAsyncJobPublishEvent(String subject, String senderAddress, Obj eventBus.publish(event); } catch (EventBusException evx) { String errMsg = "Failed to publish async job event on the event bus."; - s_logger.warn(errMsg, evx); + logger.warn(errMsg, evx); } } @@ -385,14 +384,14 @@ public boolean start() { final Long snapshotLimit = ConcurrentSnapshotsThresholdPerHost.value(); if (snapshotLimit == null || snapshotLimit.longValue() <= 0) { - s_logger.debug("Global concurrent snapshot config parameter " + ConcurrentSnapshotsThresholdPerHost.value() + " is less or equal 0; defaulting to unlimited"); + logger.debug("Global concurrent snapshot config parameter " + ConcurrentSnapshotsThresholdPerHost.value() + " is less or equal 0; defaulting to unlimited"); } else { dispatcher.setCreateSnapshotQueueSizeLimit(snapshotLimit); } final Long migrationLimit = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value(); if (migrationLimit == null || migrationLimit.longValue() <= 0) { - s_logger.debug("Global concurrent migration config parameter " + VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value() + " is less or equal 0; defaulting to unlimited"); + logger.debug("Global concurrent migration config parameter " + VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value() + " is less or equal 0; defaulting to unlimited"); } else { dispatcher.setMigrateQueueSizeLimit(migrationLimit); } @@ -400,8 +399,8 @@ public boolean start() { final Set> cmdClasses = new HashSet>(); for (final PluggableService pluggableService : pluggableServices) { cmdClasses.addAll(pluggableService.getCommands()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Discovered plugin " + pluggableService.getClass().getSimpleName()); + if (logger.isDebugEnabled()) { + logger.debug("Discovered plugin " + 
pluggableService.getClass().getSimpleName()); } } @@ -451,7 +450,7 @@ public void handle(final HttpRequest request, final HttpResponse response, final try { paramList = URLEncodedUtils.parse(new URI(request.getRequestLine().getUri()), HttpUtils.UTF_8); } catch (final URISyntaxException e) { - s_logger.error("Error parsing url request", e); + logger.error("Error parsing url request", e); } // Use Multimap as the parameter map should be in the form (name=String, value=String[]) @@ -470,7 +469,7 @@ public void handle(final HttpRequest request, final HttpResponse response, final if(parameterMap.putIfAbsent(param.getName(), new String[]{param.getValue()}) != null) { String message = String.format("Query parameter '%s' has multiple values [%s, %s]. Only the last value will be respected." + "It is advised to pass only a single parameter", param.getName(), param.getValue(), parameterMap.get(param.getName())); - s_logger.warn(message); + logger.warn(message); } } } @@ -516,11 +515,11 @@ public void handle(final HttpRequest request, final HttpResponse response, final sb.append(" " + se.getErrorCode() + " " + se.getDescription()); } catch (final RuntimeException e) { // log runtime exception like NullPointerException to help identify the source easier - s_logger.error("Unhandled exception, ", e); + logger.error("Unhandled exception, ", e); throw e; } } finally { - s_accessLogger.info(sb.toString()); + logger.info(sb.toString()); CallContext.unregister(); } } @@ -556,13 +555,13 @@ public String handleRequest(final Map params, final String responseType, final S try { command = (String[])params.get("command"); if (command == null) { - s_logger.error("invalid request, no command sent"); - if (s_logger.isTraceEnabled()) { - s_logger.trace("dumping request parameters"); + logger.error("invalid request, no command sent"); + if (logger.isTraceEnabled()) { + logger.trace("dumping request parameters"); for (final Object key : params.keySet()) { final String keyStr = (String)key; final 
String[] value = (String[])params.get(key); - s_logger.trace(" key: " + keyStr + ", value: " + ((value == null) ? "'null'" : value[0])); + logger.trace(" key: " + keyStr + ", value: " + ((value == null) ? "'null'" : value[0])); } } throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "Invalid request, no command sent"); @@ -587,7 +586,7 @@ public String handleRequest(final Map params, final String responseType, final S if (cmdClass != null) { APICommand annotation = cmdClass.getAnnotation(APICommand.class); if (annotation == null) { - s_logger.error("No APICommand annotation found for class " + cmdClass.getCanonicalName()); + logger.error("No APICommand annotation found for class " + cmdClass.getCanonicalName()); throw new CloudRuntimeException("No APICommand annotation found for class " + cmdClass.getCanonicalName()); } @@ -604,16 +603,16 @@ public String handleRequest(final Map params, final String responseType, final S buildAuditTrail(auditTrailSb, command[0], log.toString()); } else { final String errorString = "Unknown API command: " + command[0]; - s_logger.warn(errorString); + logger.warn(errorString); auditTrailSb.append(" " + errorString); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, errorString); } } } catch (final InvalidParameterValueException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage(), ex); } catch (final IllegalArgumentException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ex.getMessage(), ex); } catch (final PermissionDeniedException ex) { final ArrayList idList = ex.getIdProxyList(); @@ -625,16 +624,16 @@ public String handleRequest(final Map params, final String responseType, final S buf.append(obj.getUuid()); buf.append(" "); } - s_logger.info("PermissionDenied: " + ex.getMessage() + " on objs: [" + buf.toString() + "]"); + 
logger.info("PermissionDenied: " + ex.getMessage() + " on objs: [" + buf.toString() + "]"); } else { - s_logger.info("PermissionDenied: " + ex.getMessage()); + logger.info("PermissionDenied: " + ex.getMessage()); } throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, ex.getMessage(), ex); } catch (final AccountLimitException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.ACCOUNT_RESOURCE_LIMIT_ERROR, ex.getMessage(), ex); } catch (final InsufficientCapacityException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); String errorMsg = ex.getMessage(); if (!accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())) { // hide internal details to non-admin user for security reason @@ -642,10 +641,10 @@ public String handleRequest(final Map params, final String responseType, final S } throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, errorMsg, ex); } catch (final ResourceAllocationException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage(), ex); } catch (final ResourceUnavailableException ex) { - s_logger.info(ex.getMessage()); + logger.info(ex.getMessage()); String errorMsg = ex.getMessage(); if (!accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())) { // hide internal details to non-admin user for security reason @@ -653,10 +652,10 @@ public String handleRequest(final Map params, final String responseType, final S } throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, errorMsg, ex); } catch (final ServerApiException ex) { - s_logger.info(ex.getDescription()); + logger.info(ex.getDescription()); throw ex; } catch (final Exception ex) { - s_logger.error("unhandled exception executing api command: " + ((command == null) ? 
"null" : command), ex); + logger.error("unhandled exception executing api command: " + ((command == null) ? "null" : command), ex); String errorMsg = ex.getMessage(); if (!accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())) { // hide internal details to non-admin user for security reason @@ -760,7 +759,7 @@ private String queueCommand(final BaseCmd cmdObj, final Map para if (jobId == 0L) { final String errorMsg = "Unable to schedule async job for command " + job.getCmd(); - s_logger.warn(errorMsg); + logger.warn(errorMsg); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } final String response; @@ -857,7 +856,7 @@ public boolean verifyRequest(final Map requestParameters, fina final String[] command = (String[])requestParameters.get(ApiConstants.COMMAND); if (command == null) { - s_logger.info("missing command, ignoring request..."); + logger.info("missing command, ignoring request..."); return false; } @@ -872,7 +871,7 @@ public boolean verifyRequest(final Map requestParameters, fina if (!s_apiNameCmdClassMap.containsKey(commandName) && !commandName.equals("login") && !commandName.equals("logout")) { final String errorMessage = "The given command " + commandName + " either does not exist, is not available" + " for user, or not available from ip address '" + remoteAddress.getHostAddress() + "'."; - s_logger.debug(errorMessage); + logger.debug(errorMessage); return false; } } @@ -915,7 +914,7 @@ public boolean verifyRequest(final Map requestParameters, fina // if api/secret key are passed to the parameters if ((signature == null) || (apiKey == null)) { - s_logger.debug("Expired session, missing signature, or missing apiKey -- ignoring request. Signature: " + signature + ", apiKey: " + apiKey); + logger.debug("Expired session, missing signature, or missing apiKey -- ignoring request. 
Signature: " + signature + ", apiKey: " + apiKey); return false; // no signature, bad request } @@ -924,14 +923,14 @@ public boolean verifyRequest(final Map requestParameters, fina if ("3".equals(signatureVersion)) { // New signature authentication. Check for expire parameter and its validity if (expires == null) { - s_logger.debug("Missing Expires parameter -- ignoring request."); + logger.debug("Missing Expires parameter -- ignoring request."); return false; } try { expiresTS = DateUtil.parseTZDateString(expires); } catch (final ParseException pe) { - s_logger.debug("Incorrect date format for Expires parameter", pe); + logger.debug("Incorrect date format for Expires parameter", pe); return false; } @@ -939,7 +938,7 @@ public boolean verifyRequest(final Map requestParameters, fina if (expiresTS.before(now)) { signature = signature.replaceAll(SANITIZATION_REGEX, "_"); apiKey = apiKey.replaceAll(SANITIZATION_REGEX, "_"); - s_logger.debug(String.format("Request expired -- ignoring ...sig [%s], apiKey [%s].", signature, apiKey)); + logger.debug(String.format("Request expired -- ignoring ...sig [%s], apiKey [%s].", signature, apiKey)); return false; } } @@ -950,7 +949,7 @@ public boolean verifyRequest(final Map requestParameters, fina // verify there is a user with this api key final Pair userAcctPair = accountMgr.findUserByApiKey(apiKey); if (userAcctPair == null) { - s_logger.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); + logger.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); return false; } @@ -958,7 +957,7 @@ public boolean verifyRequest(final Map requestParameters, fina final Account account = userAcctPair.second(); if (user.getState() != Account.State.ENABLED || !account.getState().equals(Account.State.ENABLED)) { - s_logger.info("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + + 
logger.info("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + "; accountState: " + account.getState()); return false; } @@ -970,7 +969,7 @@ public boolean verifyRequest(final Map requestParameters, fina // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - s_logger.info("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + logger.info("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); return false; } @@ -987,7 +986,7 @@ public boolean verifyRequest(final Map requestParameters, fina if (!equalSig) { signature = signature.replaceAll(SANITIZATION_REGEX, "_"); - s_logger.info(String.format("User signature [%s] is not equaled to computed signature [%s].", signature, computedSignature)); + logger.info(String.format("User signature [%s] is not equaled to computed signature [%s].", signature, computedSignature)); } else { CallContext.register(user, account); } @@ -995,7 +994,7 @@ public boolean verifyRequest(final Map requestParameters, fina } catch (final ServerApiException ex) { throw ex; } catch (final Exception ex) { - s_logger.error("unable to verify request signature"); + logger.error("unable to verify request signature"); } return false; } @@ -1004,10 +1003,10 @@ private boolean commandAvailable(final InetAddress remoteAddress, final String c try { checkCommandAvailable(user, commandName, remoteAddress); } catch (final RequestLimitException ex) { - s_logger.debug(ex.getMessage()); + logger.debug(ex.getMessage()); throw new ServerApiException(ApiErrorCode.API_LIMIT_EXCEED, ex.getMessage()); } catch (final UnavailableCommandException ex) { - s_logger.debug(ex.getMessage()); + logger.debug(ex.getMessage()); throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, ex.getMessage()); } catch (final 
PermissionDeniedException ex) { final String errorMessage = "The given command '" + commandName + "' either does not exist, is not available" + @@ -1016,7 +1015,7 @@ private boolean commandAvailable(final InetAddress remoteAddress, final String c } catch (final OriginDeniedException ex) { // in this case we can remove the session with extreme prejudice final String errorMessage = "The user '" + user.getUsername() + "' is not allowed to execute commands from ip address '" + remoteAddress.getHostName() + "'."; - s_logger.debug(errorMessage); + logger.debug(errorMessage); return false; } return true; @@ -1114,13 +1113,13 @@ public ResponseObject loginUser(final HttpSession session, final String username float offsetInHrs = 0f; if (timezone != null) { final TimeZone t = TimeZone.getTimeZone(timezone); - s_logger.info("Current user logged in under " + timezone + " timezone"); + logger.info("Current user logged in under " + timezone + " timezone"); final java.util.Date date = new java.util.Date(); final long longDate = date.getTime(); final float offsetInMs = (t.getOffset(longDate)); offsetInHrs = offsetInMs / (1000 * 60 * 60); - s_logger.info("Timezone offset from UTC is: " + offsetInHrs); + logger.info("Timezone offset from UTC is: " + offsetInHrs); } final Account account = accountMgr.getAccount(userAcct.getAccountId()); @@ -1196,7 +1195,7 @@ public boolean verifyUser(final Long userId) { if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.ENABLED) || (account == null) || !account.getState().equals(Account.State.ENABLED)) { - s_logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); return false; } return true; @@ -1212,9 +1211,9 @@ private void checkCommandAvailable(final User user, final String commandName, fi final Boolean apiSourceCidrChecksEnabled = 
ApiServiceConfiguration.ApiSourceCidrChecksEnabled.value(); if (apiSourceCidrChecksEnabled) { - s_logger.debug("CIDRs from which account '" + account.toString() + "' is allowed to perform API calls: " + accessAllowedCidrs); + logger.debug("CIDRs from which account '" + account.toString() + "' is allowed to perform API calls: " + accessAllowedCidrs); if (!NetUtils.isIpInCidrList(remoteAddress, accessAllowedCidrs.split(","))) { - s_logger.warn("Request by account '" + account.toString() + "' was denied since " + remoteAddress + " does not match " + accessAllowedCidrs); + logger.warn("Request by account '" + account.toString() + "' was denied since " + remoteAddress + " does not match " + accessAllowedCidrs); throw new OriginDeniedException("Calls from disallowed origin", account, remoteAddress); } } @@ -1280,7 +1279,7 @@ private void writeResponse(final HttpResponse resp, final String responseText, f } resp.setEntity(body); } catch (final Exception ex) { - s_logger.error("error!", ex); + logger.error("error!", ex); } } @@ -1290,6 +1289,8 @@ private void writeResponse(final HttpResponse resp, final String responseText, f // modify the // code to be very specific to our needs static class ListenerThread extends Thread { + + private static Logger LOGGER = LogManager.getLogger(ListenerThread.class); private HttpService _httpService = null; private ServerSocket _serverSocket = null; private HttpParams _params = null; @@ -1298,7 +1299,7 @@ public ListenerThread(final ApiServer requestHandler, final int port) { try { _serverSocket = new ServerSocket(port); } catch (final IOException ioex) { - s_logger.error("error initializing api server", ioex); + LOGGER.error("error initializing api server", ioex); return; } @@ -1328,7 +1329,7 @@ public ListenerThread(final ApiServer requestHandler, final int port) { @Override public void run() { - s_logger.info("ApiServer listening on port " + _serverSocket.getLocalPort()); + LOGGER.info("ApiServer listening on port " + 
_serverSocket.getLocalPort()); while (!Thread.interrupted()) { try { // Set up HTTP connection @@ -1341,7 +1342,7 @@ public void run() { } catch (final InterruptedIOException ex) { break; } catch (final IOException e) { - s_logger.error("I/O error initializing connection thread", e); + LOGGER.error("I/O error initializing connection thread", e); break; } } @@ -1366,15 +1367,15 @@ protected void runInContext() { _conn.close(); } } catch (final ConnectionClosedException ex) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ApiServer: Client closed connection"); + if (logger.isTraceEnabled()) { + logger.trace("ApiServer: Client closed connection"); } } catch (final IOException ex) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ApiServer: IOException - " + ex); + if (logger.isTraceEnabled()) { + logger.trace("ApiServer: IOException - " + ex); } } catch (final HttpException ex) { - s_logger.warn("ApiServer: Unrecoverable HTTP protocol violation" + ex); + logger.warn("ApiServer: Unrecoverable HTTP protocol violation" + ex); } finally { try { _conn.shutdown(); @@ -1414,7 +1415,7 @@ public String getSerializedApiError(final int errorCode, final String errorText, responseText = ApiResponseSerializer.toSerializedString(apiResponse, responseType); } catch (final Exception e) { - s_logger.error("Exception responding to http request", e); + logger.error("Exception responding to http request", e); } return responseText; } @@ -1464,7 +1465,7 @@ public String getSerializedApiError(final ServerApiException ex, final Map s_clientAddressHeaders = Collections .unmodifiableList(Arrays.asList("X-Forwarded-For", "HTTP_CLIENT_IP", "HTTP_X_FORWARDED_FOR", "Remote_Addr")); private static final String REPLACEMENT = "_"; - private static final String LOG_REPLACEMENTS = "[\n\r\t]"; + private static final String LOGGER_REPLACEMENTS = "[\n\r\t]"; @Inject ApiServerService apiServer; @@ -132,7 +132,7 @@ void utf8Fixup(final HttpServletRequest req, final Map params) String value = 
decodeUtf8(paramTokens[1]); params.put(name, new String[] {value}); } else { - s_logger.debug("Invalid parameter in URL found. param: " + param); + LOGGER.debug("Invalid parameter in URL found. param: " + param); } } } @@ -161,7 +161,7 @@ private void checkSingleQueryParameterValue(Map params) { if (v.length > 1) { String message = String.format("Query parameter '%s' has multiple values %s. Only the last value will be respected." + "It is advised to pass only a single parameter", k, Arrays.toString(v)); - s_logger.warn(message); + LOGGER.warn(message); } }); @@ -172,7 +172,7 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp try { remoteAddress = getClientAddress(req); } catch (UnknownHostException e) { - s_logger.warn("UnknownHostException when trying to lookup remote IP-Address. This should never happen. Blocking request.", e); + LOGGER.warn("UnknownHostException when trying to lookup remote IP-Address. This should never happen. Blocking request.", e); final String response = apiServer.getSerializedApiError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "UnknownHostException when trying to lookup remote IP-Address", null, HttpUtils.RESPONSE_TYPE_XML); @@ -196,17 +196,17 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp // logging the request start and end in management log for easy debugging String reqStr = ""; String cleanQueryString = StringUtils.cleanString(req.getQueryString()); - if (s_logger.isDebugEnabled()) { + if (LOGGER.isDebugEnabled()) { reqStr = auditTrailSb.toString() + " " + cleanQueryString; - s_logger.debug("===START=== " + reqStr); + LOGGER.debug("===START=== " + reqStr); } try { resp.setContentType(HttpUtils.XML_CONTENT_TYPE); HttpSession session = req.getSession(false); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("session found: %s", session)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("session found: %s", session)); } final Object[] 
responseTypeParam = params.get(ApiConstants.RESPONSE); if (responseTypeParam != null) { @@ -217,10 +217,10 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp final String command = commandObj == null ? null : (String) commandObj[0]; final Object[] userObj = params.get(ApiConstants.USERNAME); String username = userObj == null ? null : (String)userObj[0]; - if (s_logger.isTraceEnabled()) { + if (LOGGER.isTraceEnabled()) { String logCommand = saveLogString(command); String logName = saveLogString(username); - s_logger.trace(String.format("command %s processing for user \"%s\"", + LOGGER.trace(String.format("command %s processing for user \"%s\"", logCommand, logName)); } @@ -243,15 +243,15 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp if (ApiServer.EnableSecureSessionCookie.value()) { resp.setHeader("SET-COOKIE", String.format("JSESSIONID=%s;Secure;HttpOnly;Path=/client", session.getId())); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Session cookie is marked secure!"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Session cookie is marked secure!"); } } } try { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("apiAuthenticator.authenticate(%s, params[%d], %s, %s, %s, %s, %s,%s)", + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("apiAuthenticator.authenticate(%s, params[%d], %s, %s, %s, %s, %s,%s)", saveLogString(command), params.size(), session.getId(), remoteAddress.getHostAddress(), saveLogString(responseType), "auditTrailSb", "req", "resp")); } responseString = apiAuthenticator.authenticate(command, params, session, remoteAddress, responseType, auditTrailSb, req, resp); @@ -261,7 +261,7 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp } catch (ServerApiException e) { httpResponseCode = e.getErrorCode().getHttpCode(); responseString = e.getMessage(); - s_logger.debug("Authentication failure: " + e.getMessage()); + 
LOGGER.debug("Authentication failure: " + e.getMessage()); } if (apiAuthenticator.getAPIType() == APIAuthenticationType.LOGOUT_API) { @@ -291,7 +291,7 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp return; } } else { - s_logger.trace("no command available"); + LOGGER.trace("no command available"); } auditTrailSb.append(cleanQueryString); final boolean isNew = ((session == null) ? true : session.isNew()); @@ -300,15 +300,15 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp // we no longer rely on web-session here, verifyRequest will populate user/account information // if a API key exists - if (isNew && s_logger.isTraceEnabled()) { - s_logger.trace(String.format("new session: %s", session)); + if (isNew && LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("new session: %s", session)); } if (!isNew && (command.equalsIgnoreCase(ValidateUserTwoFactorAuthenticationCodeCmd.APINAME) || (!skip2FAcheckForAPIs(command) && !skip2FAcheckForUser(session)))) { - s_logger.debug("Verifying two factor authentication"); + LOGGER.debug("Verifying two factor authentication"); boolean success = verify2FA(session, command, auditTrailSb, params, remoteAddress, responseType, req, resp); if (!success) { - s_logger.debug("Verification of two factor authentication failed"); + LOGGER.debug("Verification of two factor authentication failed"); return; } } @@ -321,8 +321,8 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp if (account != null) { if (invalidateHttpSessionIfNeeded(req, resp, auditTrailSb, responseType, params, session, account)) return; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("no account, this request will be validated through apikey(%s)/signature"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("no account, this request will be validated through apikey(%s)/signature"); } } @@ -332,8 +332,8 @@ void processRequestInContext(final HttpServletRequest 
req, final HttpServletResp CallContext.register(accountMgr.getSystemUser(), accountMgr.getSystemAccount()); } setProjectContext(params); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("verifying request for user %s from %s with %d parameters", + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("verifying request for user %s from %s with %d parameters", userId, remoteAddress.getHostAddress(), params.size())); } if (apiServer.verifyRequest(params, userId, remoteAddress)) { @@ -364,12 +364,12 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp HttpUtils.writeHttpResponse(resp, serializedResponseText, se.getErrorCode().getHttpCode(), responseType, ApiServer.JSONcontentType.value()); auditTrailSb.append(" " + se.getErrorCode() + " " + se.getDescription()); } catch (final Exception ex) { - s_logger.error("unknown exception writing api response", ex); + LOGGER.error("unknown exception writing api response", ex); auditTrailSb.append(" unknown exception writing api response"); } finally { - s_accessLogger.info(auditTrailSb.toString()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("===END=== " + reqStr); + LOGGER.info(auditTrailSb.toString()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("===END=== " + reqStr); } // cleanup user context to prevent from being peeked in other request context CallContext.unregister(); @@ -404,7 +404,7 @@ protected boolean skip2FAcheckForUser(HttpSession session) { Long userId = (Long) session.getAttribute("userid"); boolean is2FAverified = (boolean) session.getAttribute(ApiConstants.IS_2FA_VERIFIED); if (is2FAverified) { - s_logger.debug(String.format("Two factor authentication is already verified for the user %d, so skipping", userId)); + LOGGER.debug(String.format("Two factor authentication is already verified for the user %d, so skipping", userId)); skip2FAcheck = true; } else { UserAccount userAccount = accountMgr.getUserAccountById(userId); @@ -435,7 +435,7 @@ 
protected boolean verify2FA(HttpSession session, String command, StringBuilder a HttpUtils.writeHttpResponse(resp, responseString, HttpServletResponse.SC_OK, responseType, ApiServer.JSONcontentType.value()); verify2FA = true; } else { - s_logger.error("Cannot find API authenticator while verifying 2FA"); + LOGGER.error("Cannot find API authenticator while verifying 2FA"); auditTrailSb.append(" Cannot find API authenticator while verifying 2FA"); verify2FA = false; } @@ -459,7 +459,7 @@ protected boolean verify2FA(HttpSession session, String command, StringBuilder a errorMsg = "Two factor authentication is mandated by admin, user needs to setup 2FA using setupUserTwoFactorAuthentication API and" + " then verify 2FA using validateUserTwoFactorAuthenticationCode API before calling other APIs. Existing session is invalidated."; } - s_logger.error(errorMsg); + LOGGER.error(errorMsg); invalidateHttpSession(session, String.format("Unable to process the API request for %s from %s due to %s", userId, remoteAddress.getHostAddress(), errorMsg)); auditTrailSb.append(" " + ApiErrorCode.UNAUTHORIZED2FA + " " + errorMsg); @@ -481,7 +481,7 @@ protected void setClientAddressForConsoleEndpointAccess(String command, Map params, HttpSession session, String command, Long userId, String account, Object accountObj) { if ((userId != null) && (account != null) && (accountObj != null) && apiServer.verifyUser(userId)) { if (command == null) { - s_logger.info("missing command, ignoring request..."); + LOGGER.info("missing command, ignoring request..."); auditTrailSb.append(" " + HttpServletResponse.SC_BAD_REQUEST + " " + "no command specified"); final String serializedResponse = apiServer.getSerializedApiError(HttpServletResponse.SC_BAD_REQUEST, "no command specified", params, responseType); HttpUtils.writeHttpResponse(resp, serializedResponse, HttpServletResponse.SC_BAD_REQUEST, responseType, ApiServer.JSONcontentType.value()); @@ -525,13 +525,13 @@ private boolean 
invalidateHttpSessionIfNeeded(HttpServletRequest req, HttpServle public static void invalidateHttpSession(HttpSession session, String msg) { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace(msg); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(msg); } session.invalidate(); } catch (final IllegalStateException ise) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("failed to invalidate session %s", session.getId())); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("failed to invalidate session %s", session.getId())); } } } @@ -539,7 +539,7 @@ public static void invalidateHttpSession(HttpSession session, String msg) { private void setProjectContext(Map requestParameters) { final String[] command = (String[])requestParameters.get(ApiConstants.COMMAND); if (command == null) { - s_logger.info("missing command, ignoring request..."); + LOGGER.info("missing command, ignoring request..."); return; } diff --git a/server/src/main/java/com/cloud/api/ApiSessionListener.java b/server/src/main/java/com/cloud/api/ApiSessionListener.java index 56da456b8e22..46ebb404a541 100644 --- a/server/src/main/java/com/cloud/api/ApiSessionListener.java +++ b/server/src/main/java/com/cloud/api/ApiSessionListener.java @@ -20,14 +20,15 @@ import javax.servlet.http.HttpSession; import javax.servlet.http.HttpSessionEvent; import javax.servlet.http.HttpSessionListener; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @WebListener public class ApiSessionListener implements HttpSessionListener { - public static final Logger LOGGER = Logger.getLogger(ApiSessionListener.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); private static Map sessions = new ConcurrentHashMap<>(); /** @@ -45,27 +46,27 @@ public static long getNumberOfSessions() { } public void sessionCreated(HttpSessionEvent event) { - 
if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Session created by Id : " + event.getSession().getId() + " , session: " + event.getSession().toString() + " , source: " + event.getSource().toString() + " , event: " + event.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Session created by Id : " + event.getSession().getId() + " , session: " + event.getSession().toString() + " , source: " + event.getSource().toString() + " , event: " + event.toString()); } synchronized (this) { HttpSession session = event.getSession(); sessions.put(session.getId(), event.getSession()); } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Sessions count: " + getSessionCount()); + if (logger.isTraceEnabled()) { + logger.trace("Sessions count: " + getSessionCount()); } } public void sessionDestroyed(HttpSessionEvent event) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Session destroyed by Id : " + event.getSession().getId() + " , session: " + event.getSession().toString() + " , source: " + event.getSource().toString() + " , event: " + event.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Session destroyed by Id : " + event.getSession().getId() + " , session: " + event.getSession().toString() + " , source: " + event.getSource().toString() + " , event: " + event.toString()); } synchronized (this) { sessions.remove(event.getSession().getId()); } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Sessions count: " + getSessionCount()); + if (logger.isTraceEnabled()) { + logger.trace("Sessions count: " + getSessionCount()); } } } diff --git a/server/src/main/java/com/cloud/api/EncodedStringTypeAdapter.java b/server/src/main/java/com/cloud/api/EncodedStringTypeAdapter.java index 50dbd0d48cd7..a97541c8bc38 100644 --- a/server/src/main/java/com/cloud/api/EncodedStringTypeAdapter.java +++ b/server/src/main/java/com/cloud/api/EncodedStringTypeAdapter.java @@ -18,7 +18,8 @@ import java.lang.reflect.Type; -import org.apache.log4j.Logger; +import 
org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.google.gson.JsonElement; import com.google.gson.JsonPrimitive; @@ -28,7 +29,7 @@ import com.cloud.utils.encoding.URLEncoder; public class EncodedStringTypeAdapter implements JsonSerializer { - public static final Logger s_logger = Logger.getLogger(EncodedStringTypeAdapter.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); @Override public JsonElement serialize(String src, Type typeOfResponseObj, JsonSerializationContext ctx) { @@ -36,14 +37,14 @@ public JsonElement serialize(String src, Type typeOfResponseObj, JsonSerializati } - private static String encodeString(String value) { + private String encodeString(String value) { if (!ApiServer.isEncodeApiResponse()) { return value; } try { return new URLEncoder().encode(value).replaceAll("\\+", "%20"); } catch (Exception e) { - s_logger.warn("Unable to encode: " + value, e); + logger.warn("Unable to encode: " + value, e); } return value; } diff --git a/server/src/main/java/com/cloud/api/ResponseObjectTypeAdapter.java b/server/src/main/java/com/cloud/api/ResponseObjectTypeAdapter.java index f6f777efe3f9..1aee9bd5fb63 100644 --- a/server/src/main/java/com/cloud/api/ResponseObjectTypeAdapter.java +++ b/server/src/main/java/com/cloud/api/ResponseObjectTypeAdapter.java @@ -23,7 +23,8 @@ import org.apache.cloudstack.api.response.ExceptionResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.google.gson.JsonElement; import com.google.gson.JsonObject; @@ -31,7 +32,7 @@ import com.google.gson.JsonSerializer; public class ResponseObjectTypeAdapter implements JsonSerializer { - public static final Logger s_logger = Logger.getLogger(ResponseObjectTypeAdapter.class.getName()); + protected Logger logger = 
LogManager.getLogger(getClass()); @Override public JsonElement serialize(ResponseObject responseObj, Type typeOfResponseObj, JsonSerializationContext ctx) { @@ -53,16 +54,16 @@ public JsonElement serialize(ResponseObject responseObj, Type typeOfResponseObj, } } - private static Method getGetMethod(Object o, String propName) { + private Method getGetMethod(Object o, String propName) { Method method = null; String methodName = getGetMethodName("get", propName); try { method = o.getClass().getMethod(methodName); } catch (SecurityException e1) { - s_logger.error("Security exception in getting ResponseObject " + o.getClass().getName() + " get method for property: " + propName); + logger.error("Security exception in getting ResponseObject " + o.getClass().getName() + " get method for property: " + propName); } catch (NoSuchMethodException e1) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ResponseObject " + o.getClass().getName() + " does not have " + methodName + "() method for property: " + propName + + if (logger.isTraceEnabled()) { + logger.trace("ResponseObject " + o.getClass().getName() + " does not have " + methodName + "() method for property: " + propName + ", will check is-prefixed method to see if it is boolean property"); } } @@ -74,9 +75,9 @@ private static Method getGetMethod(Object o, String propName) { try { method = o.getClass().getMethod(methodName); } catch (SecurityException e1) { - s_logger.error("Security exception in getting ResponseObject " + o.getClass().getName() + " get method for property: " + propName); + logger.error("Security exception in getting ResponseObject " + o.getClass().getName() + " get method for property: " + propName); } catch (NoSuchMethodException e1) { - s_logger.warn("ResponseObject " + o.getClass().getName() + " does not have " + methodName + "() method for property: " + propName); + logger.warn("ResponseObject " + o.getClass().getName() + " does not have " + methodName + "() method for property: " + propName); } 
return method; } diff --git a/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java b/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java index 1b8c2689063c..907ef088ee8d 100644 --- a/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java +++ b/server/src/main/java/com/cloud/api/auth/APIAuthenticationManagerImpl.java @@ -22,7 +22,6 @@ import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.auth.APIAuthenticationManager; @@ -34,7 +33,6 @@ @SuppressWarnings("unchecked") public class APIAuthenticationManagerImpl extends ManagerBase implements APIAuthenticationManager { - public static final Logger s_logger = Logger.getLogger(APIAuthenticationManagerImpl.class.getName()); private List _apiAuthenticators; @@ -87,7 +85,7 @@ public List> getCommands() { if (commands != null) { cmdList.addAll(commands); } else { - s_logger.warn("API Authenticator returned null api commands:" + apiAuthenticator.getName()); + logger.warn("API Authenticator returned null api commands:" + apiAuthenticator.getName()); } } return cmdList; @@ -103,8 +101,8 @@ public APIAuthenticator getAPIAuthenticator(String name) { apiAuthenticator = ComponentContext.inject(apiAuthenticator); apiAuthenticator.setAuthenticators(_apiAuthenticators); } catch (InstantiationException | IllegalAccessException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("APIAuthenticationManagerImpl::getAPIAuthenticator failed: " + e.getMessage()); + if (logger.isDebugEnabled()) { + logger.debug("APIAuthenticationManagerImpl::getAPIAuthenticator failed: " + e.getMessage()); } } } diff --git a/server/src/main/java/com/cloud/api/auth/DefaultLoginAPIAuthenticatorCmd.java b/server/src/main/java/com/cloud/api/auth/DefaultLoginAPIAuthenticatorCmd.java index 63385e22e32e..c9b03a85f4c7 100644 --- 
a/server/src/main/java/com/cloud/api/auth/DefaultLoginAPIAuthenticatorCmd.java +++ b/server/src/main/java/com/cloud/api/auth/DefaultLoginAPIAuthenticatorCmd.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.auth.APIAuthenticator; import org.apache.cloudstack.api.auth.PluggableAPIAuthenticator; import org.apache.cloudstack.api.response.LoginCmdResponse; -import org.apache.log4j.Logger; import org.jetbrains.annotations.Nullable; import javax.inject.Inject; @@ -48,7 +47,6 @@ @APICommand(name = "login", description = "Logs a user into the CloudStack. A successful login attempt will generate a JSESSIONID cookie value that can be passed in subsequent Query command calls until the \"logout\" command has been issued or the session has expired.", requestHasSensitiveInfo = true, responseObject = LoginCmdResponse.class, entityType = {}) public class DefaultLoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(DefaultLoginAPIAuthenticatorCmd.class.getName()); ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -128,7 +126,7 @@ public String authenticate(String command, Map params, HttpSes } auditTrailSb.append(" domainid=" + domainId);// building the params for POST call } catch (final NumberFormatException e) { - s_logger.warn("Invalid domain id entered by user"); + logger.warn("Invalid domain id entered by user"); auditTrailSb.append(" " + HttpServletResponse.SC_UNAUTHORIZED + " " + "Invalid domain id entered, please enter a valid one"); throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, _apiServer.getSerializedApiError(HttpServletResponse.SC_UNAUTHORIZED, "Invalid domain id entered, please enter a valid one", params, @@ -163,8 +161,8 @@ public String authenticate(String command, Map params, HttpSes "failed to authenticate user, check if username/password are correct"); auditTrailSb.append(" " + ApiErrorCode.ACCOUNT_ERROR + " " + 
msg); serializedResponse = _apiServer.getSerializedApiError(ApiErrorCode.ACCOUNT_ERROR.getHttpCode(), msg, params, responseType); - if (s_logger.isTraceEnabled()) { - s_logger.trace(msg); + if (logger.isTraceEnabled()) { + logger.trace(msg); } } } diff --git a/server/src/main/java/com/cloud/api/auth/DefaultLogoutAPIAuthenticatorCmd.java b/server/src/main/java/com/cloud/api/auth/DefaultLogoutAPIAuthenticatorCmd.java index 29d44e884c4b..6248f8f2f0ff 100644 --- a/server/src/main/java/com/cloud/api/auth/DefaultLogoutAPIAuthenticatorCmd.java +++ b/server/src/main/java/com/cloud/api/auth/DefaultLogoutAPIAuthenticatorCmd.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.auth.APIAuthenticator; import org.apache.cloudstack.api.auth.PluggableAPIAuthenticator; import org.apache.cloudstack.api.response.LogoutCmdResponse; -import org.apache.log4j.Logger; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -38,7 +37,6 @@ @APICommand(name = "logout", description = "Logs out the user", responseObject = LogoutCmdResponse.class, entityType = {}) public class DefaultLogoutAPIAuthenticatorCmd extends BaseCmd implements APIAuthenticator { - public static final Logger s_logger = Logger.getLogger(DefaultLogoutAPIAuthenticatorCmd.class.getName()); ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/server/src/main/java/com/cloud/api/auth/SetupUserTwoFactorAuthenticationCmd.java b/server/src/main/java/com/cloud/api/auth/SetupUserTwoFactorAuthenticationCmd.java index 32a8f49f2c44..50be604e23c9 100644 --- a/server/src/main/java/com/cloud/api/auth/SetupUserTwoFactorAuthenticationCmd.java +++ b/server/src/main/java/com/cloud/api/auth/SetupUserTwoFactorAuthenticationCmd.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; import 
org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import javax.inject.Inject; @@ -36,7 +35,6 @@ public class SetupUserTwoFactorAuthenticationCmd extends BaseCmd { public static final String APINAME = "setupUserTwoFactorAuthentication"; - public static final Logger s_logger = Logger.getLogger(SetupUserTwoFactorAuthenticationCmd.class.getName()); @Inject private AccountManager accountManager; diff --git a/server/src/main/java/com/cloud/api/auth/ValidateUserTwoFactorAuthenticationCodeCmd.java b/server/src/main/java/com/cloud/api/auth/ValidateUserTwoFactorAuthenticationCodeCmd.java index df9f8bfdab82..c5914e948db4 100644 --- a/server/src/main/java/com/cloud/api/auth/ValidateUserTwoFactorAuthenticationCodeCmd.java +++ b/server/src/main/java/com/cloud/api/auth/ValidateUserTwoFactorAuthenticationCodeCmd.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.resourcedetail.UserDetailVO; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.servlet.http.HttpServletRequest; @@ -54,7 +53,6 @@ public class ValidateUserTwoFactorAuthenticationCodeCmd extends BaseCmd implements APIAuthenticator { public static final String APINAME = "validateUserTwoFactorAuthenticationCode"; - public static final Logger s_logger = Logger.getLogger(ValidateUserTwoFactorAuthenticationCodeCmd.class.getName()); @Inject private AccountManager accountManager; @@ -125,8 +123,8 @@ public String authenticate(String command, Map params, HttpSes "failed to authenticate user, check if two factor authentication code is correct"); auditTrailSb.append(" " + ApiErrorCode.UNAUTHORIZED2FA + " " + msg); serializedResponse = _apiServer.getSerializedApiError(ApiErrorCode.UNAUTHORIZED2FA.getHttpCode(), msg, params, responseType); - if (s_logger.isTraceEnabled()) { - s_logger.trace(msg); + if (logger.isTraceEnabled()) { + logger.trace(msg); } } ServerApiException exception = new 
ServerApiException(ApiErrorCode.UNAUTHORIZED2FA, serializedResponse); diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java index 009d88a983bd..bfe256305d51 100644 --- a/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamGenericValidationWorker.java @@ -25,7 +25,8 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; /** * This worker validates parameters in a generic way, by using annotated @@ -37,7 +38,7 @@ */ public class ParamGenericValidationWorker implements DispatchWorker { - static Logger s_logger = Logger.getLogger(ParamGenericValidationWorker.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); protected static final List defaultParamNames = new ArrayList(); @@ -101,7 +102,7 @@ public void handle(final DispatchTask task) { } if (foundUnknownParam) { - s_logger.warn(String.format("Received unknown parameters for command %s. %s", cmd.getActualCommandName(), errorMsg)); + logger.warn(String.format("Received unknown parameters for command %s. 
%s", cmd.getActualCommandName(), errorMsg)); } } diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java index 9f07db4b033b..bdba8dcace2d 100644 --- a/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamProcessWorker.java @@ -50,7 +50,8 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; @@ -62,7 +63,7 @@ public class ParamProcessWorker implements DispatchWorker { - private static final Logger s_logger = Logger.getLogger(ParamProcessWorker.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); private static final String inputFormatString = "yyyy-MM-dd"; private static final String newInputFormatString = "yyyy-MM-dd HH:mm:ss"; public static final DateFormat inputFormat = new SimpleDateFormat(inputFormatString); @@ -184,16 +185,16 @@ public void processParameters(final BaseCmd cmd, final Map params) { validateField(paramObj, parameterAnnotation); setFieldValue(field, cmd, paramObj, parameterAnnotation); } catch (final IllegalArgumentException argEx) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to execute API command " + cmd.getCommandName() + " due to invalid value " + paramObj + " for parameter " + + if (logger.isDebugEnabled()) { + logger.debug("Unable to execute API command " + cmd.getCommandName() + " due to invalid value " + paramObj + " for parameter " + parameterAnnotation.name()); } throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to execute API command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + " due to 
invalid value " + paramObj + " for parameter " + parameterAnnotation.name()); } catch (final ParseException parseEx) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Invalid date parameter " + paramObj + " passed to command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8)); + if (logger.isDebugEnabled()) { + logger.debug("Invalid date parameter " + paramObj + " passed to command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8)); } throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to parse date " + paramObj + " for command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + ", please pass dates in the format mentioned in the api documentation"); @@ -201,7 +202,7 @@ public void processParameters(final BaseCmd cmd, final Map params) { throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Unable to execute API command " + cmd.getCommandName().substring(0, cmd.getCommandName().length() - 8) + " due to invalid value. " + invEx.getMessage()); } catch (final CloudRuntimeException cloudEx) { - s_logger.error("CloudRuntimeException", cloudEx); + logger.error("CloudRuntimeException", cloudEx); // FIXME: Better error message? 
This only happens if the API command is not executable, which typically //means // there was @@ -296,8 +297,8 @@ private void doAccessChecks(BaseCmd cmd, Map entitiesToAcces owners = entityOwners.stream().map(id -> _accountMgr.getAccount(id)).toArray(Account[]::new); } else { if (cmd.getEntityOwnerId() == Account.ACCOUNT_ID_SYSTEM && cmd instanceof BaseAsyncCmd && ((BaseAsyncCmd)cmd).getApiResourceType() == ApiCommandResourceType.Network) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skipping access check on the network owner if the owner is ROOT/system."); + if (logger.isDebugEnabled()) { + logger.debug("Skipping access check on the network owner if the owner is ROOT/system."); } owners = new Account[]{}; } else { @@ -404,7 +405,7 @@ private void setFieldValue(final Field field, final BaseCmd cmdObj, final Object case STRING: if ((paramObj != null)) { if (paramObj.toString().length() > annotation.length()) { - s_logger.error("Value greater than max allowed length " + annotation.length() + " for param: " + field.getName()); + logger.error("Value greater than max allowed length " + annotation.length() + " for param: " + field.getName()); throw new InvalidParameterValueException("Value greater than max allowed length " + annotation.length() + " for param: " + field.getName()); } else { field.set(cmdObj, paramObj.toString()); @@ -417,7 +418,7 @@ private void setFieldValue(final Field field, final BaseCmd cmdObj, final Object break; } } catch (final IllegalAccessException ex) { - s_logger.error("Error initializing command " + cmdObj.getCommandName() + ", field " + field.getName() + " is not accessible."); + logger.error("Error initializing command " + cmdObj.getCommandName() + ", field " + field.getName() + " is not accessible."); throw new CloudRuntimeException("Internal error initializing parameters for command " + cmdObj.getCommandName() + " [field " + field.getName() + " is not accessible]"); } @@ -427,16 +428,16 @@ private void parseAndSetDate(Field 
field, BaseCmd cmdObj, Object paramObj) throw field.set(cmdObj, DateUtil.parseTZDateString(paramObj.toString())); return; } catch (ParseException parseException) { - s_logger.debug(String.format("Could not parse date [%s] with timezone parser, trying to parse without timezone.", paramObj)); + logger.debug(String.format("Could not parse date [%s] with timezone parser, trying to parse without timezone.", paramObj)); } if (isObjInNewDateFormat(paramObj.toString())) { - s_logger.debug(String.format("Parsing date [%s] using the [%s] format.", paramObj, newInputFormatString)); + logger.debug(String.format("Parsing date [%s] using the [%s] format.", paramObj, newInputFormatString)); final DateFormat newFormat = newInputFormat; synchronized (newFormat) { field.set(cmdObj, newFormat.parse(paramObj.toString())); } } else { - s_logger.debug(String.format("Parsing date [%s] using the [%s] format.", paramObj, inputFormatString)); + logger.debug(String.format("Parsing date [%s] using the [%s] format.", paramObj, inputFormatString)); final DateFormat format = inputFormat; synchronized (format) { Date date = format.parse(paramObj.toString()); @@ -523,8 +524,8 @@ private Long translateUuidToInternalId(final String uuid, final Parameter annota } } if (internalId == null) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Object entity uuid = " + uuid + " does not exist in the database."); + if (logger.isDebugEnabled()) + logger.debug("Object entity uuid = " + uuid + " does not exist in the database."); throw new InvalidParameterValueException("Invalid parameter " + annotation.name() + " value=" + uuid + " due to incorrect long value format, or entity does not exist or due to incorrect parameter annotation for the field in api cmd class."); } diff --git a/server/src/main/java/com/cloud/api/dispatch/ParamUnpackWorker.java b/server/src/main/java/com/cloud/api/dispatch/ParamUnpackWorker.java index c9bad2c686c3..1bde40b678a6 100644 --- 
a/server/src/main/java/com/cloud/api/dispatch/ParamUnpackWorker.java +++ b/server/src/main/java/com/cloud/api/dispatch/ParamUnpackWorker.java @@ -19,14 +19,15 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; public class ParamUnpackWorker implements DispatchWorker { - private static final Logger s_logger = Logger.getLogger(ParamUnpackWorker.class); + protected Logger logger = LogManager.getLogger(getClass()); @SuppressWarnings({"unchecked", "rawtypes"}) @Override @@ -76,7 +77,7 @@ public void handle(final DispatchTask task) throws ServerApiException { parsedIndex = true; } } catch (final NumberFormatException nfe) { - s_logger.warn("Invalid parameter " + key + " received, unable to parse object array, returning an error."); + logger.warn("Invalid parameter " + key + " received, unable to parse object array, returning an error."); } if (!parsedIndex) { diff --git a/server/src/main/java/com/cloud/api/doc/ApiXmlDocWriter.java b/server/src/main/java/com/cloud/api/doc/ApiXmlDocWriter.java index d4beb2477098..5de5cd03fe13 100644 --- a/server/src/main/java/com/cloud/api/doc/ApiXmlDocWriter.java +++ b/server/src/main/java/com/cloud/api/doc/ApiXmlDocWriter.java @@ -37,7 +37,8 @@ import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.io.File; import java.io.FileInputStream; @@ -64,7 +65,7 @@ import java.util.zip.ZipOutputStream; public class ApiXmlDocWriter { - public static final Logger s_logger = Logger.getLogger(ApiXmlDocWriter.class.getName()); + protected static Logger LOGGER = 
LogManager.getLogger(ApiXmlDocWriter.class); private static String s_dirName = ""; private static Map> s_apiNameCmdClassMap = new HashMap>(); @@ -233,7 +234,7 @@ private static void writeCommand(ObjectOutputStream out, String command) throws out.writeObject(apiCommand); } else { - s_logger.debug("Command " + command + " is not exposed in api doc"); + LOGGER.debug("Command " + command + " is not exposed in api doc"); } } @@ -388,7 +389,7 @@ static void addDir(File dirObj, ZipOutputStream out) throws IOException { out.closeEntry(); }catch(IOException ex) { - s_logger.error("addDir:Exception:"+ ex.getMessage(),ex); + LOGGER.error("addDir:Exception:"+ ex.getMessage(),ex); } } } @@ -417,9 +418,9 @@ private static void writeAlertTypes(String dirName) { } } } catch (IOException e) { - s_logger.error("Failed to create output stream to write an alert types ", e); + LOGGER.error("Failed to create output stream to write an alert types ", e); } catch (IllegalAccessException e) { - s_logger.error("Failed to read alert fields ", e); + LOGGER.error("Failed to read alert fields ", e); } } diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index c3807fdb8d7e..8aa28849b4ec 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -178,7 +178,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.query.dao.AccountJoinDao; @@ -332,7 +331,6 @@ @Component public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements QueryService, Configurable { - public static final Logger s_logger = Logger.getLogger(QueryManagerImpl.class); private static final String ID_FIELD = "id"; @@ -2156,10 +2154,10 @@ public 
ListResponse searchForServers(ListHostsCmd cmd) { // FIXME: do we need to support list hosts with VmId, maybe we should // create another command just for this // Right now it is handled separately outside this QueryService - s_logger.debug(">>>Searching for hosts>>>"); + logger.debug(">>>Searching for hosts>>>"); Pair, Integer> hosts = searchForServersInternal(cmd); ListResponse response = new ListResponse(); - s_logger.debug(">>>Generating Response>>>"); + logger.debug(">>>Generating Response>>>"); List hostResponses = ViewResponseHelper.createHostResponse(cmd.getDetails(), hosts.first().toArray(new HostJoinVO[hosts.first().size()])); response.setResponses(hostResponses, hosts.second()); return response; @@ -3715,7 +3713,7 @@ private Pair, Integer> listDataCentersInternal(ListZonesC List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - s_logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account:" + account.getAccountName()); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -3755,7 +3753,7 @@ private Pair, Integer> listDataCentersInternal(ListZonesC List domainIds = new ArrayList(); DomainVO domainRecord = _domainDao.findById(account.getDomainId()); if (domainRecord == null) { - s_logger.error("Could not find the domainId for account:" + account.getAccountName()); + logger.error("Could not find the domainId for account:" + account.getAccountName()); throw new CloudAuthenticationException("Could not find the domainId for account:" + account.getAccountName()); } domainIds.add(domainRecord.getId()); @@ -3981,13 +3979,13 @@ private Pair, Integer> searchForTemplatesInternal(Long temp throw new InvalidParameterValueException("Please specify a valid template ID."); }// If ISO requested then it should be ISO. 
if (isIso && template.getFormat() != ImageFormat.ISO) { - s_logger.error("Template Id " + templateId + " is not an ISO"); + logger.error("Template Id " + templateId + " is not an ISO"); InvalidParameterValueException ex = new InvalidParameterValueException("Specified Template Id is not an ISO"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; }// If ISO not requested then it shouldn't be an ISO. if (!isIso && template.getFormat() == ImageFormat.ISO) { - s_logger.error("Incorrect format of the template id " + templateId); + logger.error("Incorrect format of the template id " + templateId); InvalidParameterValueException ex = new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the specified template id"); ex.addProxyObject(template.getUuid(), "templateId"); throw ex; @@ -4123,7 +4121,7 @@ else if (!template.isPublicTemplate() && caller.getType() != Account.Type.ADMIN) */ protected void applyPublicTemplateSharingRestrictions(SearchCriteria sc, Account caller) { if (caller.getType() == Account.Type.ADMIN) { - s_logger.debug(String.format("Account [%s] is a root admin. Therefore, it has access to all public templates.", caller)); + logger.debug(String.format("Account [%s] is a root admin. 
Therefore, it has access to all public templates.", caller)); return; } @@ -4135,7 +4133,7 @@ protected void applyPublicTemplateSharingRestrictions(SearchCriteria unsharableDomainIds) { if (domainId == account.getDomainId()) { - s_logger.trace(String.format("Domain [%s] will not be added to the set of domains with unshared templates since the account [%s] belongs to it.", domainId, account)); + logger.trace(String.format("Domain [%s] will not be added to the set of domains with unshared templates since the account [%s] belongs to it.", domainId, account)); return; } if (unsharableDomainIds.contains(domainId)) { - s_logger.trace(String.format("Domain [%s] is already on the set of domains with unshared templates.", domainId)); + logger.trace(String.format("Domain [%s] is already on the set of domains with unshared templates.", domainId)); return; } if (!checkIfDomainSharesTemplates(domainId)) { - s_logger.debug(String.format("Domain [%s] will be added to the set of domains with unshared templates as configuration [%s] is false.", domainId, QueryService.SharePublicTemplatesWithOtherDomains.key())); + logger.debug(String.format("Domain [%s] will be added to the set of domains with unshared templates as configuration [%s] is false.", domainId, QueryService.SharePublicTemplatesWithOtherDomains.key())); unsharableDomainIds.add(domainId); } } @@ -4791,7 +4789,7 @@ protected ManagementServerResponse createManagementServerResponse(ManagementServ @Override public List listRouterHealthChecks(GetRouterHealthCheckResultsCmd cmd) { - s_logger.info("Executing health check command " + cmd); + logger.info("Executing health check command " + cmd); long routerId = cmd.getRouterId(); if (!VirtualNetworkApplianceManager.RouterHealthChecksEnabled.value()) { throw new CloudRuntimeException("Router health checks are not enabled for router " + routerId); diff --git a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java 
b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java index 44096a799b7a..ebeab0b07731 100644 --- a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java @@ -61,7 +61,8 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.AccountJoinVO; @@ -101,7 +102,7 @@ */ public class ViewResponseHelper { - public static final Logger s_logger = Logger.getLogger(ViewResponseHelper.class); + protected static Logger logger = LogManager.getLogger(ViewResponseHelper.class); public static List createUserResponse(UserAccountJoinVO... users) { return createUserResponse(null, users); } diff --git a/server/src/main/java/com/cloud/api/query/dao/AccountJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/AccountJoinDaoImpl.java index 790758c627fd..d4871a1d1364 100644 --- a/server/src/main/java/com/cloud/api/query/dao/AccountJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/AccountJoinDaoImpl.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants.DomainDetails; @@ -44,7 +43,6 @@ @Component public class AccountJoinDaoImpl extends GenericDaoBase implements AccountJoinDao { - public static final Logger s_logger = Logger.getLogger(AccountJoinDaoImpl.class); private final SearchBuilder acctIdSearch; @Inject diff --git a/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java index 3c28106ea6c6..2a876ea82265 100644 ---
a/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/AffinityGroupJoinDaoImpl.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -34,7 +33,6 @@ import com.cloud.utils.db.SearchCriteria; public class AffinityGroupJoinDaoImpl extends GenericDaoBase implements AffinityGroupJoinDao { - public static final Logger s_logger = Logger.getLogger(AffinityGroupJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java index 32cd1c21dd6c..319e08deb39d 100644 --- a/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/AsyncJobJoinDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResponseObject; @@ -36,7 +35,6 @@ @Component public class AsyncJobJoinDaoImpl extends GenericDaoBase implements AsyncJobJoinDao { - public static final Logger s_logger = Logger.getLogger(AsyncJobJoinDaoImpl.class); private final SearchBuilder jobIdSearch; diff --git a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index 50c5275390ed..24b7df5591ff 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -28,7 +28,6 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.ObjectUtils; -import org.apache.log4j.Logger; import 
org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -46,7 +45,6 @@ @Component public class DataCenterJoinDaoImpl extends GenericDaoBase implements DataCenterJoinDao { - public static final Logger s_logger = Logger.getLogger(DataCenterJoinDaoImpl.class); private SearchBuilder dofIdSearch; @Inject diff --git a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java index 9592986151fc..1a2f4c534f66 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java @@ -26,7 +26,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -44,7 +43,6 @@ @Component public class DiskOfferingJoinDaoImpl extends GenericDaoBase implements DiskOfferingJoinDao { - public static final Logger s_logger = Logger.getLogger(DiskOfferingJoinDaoImpl.class); @Inject VsphereStoragePolicyDao _vsphereStoragePolicyDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/DomainJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DomainJoinDaoImpl.java index 56f5417da3fa..61ac12e8c853 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DomainJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DomainJoinDaoImpl.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ResourceLimitAndCountResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -44,7 +43,6 @@ @Component public class DomainJoinDaoImpl extends GenericDaoBase implements 
DomainJoinDao { - public static final Logger s_logger = Logger.getLogger(DomainJoinDaoImpl.class); private SearchBuilder domainIdSearch; diff --git a/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java index e3011bc4d66e..c6041c3e3732 100644 --- a/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/DomainRouterJoinDaoImpl.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.DomainRouterResponse; @@ -51,7 +50,6 @@ @Component public class DomainRouterJoinDaoImpl extends GenericDaoBase implements DomainRouterJoinDao { - public static final Logger s_logger = Logger.getLogger(DomainRouterJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java index da81f42b41d9..f67c6d75994d 100644 --- a/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/HostJoinDaoImpl.java @@ -42,7 +42,6 @@ import org.apache.cloudstack.ha.dao.HAConfigDao; import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -62,7 +61,6 @@ @Component public class HostJoinDaoImpl extends GenericDaoBase implements HostJoinDao { - public static final Logger s_logger = Logger.getLogger(HostJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; @@ -249,7 +247,7 @@ public HostResponse 
newHostResponse(HostJoinVO host, EnumSet detail try { hostResponse.setDetails(hostDetails); } catch (Exception e) { - s_logger.debug("failed to get host details", e); + logger.debug("failed to get host details", e); } } diff --git a/server/src/main/java/com/cloud/api/query/dao/HostTagDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/HostTagDaoImpl.java index 5395fd4a33b1..d2a34bf5e588 100644 --- a/server/src/main/java/com/cloud/api/query/dao/HostTagDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/HostTagDaoImpl.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.api.response.HostTagResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.query.vo.HostTagVO; @@ -33,7 +32,6 @@ @Component public class HostTagDaoImpl extends GenericDaoBase implements HostTagDao { - public static final Logger s_logger = Logger.getLogger(HostTagDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java index 9c20d18678bb..9a0c271fdb48 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ImageStoreResponse; @@ -42,7 +41,6 @@ @Component public class ImageStoreJoinDaoImpl extends GenericDaoBase implements ImageStoreJoinDao { - public static final Logger s_logger = Logger.getLogger(ImageStoreJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git 
a/server/src/main/java/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java index 61e73d474d3c..4605c20287bb 100644 --- a/server/src/main/java/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/InstanceGroupJoinDaoImpl.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.InstanceGroupResponse; @@ -39,7 +38,6 @@ @Component public class InstanceGroupJoinDaoImpl extends GenericDaoBase implements InstanceGroupJoinDao { - public static final Logger s_logger = Logger.getLogger(InstanceGroupJoinDaoImpl.class); private SearchBuilder vrIdSearch; diff --git a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java index d50f1610e69a..cdf414c8e59a 100644 --- a/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/NetworkOfferingJoinDaoImpl.java @@ -28,7 +28,6 @@ import com.cloud.utils.db.TransactionLegacy; import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.api.response.NetworkOfferingResponse; -import org.apache.log4j.Logger; import com.cloud.api.query.vo.NetworkOfferingJoinVO; import com.cloud.offering.NetworkOffering; @@ -38,7 +37,6 @@ import com.cloud.utils.net.NetUtils; public class NetworkOfferingJoinDaoImpl extends GenericDaoBase implements NetworkOfferingJoinDao { - public static final Logger s_logger = Logger.getLogger(NetworkOfferingJoinDaoImpl.class); private final SearchBuilder nofIdSearch; @@ -143,7 +141,7 @@ public NetworkOfferingJoinVO newNetworkOfferingView(NetworkOffering 
offering) { @Override public Map> listDomainsOfNetworkOfferingsUsedByDomainPath(String domainPath) { - s_logger.debug(String.format("Retrieving the domains of the network offerings used by domain with path [%s].", domainPath)); + logger.debug(String.format("Retrieving the domains of the network offerings used by domain with path [%s].", domainPath)); TransactionLegacy txn = TransactionLegacy.currentTxn(); try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_OF_NETWORK_OFFERINGS_USED_BY_DOMAIN_PATH)) { @@ -164,10 +162,10 @@ public Map> listDomainsOfNetworkOfferingsUsedByDomainPath(Str return domainsOfNetworkOfferingsUsedByDomainPath; } catch (SQLException e) { - s_logger.error(String.format("Failed to retrieve the domains of the network offerings used by domain with path [%s] due to [%s]. Returning an empty " + logger.error(String.format("Failed to retrieve the domains of the network offerings used by domain with path [%s] due to [%s]. Returning an empty " + "list of domains.", domainPath, e.getMessage())); - s_logger.debug(String.format("Failed to retrieve the domains of the network offerings used by domain with path [%s]. Returning an empty " + + logger.debug(String.format("Failed to retrieve the domains of the network offerings used by domain with path [%s]. 
Returning an empty " + "list of domains.", domainPath), e); return new HashMap<>(); diff --git a/server/src/main/java/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java index bc650b3d8d32..3bd689025d45 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ProjectAccountJoinDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ProjectAccountResponse; @@ -32,7 +31,6 @@ @Component public class ProjectAccountJoinDaoImpl extends GenericDaoBase implements ProjectAccountJoinDao { - public static final Logger s_logger = Logger.getLogger(ProjectAccountJoinDaoImpl.class); private SearchBuilder paIdSearch; diff --git a/server/src/main/java/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java index 8e155da31b61..127b2526bf26 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ProjectInvitationJoinDaoImpl.java @@ -19,7 +19,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ProjectInvitationResponse; @@ -32,7 +31,6 @@ @Component public class ProjectInvitationJoinDaoImpl extends GenericDaoBase implements ProjectInvitationJoinDao { - public static final Logger s_logger = Logger.getLogger(ProjectInvitationJoinDaoImpl.class); private SearchBuilder piIdSearch; diff --git a/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java index d893a5ca37a9..d1aebb591085 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java 
+++ b/server/src/main/java/com/cloud/api/query/dao/ProjectJoinDaoImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants.DomainDetails; @@ -52,7 +51,6 @@ @Component public class ProjectJoinDaoImpl extends GenericDaoBase implements ProjectJoinDao { - public static final Logger s_logger = Logger.getLogger(ProjectJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java index e1f6c6592698..644858ac7221 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.ResourceTagResponse; @@ -38,7 +37,6 @@ @Component public class ResourceTagJoinDaoImpl extends GenericDaoBase implements ResourceTagJoinDao { - public static final Logger s_logger = Logger.getLogger(ResourceTagJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java index 0413d2162401..72a7e8a3d399 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/SecurityGroupJoinDaoImpl.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.SecurityGroupResponse; import org.apache.cloudstack.api.response.SecurityGroupRuleResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import 
com.cloud.api.ApiDBUtils; @@ -48,7 +47,6 @@ @Component public class SecurityGroupJoinDaoImpl extends GenericDaoBase implements SecurityGroupJoinDao { - public static final Logger s_logger = Logger.getLogger(SecurityGroupJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; @@ -130,7 +128,7 @@ public SecurityGroupResponse newSecurityGroupResponse(SecurityGroupJoinVO vsg, A } List securityGroupVmMap = _securityGroupVMMapDao.listBySecurityGroup(vsg.getId()); - s_logger.debug("newSecurityGroupResponse() -> virtualmachine count: " + securityGroupVmMap.size()); + logger.debug("newSecurityGroupResponse() -> virtualmachine count: " + securityGroupVmMap.size()); sgResponse.setVirtualMachineCount(securityGroupVmMap.size()); for(SecurityGroupVMMapVO securityGroupVMMapVO : securityGroupVmMap) { diff --git a/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java index 4a81cc8bfeec..41609e06be19 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -49,7 +48,6 @@ @Component public class ServiceOfferingJoinDaoImpl extends GenericDaoBase implements ServiceOfferingJoinDao { - public static final Logger s_logger = Logger.getLogger(ServiceOfferingJoinDaoImpl.class); @Inject VsphereStoragePolicyDao _vsphereStoragePolicyDao; @@ -187,7 +185,7 @@ public ServiceOfferingJoinVO newServiceOfferingView(ServiceOffering offering) { @Override public Map> listDomainsOfServiceOfferingsUsedByDomainPath(String domainPath) { - s_logger.debug(String.format("Retrieving the domains 
of the service offerings used by domain with path [%s].", domainPath)); + logger.debug(String.format("Retrieving the domains of the service offerings used by domain with path [%s].", domainPath)); TransactionLegacy txn = TransactionLegacy.currentTxn(); try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_OF_SERVICE_OFFERINGS_USED_BY_DOMAIN_PATH)) { @@ -208,10 +206,10 @@ public Map> listDomainsOfServiceOfferingsUsedByDomainPath(Str return domainsOfServiceOfferingsUsedByDomainPath; } catch (SQLException e) { - s_logger.error(String.format("Failed to retrieve the domains of the service offerings used by domain with path [%s] due to [%s]. Returning an empty " + logger.error(String.format("Failed to retrieve the domains of the service offerings used by domain with path [%s] due to [%s]. Returning an empty " + "list of domains.", domainPath, e.getMessage())); - s_logger.debug(String.format("Failed to retrieve the domains of the service offerings used by domain with path [%s]. Returning an empty " + logger.debug(String.format("Failed to retrieve the domains of the service offerings used by domain with path [%s]. 
Returning an empty " + "list of domains.", domainPath), e); return new HashMap<>(); diff --git a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java index a913dd7f568a..0810540a377b 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.query.QueryService; -import org.apache.log4j.Logger; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.SnapshotJoinVO; @@ -48,8 +47,6 @@ public class SnapshotJoinDaoImpl extends GenericDaoBaseWithTagInformation implements SnapshotJoinDao { - public static final Logger s_logger = Logger.getLogger(SnapshotJoinDaoImpl.class); - @Inject private AccountService accountService; @Inject @@ -86,7 +83,7 @@ private void setSnapshotInfoDetailsInResponse(SnapshotJoinVO snapshot, SnapshotR SnapshotInfo snapshotInfo = null; snapshotInfo = snapshotDataFactory.getSnapshotWithRoleAndZone(snapshot.getId(), snapshot.getStoreRole(), snapshot.getDataCenterId()); if (snapshotInfo == null) { - s_logger.debug("Unable to find info for image store snapshot with uuid " + snapshot.getUuid()); + logger.debug("Unable to find info for image store snapshot with uuid " + snapshot.getUuid()); snapshotResponse.setRevertable(false); } else { snapshotResponse.setRevertable(snapshotInfo.isRevertable()); diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java index c587eb40f189..76ca179868a8 100644 --- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java @@ -34,7 +34,6 @@ 
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -57,7 +56,6 @@ @Component public class StoragePoolJoinDaoImpl extends GenericDaoBase implements StoragePoolJoinDao { - public static final Logger s_logger = Logger.getLogger(StoragePoolJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; @@ -419,7 +417,7 @@ public List findStoragePoolByScopeAndRuleTags(Long datacenterId, if (storagePoolVO != null) { filteredPools.add(storagePoolVO); } else { - s_logger.warn(String.format("Unable to find Storage Pool [%s] in the DB.", storagePoolJoinVO.getUuid())); + logger.warn(String.format("Unable to find Storage Pool [%s] in the DB.", storagePoolJoinVO.getUuid())); } } } diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 501d413f117b..b50854a13e83 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.security.DigestHelper; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ResponseObject.ResponseView; @@ -86,7 +85,6 @@ @Component public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation implements TemplateJoinDao { - public static final Logger s_logger = Logger.getLogger(TemplateJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java 
b/server/src/main/java/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java index 4633c52ee598..c5b21f50d2d9 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserAccountJoinDaoImpl.java @@ -20,7 +20,6 @@ import com.cloud.user.AccountManagerImpl; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.response.UserResponse; @@ -34,7 +33,6 @@ @Component public class UserAccountJoinDaoImpl extends GenericDaoBase implements UserAccountJoinDao { - public static final Logger s_logger = Logger.getLogger(UserAccountJoinDaoImpl.class); private SearchBuilder vrIdSearch; diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index e5cc9ee72342..c331e913090e 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.query.QueryService; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -87,7 +86,6 @@ @Component public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation implements UserVmJoinDao { - public static final Logger s_logger = Logger.getLogger(UserVmJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index 8fcad6ed45ab..d7e79ce91815 100644 --- a/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -31,7 +31,6 @@ import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -49,7 +48,6 @@ @Component public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation implements VolumeJoinDao { - public static final Logger s_logger = Logger.getLogger(VolumeJoinDaoImpl.class); @Inject private ConfigurationDao _configDao; diff --git a/server/src/main/java/com/cloud/api/query/dao/VpcOfferingJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/VpcOfferingJoinDaoImpl.java index af0940609ba8..b525b063354f 100644 --- a/server/src/main/java/com/cloud/api/query/dao/VpcOfferingJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/VpcOfferingJoinDaoImpl.java @@ -21,7 +21,6 @@ import org.apache.cloudstack.api.response.VpcOfferingResponse; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.api.query.vo.VpcOfferingJoinVO; import com.cloud.network.vpc.VpcOffering; @@ -31,7 +30,6 @@ import com.cloud.utils.net.NetUtils; public class VpcOfferingJoinDaoImpl extends GenericDaoBase implements VpcOfferingJoinDao { - public static final Logger s_logger = Logger.getLogger(VpcOfferingJoinDaoImpl.class); private SearchBuilder sofIdSearch; diff --git a/server/src/main/java/com/cloud/api/response/ApiResponseSerializer.java b/server/src/main/java/com/cloud/api/response/ApiResponseSerializer.java index e61601654542..c72c275c212d 100644 --- a/server/src/main/java/com/cloud/api/response/ApiResponseSerializer.java +++ b/server/src/main/java/com/cloud/api/response/ApiResponseSerializer.java @@ -39,7 +39,8 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.context.CallContext; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.lang.reflect.Field; import java.lang.reflect.Modifier; @@ -52,10 +53,10 @@ import java.util.regex.Pattern; public class ApiResponseSerializer { - private static final Logger s_logger = Logger.getLogger(ApiResponseSerializer.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(ApiResponseSerializer.class); public static String toSerializedString(ResponseObject result, String responseType) { - s_logger.trace("===Serializing Response==="); + LOGGER.trace("===Serializing Response==="); if (HttpUtils.RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { return toJSONSerializedString(result, new StringBuilder()); } else { @@ -64,7 +65,7 @@ public static String toSerializedString(ResponseObject result, String responseTy } public static String toSerializedStringWithSecureLogs(ResponseObject result, String responseType, StringBuilder log) { - s_logger.trace("===Serializing Response==="); + LOGGER.trace("===Serializing Response==="); if (HttpUtils.RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { return toJSONSerializedString(result, log); } else { @@ -253,7 +254,7 @@ private static void serializeResponseObjFieldsXML(StringBuilder sb, StringBuilde } } if (!permittedParameter) { - s_logger.trace("Ignoring parameter " + param.name() + " as the caller is not authorized to see it"); + LOGGER.trace("Ignoring parameter " + param.name() + " as the caller is not authorized to see it"); continue; } } @@ -372,7 +373,7 @@ private static String encodeParam(String value) { try { return new URLEncoder().encode(value).replaceAll("\\+", "%20"); } catch (Exception e) { - s_logger.warn("Unable to encode: " + value, e); + LOGGER.warn("Unable to encode: " + value, e); } return value; } diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 
6926f67daea7..d325ae4b95c0 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -100,7 +99,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, StateListener, Listener, ResourceListener, Configurable { - private static final Logger s_logger = Logger.getLogger(CapacityManagerImpl.class); @Inject CapacityDao _capacityDao; @Inject @@ -180,7 +178,7 @@ public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserv if (hostId != null) { HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.warn("Host " + hostId + " no long exist anymore!"); + logger.warn("Host " + hostId + " no long exist anymore!"); return true; } @@ -218,9 +216,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { long actualTotalMem = capacityMemory.getTotalCapacity(); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); - s_logger.debug("Hosts's actual total RAM: " + toHumanReadableSize(actualTotalMem) + " and RAM after applying overprovisioning: " + toHumanReadableSize(totalMem)); + if (logger.isDebugEnabled()) { + logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + logger.debug("Hosts's actual total RAM: " + toHumanReadableSize(actualTotalMem) + " and RAM after applying overprovisioning: " + 
toHumanReadableSize(totalMem)); } if (!moveFromReserved) { @@ -256,11 +254,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } - s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + + logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + capacityCpu.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); - s_logger.debug("release mem from host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ",reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; new used: " + + logger.debug("release mem from host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ",reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; new used: " + toHumanReadableSize(capacityMemory.getUsedCapacity()) + ",reserved:" + toHumanReadableSize(capacityMemory.getReservedCapacity()) + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); @@ -272,7 +270,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { return true; } catch (Exception e) { - s_logger.debug("Failed to transit vm's state, due to " + e.getMessage()); + logger.debug("Failed to transit vm's state, due to " + e.getMessage()); return false; } } @@ -325,17 +323,17 @@ public void doInTransactionWithoutResult(TransactionStatus status) { long actualTotalMem = capacityMem.getTotalCapacity(); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and 
CPU after applying overprovisioning: " + totalCpu); + if (logger.isDebugEnabled()) { + logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } long freeCpu = totalCpu - (reservedCpu + usedCpu); long freeMem = totalMem - (reservedMem + usedMem); - if (s_logger.isDebugEnabled()) { - s_logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); - s_logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); - s_logger.debug("Current Used RAM: " + toHumanReadableSize(usedMem) + " , Free RAM:" + toHumanReadableSize(freeMem) + " ,Requested RAM: " + toHumanReadableSize(ram)); + if (logger.isDebugEnabled()) { + logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); + logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); + logger.debug("Current Used RAM: " + toHumanReadableSize(usedMem) + " , Free RAM:" + toHumanReadableSize(freeMem) + " ,Requested RAM: " + toHumanReadableSize(ram)); } capacityCpu.setUsedCapacity(usedCpu + cpu); capacityMem.setUsedCapacity(usedMem + ram); @@ -343,10 +341,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (fromLastHost) { /* alloc from reserved */ - if (s_logger.isDebugEnabled()) { - s_logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); - s_logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); - s_logger.debug("Reserved RAM: " + toHumanReadableSize(reservedMem) + " , Requested RAM: " + toHumanReadableSize(ram)); + if (logger.isDebugEnabled()) { + logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); + logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); + logger.debug("Reserved RAM: " + 
toHumanReadableSize(reservedMem) + " , Requested RAM: " + toHumanReadableSize(ram)); } if (reservedCpu >= cpu && reservedMem >= ram) { capacityCpu.setReservedCapacity(reservedCpu - cpu); @@ -356,18 +354,18 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } else { /* alloc from free resource */ if (!((reservedCpu + usedCpu + cpu <= totalCpu) && (reservedMem + usedMem + ram <= totalMem))) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host doesn't seem to have enough free capacity, but increasing the used capacity anyways, " + + if (logger.isDebugEnabled()) { + logger.debug("Host doesn't seem to have enough free capacity, but increasing the used capacity anyways, " + "since the VM is already starting on this host "); } } } - s_logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + + logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + capacityCpu.getUsedCapacity() + ", reserved:" + capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + ",alloc_from_last:" + fromLastHost); - s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ", old reserved: " + toHumanReadableSize(reservedMem) + ", total: " + + logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + toHumanReadableSize(usedMem) + ", old reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; new used: " + toHumanReadableSize(capacityMem.getUsedCapacity()) + ", reserved: " + toHumanReadableSize(capacityMem.getReservedCapacity()) + "; requested mem: " + toHumanReadableSize(ram) + ",alloc_from_last:" + fromLastHost); @@ -399,7 +397,7 @@ public void 
doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.error("Exception allocating VM capacity", e); + logger.error("Exception allocating VM capacity", e); if (e instanceof CloudRuntimeException) { throw e; } @@ -415,14 +413,14 @@ public boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer boolean isCpuNumGood = host.getCpus().intValue() >= cpuNum; boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; if (isCpuNumGood && isCpuSpeedGood) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + + if (logger.isDebugEnabled()) { + logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); } return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + + if (logger.isDebugEnabled()) { + logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); } return false; @@ -434,8 +432,8 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea boolean considerReservedCapacity) { boolean hasCapacity = false; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + toHumanReadableSize(ram) + + if (logger.isDebugEnabled()) { + logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + toHumanReadableSize(ram) + " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); } @@ -444,13 +442,13 @@ public boolean checkIfHostHasCapacity(long hostId, 
Integer cpu, long ram, boolea if (capacityCpu == null || capacityMem == null) { if (capacityCpu == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for hostId: " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for CPU not found in Db, for hostId: " + hostId); } } if (capacityMem == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot checkIfHostHasCapacity, Capacity entry for RAM not found in Db, for hostId: " + hostId); } } @@ -465,8 +463,8 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea long actualTotalMem = capacityMem.getTotalCapacity(); long totalCpu = (long)(actualTotalCpu * cpuOvercommitRatio); long totalMem = (long)(actualTotalMem * memoryOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + if (logger.isDebugEnabled()) { + logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } String failureReason = ""; @@ -474,10 +472,10 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea long freeCpu = reservedCpu; long freeMem = reservedMem; - if (s_logger.isDebugEnabled()) { - s_logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity"); - s_logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu); - s_logger.debug("Reserved RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram)); + if (logger.isDebugEnabled()) { + logger.debug("We need to allocate to the last host again, so checking if there is enough reserved capacity"); + 
logger.debug("Reserved CPU: " + freeCpu + " , Requested CPU: " + cpu); + logger.debug("Reserved RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram)); } /* alloc from reserved */ if (reservedCpu >= cpu) { @@ -495,8 +493,8 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea long reservedMemValueToUse = reservedMem; if (!considerReservedCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("considerReservedCapacity is" + considerReservedCapacity + " , not considering reserved capacity for calculating free capacity"); + if (logger.isDebugEnabled()) { + logger.debug("considerReservedCapacity is" + considerReservedCapacity + " , not considering reserved capacity for calculating free capacity"); } reservedCpuValueToUse = 0; reservedMemValueToUse = 0; @@ -504,9 +502,9 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea long freeCpu = totalCpu - (reservedCpuValueToUse + usedCpu); long freeMem = totalMem - (reservedMemValueToUse + usedMem); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Free CPU: " + freeCpu + " , Requested CPU: " + cpu); - s_logger.debug("Free RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram)); + if (logger.isDebugEnabled()) { + logger.debug("Free CPU: " + freeCpu + " , Requested CPU: " + cpu); + logger.debug("Free RAM: " + toHumanReadableSize(freeMem) + " , Requested RAM: " + toHumanReadableSize(ram)); } /* alloc from free resource */ if ((reservedCpuValueToUse + usedCpu + cpu <= totalCpu)) { @@ -521,29 +519,29 @@ public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolea } if (hasCapacity) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host has enough CPU and RAM available"); + if (logger.isDebugEnabled()) { + logger.debug("Host has enough CPU and RAM available"); } - s_logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + 
reservedCpu + ", actual total: " + actualTotalCpu + + logger.debug("STATS: Can alloc CPU from host: " + hostId + ", used: " + usedCpu + ", reserved: " + reservedCpu + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; requested cpu:" + cpu + ",alloc_from_last_host?:" + checkFromReservedCapacity + " ,considerReservedCapacity?: " + considerReservedCapacity); - s_logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + toHumanReadableSize(usedMem) + ", reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + + logger.debug("STATS: Can alloc MEM from host: " + hostId + ", used: " + toHumanReadableSize(usedMem) + ", reserved: " + toHumanReadableSize(reservedMem) + ", total: " + toHumanReadableSize(totalMem) + "; requested mem: " + toHumanReadableSize(ram) + ", alloc_from_last_host?: " + checkFromReservedCapacity + " , considerReservedCapacity?: " + considerReservedCapacity); } else { if (checkFromReservedCapacity) { - s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + + logger.debug("STATS: Failed to alloc resource from host: " + hostId + " reservedCpu: " + reservedCpu + ", requested cpu: " + cpu + ", reservedMem: " + toHumanReadableSize(reservedMem) + ", requested mem: " + toHumanReadableSize(ram)); } else { - s_logger.debug("STATS: Failed to alloc resource from host: " + hostId + ", reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + + logger.debug("STATS: Failed to alloc resource from host: " + hostId + ", reservedCpu: " + reservedCpu + ", used cpu: " + usedCpu + ", requested cpu: " + cpu + ", actual total cpu: " + actualTotalCpu + ", total cpu with overprovisioning: " + totalCpu + ", reservedMem: " + toHumanReadableSize(reservedMem) + ", used Mem: " + toHumanReadableSize(usedMem) + ", requested mem: " + toHumanReadableSize(ram) + ", total Mem:" + 
toHumanReadableSize(totalMem) + " ,considerReservedCapacity?: " + considerReservedCapacity); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(failureReason + ", cannot allocate to this host."); + if (logger.isDebugEnabled()) { + logger.debug(failureReason + ", cannot allocate to this host."); } } @@ -655,13 +653,13 @@ public void updateCapacityForHost(final Host host, final Map vms = _vmDao.listUpByHostId(host.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); } final List vosMigrating = _vmDao.listVmsMigratingFromHost(host.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + vosMigrating.size() + " VMs are Migrating from host " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found " + vosMigrating.size() + " VMs are Migrating from host " + host.getId()); } vms.addAll(vosMigrating); @@ -705,8 +703,8 @@ public void updateCapacityForHost(final Host host, final Map vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); } for (VMInstanceVO vm : vmsByLastHostId) { Float cpuOvercommitRatio = 1.0f; @@ -769,23 +767,23 @@ public void updateCapacityForHost(final Host host, final Map t Host lastHost = _hostDao.findById(vm.getLastHostId()); Host oldHost = _hostDao.findById(oldHostId); Host newHost = _hostDao.findById(vm.getHostId()); - s_logger.debug(String.format("%s state transited from [%s] to [%s] with event [%s]. VM's original host: %s, new host: %s, host before state transition: %s", vm, oldState, + logger.debug(String.format("%s state transited from [%s] to [%s] with event [%s]. 
VM's original host: %s, new host: %s, host before state transition: %s", vm, oldState, newState, event, lastHost, newHost, oldHost)); if (oldState == State.Starting) { @@ -972,7 +970,7 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t if ((newState == State.Starting || newState == State.Migrating || event == Event.AgentReportMigrated) && vm.getHostId() != null) { boolean fromLastHost = false; if (vm.getHostId().equals(vm.getLastHostId())) { - s_logger.debug("VM starting again on the last host it was stopped on"); + logger.debug("VM starting again on the last host it was stopped on"); fromLastHost = true; } allocateVmCapacity(vm, fromLastHost); @@ -1016,7 +1014,7 @@ private void createCapacityEntry(StartupCommand startup, HostVO server) { CapacityVOCpu.setReservedCapacity(0); CapacityVOCpu.setTotalCapacity(newTotalCpu); } else { - s_logger.debug("What? new cpu is :" + newTotalCpu + ", old one is " + CapacityVOCpu.getUsedCapacity() + "," + CapacityVOCpu.getReservedCapacity() + + logger.debug("What? new cpu is :" + newTotalCpu + ", old one is " + CapacityVOCpu.getUsedCapacity() + "," + CapacityVOCpu.getReservedCapacity() + "," + CapacityVOCpu.getTotalCapacity()); } _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu); @@ -1043,7 +1041,7 @@ private void createCapacityEntry(StartupCommand startup, HostVO server) { CapacityVOMem.setReservedCapacity(0); CapacityVOMem.setTotalCapacity(newTotalMem); } else { - s_logger.debug("What? new mem is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() + + logger.debug("What? 
new mem is :" + newTotalMem + ", old one is " + CapacityVOMem.getUsedCapacity() + "," + CapacityVOMem.getReservedCapacity() + "," + CapacityVOMem.getTotalCapacity()); } _capacityDao.update(CapacityVOMem.getId(), CapacityVOMem); @@ -1085,14 +1083,14 @@ public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequest float cpuConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_CPU, cpuRequested); if (cpuConsumption / clusterCpuOverProvisioning > clusterCpuCapacityDisableThreshold) { - s_logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / clusterCpuOverProvisioning + logger.debug("Cluster: " + clusterId + " cpu consumption " + cpuConsumption / clusterCpuOverProvisioning + " crosses disable threshold " + clusterCpuCapacityDisableThreshold); return true; } float memoryConsumption = _capacityDao.findClusterConsumption(clusterId, Capacity.CAPACITY_TYPE_MEMORY, ramRequested); if (memoryConsumption / clusterMemoryOverProvisioning > clusterMemoryCapacityDisableThreshold) { - s_logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning + logger.debug("Cluster: " + clusterId + " memory consumption " + memoryConsumption / clusterMemoryOverProvisioning + " crosses disable threshold " + clusterMemoryCapacityDisableThreshold); return true; } @@ -1230,14 +1228,14 @@ public void processPrepareMaintenaceEventBefore(Long hostId) { public boolean checkIfHostReachMaxGuestLimit(Host host) { HypervisorType hypervisorType = host.getHypervisorType(); if (hypervisorType.equals(HypervisorType.KVM)) { - s_logger.debug(String.format("Host {id: %s, name: %s, uuid: %s} is %s hypervisor type, no max guest limit check needed", host.getId(), host.getName(), host.getUuid(), hypervisorType)); + logger.debug(String.format("Host {id: %s, name: %s, uuid: %s} is %s hypervisor type, no max guest limit check needed", host.getId(), host.getName(), host.getUuid(), hypervisorType)); 
return false; } Long vmCount = _vmDao.countActiveByHostId(host.getId()); String hypervisorVersion = host.getHypervisorVersion(); Long maxGuestLimit = _hypervisorCapabilitiesDao.getMaxGuestsLimit(hypervisorType, hypervisorVersion); if (vmCount >= maxGuestLimit) { - s_logger.info(String.format("Host {id: %s, name: %s, uuid: %s} already reached max Running VMs(count includes system VMs), limit: %d, running VM count: %s", + logger.info(String.format("Host {id: %s, name: %s, uuid: %s} already reached max Running VMs(count includes system VMs), limit: %d, running VM count: %s", host.getId(), host.getName(), host.getUuid(), maxGuestLimit, vmCount)); return true; } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 94d992f86369..949cb4237260 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -138,7 +138,6 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -301,7 +300,6 @@ import com.googlecode.ipv6.IPv6Network; public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable { - public static final Logger s_logger = Logger.getLogger(ConfigurationManagerImpl.class); public static final String PERACCOUNT = "peraccount"; public static final String PERZONE = "perzone"; @@ -649,13 +647,13 @@ public boolean start() { if (mgtCidr == null || mgtCidr.trim().isEmpty()) { final String[] localCidrs = NetUtils.getLocalCidrs(); if (localCidrs != null && localCidrs.length > 0) { - s_logger.warn("Management network CIDR is not configured originally. 
Set it default to " + localCidrs[0]); + logger.warn("Management network CIDR is not configured originally. Set it default to " + localCidrs[0]); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management network CIDR is not configured originally. Set it default to " + localCidrs[0], ""); _configDao.update(Config.ManagementNetwork.key(), Config.ManagementNetwork.getCategory(), localCidrs[0]); } else { - s_logger.warn("Management network CIDR is not properly configured and we are not able to find a default setting"); + logger.warn("Management network CIDR is not properly configured and we are not able to find a default setting"); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_MANAGEMENT_NODE, 0, new Long(0), "Management network CIDR is not properly configured and we are not able to find a default setting", ""); } @@ -675,7 +673,7 @@ public String updateConfiguration(final long userId, final String name, final St final String validationMsg = validateConfigurationValue(name, value, scope); if (validationMsg != null) { - s_logger.error("Invalid configuration option, name: " + name + ", value:" + value); + logger.error("Invalid configuration option, name: " + name + ", value:" + value); throw new InvalidParameterValueException(validationMsg); } @@ -789,7 +787,7 @@ public String updateConfiguration(final long userId, final String name, final St String previousValue = _configDao.getValue(name); if (!_configDao.update(name, category, value)) { - s_logger.error("Failed to update configuration option, name: " + name + ", value:" + value); + logger.error("Failed to update configuration option, name: " + name + ", value:" + value); throw new CloudRuntimeException("Failed to update configuration value. 
Please contact Cloud Support."); } @@ -889,7 +887,7 @@ private void updateCustomDisplayNameOnHypervisorsList(String previousValue, Stri String hypervisors = _configDao.getValue(hypervisorListConfigName); if (Arrays.asList(hypervisors.split(",")).contains(previousValue)) { hypervisors = hypervisors.replace(previousValue, newValue); - s_logger.info(String.format("Updating the hypervisor list configuration '%s' " + + logger.info(String.format("Updating the hypervisor list configuration '%s' " + "to match the new custom hypervisor display name", hypervisorListConfigName)); _configDao.update(hypervisorListConfigName, hypervisors); } @@ -927,7 +925,7 @@ public Configuration updateConfiguration(final UpdateCfgCmd cmd) throws InvalidP // FIX ME - All configuration parameters are not moved from config.java to configKey if (config == null) { if (_configDepot.get(name) == null) { - s_logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); + logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); throw new InvalidParameterValueException("Config parameter with name " + name + " doesn't exist"); } category = _configDepot.get(name).category(); @@ -1040,7 +1038,7 @@ public Pair resetConfiguration(final ResetCfgCmd cmd) thr if (config == null) { configKey = _configDepot.get(name); if (configKey == null) { - s_logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); + logger.warn("Probably the component manager where configuration variable " + name + " is defined needs to implement Configurable interface"); throw new InvalidParameterValueException("Config parameter with name " + name + " doesn't exist"); } defaultValue = configKey.defaultValue(); @@ -1157,7 +1155,7 @@ public Pair resetConfiguration(final ResetCfgCmd cmd) thr 
default: if (!_configDao.update(name, category, defaultValue)) { - s_logger.error("Failed to reset configuration option, name: " + name + ", defaultValue:" + defaultValue); + logger.error("Failed to reset configuration option, name: " + name + ", defaultValue:" + defaultValue); throw new CloudRuntimeException("Failed to reset configuration value. Please contact Cloud Support."); } optionalValue = Optional.ofNullable(configKey != null ? configKey.value() : _configDao.findByName(name).getValue()); @@ -1172,7 +1170,7 @@ private String validateConfigurationValue(final String name, String value, final final ConfigurationVO cfg = _configDao.findByName(name); if (cfg == null) { - s_logger.error("Missing configuration variable " + name + " in configuration table"); + logger.error("Missing configuration variable " + name + " in configuration table"); return "Invalid configuration variable."; } @@ -1181,17 +1179,17 @@ private String validateConfigurationValue(final String name, String value, final if (!configScope.contains(scope) && !(ENABLE_ACCOUNT_SETTINGS_FOR_DOMAIN.value() && configScope.contains(ConfigKey.Scope.Account.toString()) && scope.equals(ConfigKey.Scope.Domain.toString()))) { - s_logger.error("Invalid scope id provided for the parameter " + name); + logger.error("Invalid scope id provided for the parameter " + name); return "Invalid scope id provided for the parameter " + name; } } Class type = null; final Config configuration = Config.getConfig(name); if (configuration == null) { - s_logger.warn("Did not find configuration " + name + " in Config.java. Perhaps moved to ConfigDepot"); + logger.warn("Did not find configuration " + name + " in Config.java. 
Perhaps moved to ConfigDepot"); final ConfigKey configKey = _configDepot.get(name); if(configKey == null) { - s_logger.warn("Did not find configuration " + name + " in ConfigDepot too."); + logger.warn("Did not find configuration " + name + " in ConfigDepot too."); return null; } type = configKey.type(); @@ -1214,7 +1212,7 @@ private String validateConfigurationValue(final String name, String value, final } } catch (final Exception e) { // catching generic exception as some throws NullPointerException and some throws NumberFormatExcpeion - s_logger.error(errMsg); + logger.error(errMsg); return errMsg; } @@ -1224,7 +1222,7 @@ private String validateConfigurationValue(final String name, String value, final } if (overprovisioningFactorsForValidation.contains(name)) { final String msg = "value cannot be null for the parameter " + name; - s_logger.error(msg); + logger.error(msg); return msg; } return null; @@ -1234,18 +1232,18 @@ private String validateConfigurationValue(final String name, String value, final try { if (overprovisioningFactorsForValidation.contains(name) && Float.parseFloat(value) <= 0f) { final String msg = name + " should be greater than 0"; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } } catch (final NumberFormatException e) { final String msg = "There was an error trying to parse the float value for: " + name; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } if (type.equals(Boolean.class)) { if (!(value.equals("true") || value.equals("false"))) { - s_logger.error("Configuration variable " + name + " is expecting true or false instead of " + value); + logger.error("Configuration variable " + name + " is expecting true or false instead of " + value); return "Please enter either 'true' or 'false'."; } return null; @@ -1260,7 +1258,7 @@ private String validateConfigurationValue(final String name, String value, final throw new InvalidParameterValueException(name+" 
value should be between 0 and 255. 0 value will disable this feature"); } } catch (final NumberFormatException e) { - s_logger.error("There was an error trying to parse the integer value for:" + name); + logger.error("There was an error trying to parse the integer value for:" + name); throw new InvalidParameterValueException("There was an error trying to parse the integer value for:" + name); } } @@ -1288,7 +1286,7 @@ private String validateConfigurationValue(final String name, String value, final } } } catch (final NumberFormatException e) { - s_logger.error("There was an error trying to parse the integer value for:" + name); + logger.error("There was an error trying to parse the integer value for:" + name); throw new InvalidParameterValueException("There was an error trying to parse the integer value for:" + name); } } @@ -1300,7 +1298,7 @@ private String validateConfigurationValue(final String name, String value, final throw new InvalidParameterValueException("Please enter a value between 0 and 1 for the configuration parameter: " + name); } } catch (final NumberFormatException e) { - s_logger.error("There was an error trying to parse the float value for:" + name); + logger.error("There was an error trying to parse the float value for:" + name); throw new InvalidParameterValueException("There was an error trying to parse the float value for:" + name); } } @@ -1333,7 +1331,7 @@ protected String validateIfIntValueIsInRange(String name, String value, String r final int max = Integer.parseInt(options[1]); final int val = Integer.parseInt(value); if (val < min || val > max) { - s_logger.error(String.format("Invalid value for configuration [%s]. Please enter a value in the range [%s].", name, range)); + logger.error(String.format("Invalid value for configuration [%s]. Please enter a value in the range [%s].", name, range)); return String.format("The provided value is not valid for this configuration. 
Please enter an integer in the range: [%s]", range); } return null; @@ -1381,9 +1379,9 @@ protected String validateRangePrivateIp(String name, String value) { if (NetUtils.isSiteLocalAddress(value)) { return null; } - s_logger.error(String.format("Value [%s] is not a valid private IP range for configuration [%s].", value, name)); + logger.error(String.format("Value [%s] is not a valid private IP range for configuration [%s].", value, name)); } catch (final NullPointerException e) { - s_logger.error(String.format("Error while parsing IP address for [%s].", name)); + logger.error(String.format("Error while parsing IP address for [%s].", name)); } return "a valid site local IP address"; } @@ -1439,7 +1437,7 @@ protected String validateRangeOther(String name, String value, String rangeOptio return null; } } - s_logger.error(String.format("Invalid value for configuration [%s].", name)); + logger.error(String.format("Invalid value for configuration [%s].", name)); return String.format("a valid value for this configuration (Options are: [%s])", rangeOption); } @@ -1752,7 +1750,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { if (lock == null) { String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Creation failed."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -1767,7 +1765,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final Exception e) { - s_logger.error("Unable to create Pod IP range due to " + e.getMessage(), e); + logger.error("Unable to create Pod IP range due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to create Pod IP range. 
Please contact Cloud Support."); } @@ -1865,7 +1863,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { if (lock == null) { String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Deletion failed."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -1884,7 +1882,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final Exception e) { - s_logger.error("Unable to delete Pod " + podId + "IP range due to " + e.getMessage(), e); + logger.error("Unable to delete Pod " + podId + "IP range due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to delete Pod " + podId + "IP range. Please contact Cloud Support."); } @@ -1946,7 +1944,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final Exception e) { - s_logger.error("Unable to update Pod " + podId + " IP range due to " + e.getMessage(), e); + logger.error("Unable to update Pod " + podId + " IP range due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to update Pod " + podId + " IP range. 
Please contact Cloud Support."); } } @@ -1986,7 +1984,7 @@ private void updatePodIpRangeInDb (long zoneId, long podId, Integer vlanId, Host lock = _podDao.acquireInLockTable(podId); if (lock == null) { String msg = "Unable to acquire lock on table to update the ip range of POD: " + pod.getName() + ", Update failed."; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } List iPaddressesToAdd = new ArrayList(newIpRange); @@ -2007,7 +2005,7 @@ private void updatePodIpRangeInDb (long zoneId, long podId, Integer vlanId, Host } _podDao.update(podId, pod); } catch (final Exception e) { - s_logger.error("Unable to update Pod " + podId + " IP range due to database error " + e.getMessage(), e); + logger.error("Unable to update Pod " + podId + " IP range due to database error " + e.getMessage(), e); throw new CloudRuntimeException("Failed to update Pod " + podId + " IP range. Please contact Cloud Support."); } finally { if (lock != null) { @@ -2094,7 +2092,7 @@ public DataCenterGuestIpv6Prefix doInTransaction(TransactionStatus status) { } }); } catch (final Exception e) { - s_logger.error(String.format("Unable to add IPv6 prefix for zone: %s due to %s", zone, e.getMessage()), e); + logger.error(String.format("Unable to add IPv6 prefix for zone: %s due to %s", zone, e.getMessage()), e); throw new CloudRuntimeException(String.format("Unable to add IPv6 prefix for zone ID: %s. 
Please contact Cloud Support.", zone.getUuid())); } return dataCenterGuestIpv6Prefix; @@ -2132,7 +2130,7 @@ public boolean deleteDataCenterGuestIpv6Prefix(DeleteGuestNetworkIpv6PrefixCmd c List prefixSubnets = ipv6GuestPrefixSubnetNetworkMapDao.listUsedByPrefix(prefixId); if (CollectionUtils.isNotEmpty(prefixSubnets)) { List usedSubnets = prefixSubnets.stream().map(Ipv6GuestPrefixSubnetNetworkMapVO::getSubnet).collect(Collectors.toList()); - s_logger.error(String.format("Subnets for guest IPv6 prefix {ID: %s, %s} are in use: %s", prefix.getUuid(), prefix.getPrefix(), String.join(", ", usedSubnets))); + logger.error(String.format("Subnets for guest IPv6 prefix {ID: %s, %s} are in use: %s", prefix.getUuid(), prefix.getPrefix(), String.join(", ", usedSubnets))); throw new CloudRuntimeException(String.format("Unable to delete guest network IPv6 prefix ID: %s. Prefix subnets are in use.", prefix.getUuid())); } ipv6GuestPrefixSubnetNetworkMapDao.deleteByPrefixId(prefixId); @@ -2251,7 +2249,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { messageBus.publish(_name, MESSAGE_DELETE_POD_IP_RANGE_EVENT, PublishScope.LOCAL, pod); messageBus.publish(_name, MESSAGE_CREATE_POD_IP_RANGE_EVENT, PublishScope.LOCAL, pod); } catch (final Exception e) { - s_logger.error("Unable to edit pod due to " + e.getMessage(), e); + logger.error("Unable to edit pod due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to edit pod. 
Please contact Cloud Support."); } @@ -2753,7 +2751,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { _networkSvc.addTrafficTypeToPhysicalNetwork(mgmtPhyNetwork.getId(), TrafficType.Storage.toString(), "vlan", mgmtTraffic.getXenNetworkLabel(), mgmtTraffic.getKvmNetworkLabel(), mgmtTraffic.getVmwareNetworkLabel(), mgmtTraffic.getSimulatorNetworkLabel(), mgmtTraffic.getVlan(), mgmtTraffic.getHypervNetworkLabel(), mgmtTraffic.getOvm3NetworkLabel()); - s_logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network " + mgmtPhyNetwork.getId() + logger.info("No storage traffic type was specified by admin, create default storage traffic on physical network " + mgmtPhyNetwork.getId() + " with same configure of management traffic type"); } } catch (final InvalidParameterValueException ex) { @@ -3246,7 +3244,7 @@ protected ServiceOfferingVO createServiceOffering(final long userId, final boole try { detailEntryValue = URLDecoder.decode(detailEntry.getValue(), "UTF-8"); } catch (UnsupportedEncodingException | IllegalArgumentException e) { - s_logger.error("Cannot decode extra configuration value for key: " + detailEntry.getKey() + ", skipping it"); + logger.error("Cannot decode extra configuration value for key: " + detailEntry.getKey() + ", skipping it"); continue; } } @@ -4535,7 +4533,7 @@ private Vlan commitVlan(final Long zoneId, final Long podId, final String startI final String ip6Gateway, final String ip6Cidr, final Domain domain, final Account vlanOwner, final Network network, final Pair> sameSubnet) { final GlobalLock commitVlanLock = GlobalLock.getInternLock("CommitVlan"); commitVlanLock.lock(5); - s_logger.debug("Acquiring lock for committing vlan"); + logger.debug("Acquiring lock for committing vlan"); try { Vlan vlan = Transaction.execute(new TransactionCallback() { @Override @@ -4551,7 +4549,7 @@ public Vlan doInTransaction(final TransactionStatus status) { if (supportsMultipleSubnets 
== null || !Boolean.valueOf(supportsMultipleSubnets)) { throw new InvalidParameterValueException("The dhcp service provider for this network does not support dhcp across multiple subnets"); } - s_logger.info("adding a new subnet to the network " + network.getId()); + logger.info("adding a new subnet to the network " + network.getId()); } else if (sameSubnet != null) { // if it is same subnet the user might not send the vlan and the // netmask details. so we are @@ -4957,7 +4955,7 @@ private VlanVO commitVlanAndIpRange(final long zoneId, final long networkId, fin @Override public VlanVO doInTransaction(final TransactionStatus status) { VlanVO vlan = new VlanVO(vlanType, vlanId, vlanGateway, vlanNetmask, zone.getId(), ipRange, networkId, physicalNetworkId, vlanIp6Gateway, vlanIp6Cidr, ipv6Range); - s_logger.debug("Saving vlan range " + vlan); + logger.debug("Saving vlan range " + vlan); vlan = _vlanDao.persist(vlan); // IPv6 use a used ip map, is different from ipv4, no need to save @@ -5111,14 +5109,14 @@ private void updateVlanAndIpv4Range(final long id, final VlanVO vlanRange, throw new CloudRuntimeException("Unable to acquire vlan configuration: " + id); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock vlan " + id + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock vlan " + id + " is acquired"); } commitUpdateVlanAndIpRange(id, newStartIP, newEndIP, currentStartIP, currentEndIP, gateway, netmask,true, isRangeForSystemVM, forSystemVms); } catch (final Exception e) { - s_logger.error("Unable to edit VlanRange due to " + e.getMessage(), e); + logger.error("Unable to edit VlanRange due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to edit VlanRange. 
Please contact Cloud Support."); } finally { _vlanDao.releaseFromLockTable(id); @@ -5165,14 +5163,14 @@ private void updateVlanAndIpv6Range(final long id, final VlanVO vlanRange, throw new CloudRuntimeException("Unable to acquire vlan configuration: " + id); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock vlan " + id + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock vlan " + id + " is acquired"); } commitUpdateVlanAndIpRange(id, startIpv6, endIpv6, currentStartIPv6, currentEndIPv6, ip6Gateway, ip6Cidr, false, isRangeForSystemVM,forSystemVms); } catch (final Exception e) { - s_logger.error("Unable to edit VlanRange due to " + e.getMessage(), e); + logger.error("Unable to edit VlanRange due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to edit VlanRange. Please contact Cloud Support."); } finally { _vlanDao.releaseFromLockTable(id); @@ -5187,7 +5185,7 @@ private VlanVO commitUpdateVlanAndIpRange(final Long id, final String newStartIP @Override public VlanVO doInTransaction(final TransactionStatus status) { VlanVO vlanRange = _vlanDao.findById(id); - s_logger.debug("Updating vlan range " + vlanRange.getId()); + logger.debug("Updating vlan range " + vlanRange.getId()); if (ipv4) { vlanRange.setIpRange(newStartIP + "-" + newEndIP); vlanRange.setVlanGateway(gateway); @@ -5297,8 +5295,8 @@ public boolean deleteVlanAndPublicIpRange(final long userId, final long vlanDbId throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock vlan " + vlanDbId + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock vlan " + vlanDbId + " is acquired"); } for (final IPAddressVO ip : ips) { boolean success = true; @@ -5323,7 +5321,7 @@ public boolean deleteVlanAndPublicIpRange(final long userId, final long vlanDbId success = _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); } if (!success) { - s_logger.warn("Some ip 
addresses failed to be released as a part of vlan " + vlanDbId + " removal"); + logger.warn("Some ip addresses failed to be released as a part of vlan " + vlanDbId + " removal"); } else { resourceCountToBeDecrement++; final boolean usageHidden = _ipAddrMgr.isUsageHidden(ip); @@ -5358,17 +5356,17 @@ public boolean deleteVlanAndPublicIpRange(final long userId, final long vlanDbId @Override public void doInTransactionWithoutResult(final TransactionStatus status) { _publicIpAddressDao.deletePublicIPRange(vlanDbId); - s_logger.debug(String.format("Delete Public IP Range (from user_ip_address, where vlan_db_id=%s)", vlanDbId)); + logger.debug(String.format("Delete Public IP Range (from user_ip_address, where vlan_db_id=%s)", vlanDbId)); _vlanDao.remove(vlanDbId); - s_logger.debug(String.format("Mark vlan as Remove vlan (vlan_db_id=%s)", vlanDbId)); + logger.debug(String.format("Mark vlan as Remove vlan (vlan_db_id=%s)", vlanDbId)); SearchBuilder sb = podVlanMapDao.createSearchBuilder(); sb.and("vlan_db_id", sb.entity().getVlanDbId(), SearchCriteria.Op.EQ); SearchCriteria sc = sb.create(); sc.setParameters("vlan_db_id", vlanDbId); podVlanMapDao.remove(sc); - s_logger.debug(String.format("Delete vlan_db_id=%s in pod_vlan_map", vlanDbId)); + logger.debug(String.format("Delete vlan_db_id=%s in pod_vlan_map", vlanDbId)); } }); @@ -5513,7 +5511,7 @@ public boolean releasePublicIpRange(final long vlanDbId, final long userId, fina VlanVO vlan = _vlanDao.findById(vlanDbId); if(vlan == null) { // Nothing to do if vlan can't be found - s_logger.warn(String.format("Skipping the process for releasing public IP range as could not find a VLAN with ID '%s' for Account '%s' and User '%s'." + logger.warn(String.format("Skipping the process for releasing public IP range as could not find a VLAN with ID '%s' for Account '%s' and User '%s'." 
,vlanDbId, caller, userId)); return true; } @@ -5548,14 +5546,14 @@ public boolean releasePublicIpRange(final long vlanDbId, final long userId, fina if (vlan == null) { throw new CloudRuntimeException("Unable to acquire vlan configuration: " + vlanDbId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock vlan " + vlanDbId + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock vlan " + vlanDbId + " is acquired"); } for (final IPAddressVO ip : ips) { // Disassociate allocated IP's that are not in use if (!ip.isOneToOneNat() && !ip.isSourceNat() && !(_firewallDao.countRulesByIpId(ip.getId()) > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing Public IP addresses" + ip + " of vlan " + vlanDbId + " as part of Public IP" + " range release to the system pool"); + if (logger.isDebugEnabled()) { + logger.debug("Releasing Public IP addresses" + ip + " of vlan " + vlanDbId + " as part of Public IP" + " range release to the system pool"); } success = success && _ipAddrMgr.disassociatePublicIpAddress(ip.getId(), userId, caller); } else { @@ -5563,7 +5561,7 @@ public boolean releasePublicIpRange(final long vlanDbId, final long userId, fina } } if (!success) { - s_logger.warn("Some Public IP addresses that were not in use failed to be released as a part of" + " vlan " + vlanDbId + "release to the system pool"); + logger.warn("Some Public IP addresses that were not in use failed to be released as a part of" + " vlan " + vlanDbId + "release to the system pool"); } } finally { _vlanDao.releaseFromLockTable(vlanDbId); @@ -5584,7 +5582,7 @@ public boolean releasePublicIpRange(final long vlanDbId, final long userId, fina _resourceLimitMgr.decrementResourceCount(acctVln.get(0).getAccountId(), ResourceType.public_ip, new Long(ips.size())); success = true; } else if (isDomainSpecific && _domainVlanMapDao.remove(domainVlan.get(0).getId())) { - s_logger.debug("Remove the vlan from domain_vlan_map successfully."); + logger.debug("Remove the vlan 
from domain_vlan_map successfully."); success = true; } else { success = false; @@ -5887,8 +5885,8 @@ public boolean deleteVlanIpRange(final DeleteVlanIpRangeCmd cmd) { public void checkDiskOfferingAccess(final Account caller, final DiskOffering dof, DataCenter zone) { for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, dof, zone)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + caller + " to disk offering:" + dof.getId() + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + caller + " to disk offering:" + dof.getId() + " by " + checker.getName()); } return; } else { @@ -5904,8 +5902,8 @@ public void checkDiskOfferingAccess(final Account caller, final DiskOffering dof public void checkZoneAccess(final Account caller, final DataCenter zone) { for (final SecurityChecker checker : _secChecker) { if (checker.checkAccess(caller, zone)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + caller + " to zone:" + zone.getId() + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + caller + " to zone:" + zone.getId() + " by " + checker.getName()); } return; } else { @@ -6142,7 +6140,7 @@ public NetworkOffering createNetworkOffering(final CreateNetworkOfferingCmd cmd) // dhcp provider and userdata provider should be same because vm will be contacting dhcp server for user data. if (dhcpProvider == null && IsVrUserdataProvider) { - s_logger.debug("User data provider VR can't be selected without VR as dhcp provider. In this case VM fails to contact the DHCP server for userdata"); + logger.debug("User data provider VR can't be selected without VR as dhcp provider. In this case VM fails to contact the DHCP server for userdata"); throw new InvalidParameterValueException("Without VR as dhcp provider, User data can't selected for VR. 
Please select VR as DHCP provider "); } @@ -6202,7 +6200,7 @@ public NetworkOffering createNetworkOffering(final CreateNetworkOfferingCmd cmd) // if Firewall service is missing, add Firewall service/provider // combination if (firewallProvider != null) { - s_logger.debug("Adding Firewall service with provider " + firewallProvider.getName()); + logger.debug("Adding Firewall service with provider " + firewallProvider.getName()); final Set firewallProviderSet = new HashSet(); firewallProviderSet.add(firewallProvider); serviceProviderMap.put(Service.Firewall, firewallProviderSet); @@ -6586,7 +6584,7 @@ public NetworkOfferingVO doInTransaction(final TransactionStatus status) { NetworkOfferingVO offering = offeringFinal; // 1) create network offering object - s_logger.debug("Adding network offering " + offering); + logger.debug("Adding network offering " + offering); offering.setConcurrentConnections(maxconn); offering.setKeepAliveEnabled(enableKeepAlive); offering = _networkOfferingDao.persist(offering, details); @@ -6602,7 +6600,7 @@ public NetworkOfferingVO doInTransaction(final TransactionStatus status) { } final NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(offering.getId(), service, provider); _ntwkOffServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName()); + logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName()); } if (vpcOff) { @@ -6613,7 +6611,7 @@ public NetworkOfferingVO doInTransaction(final TransactionStatus status) { } else { final NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(offering.getId(), service, null); _ntwkOffServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService + " with null provider"); + logger.trace("Added service for the network offering: " + offService + " with null provider"); } } if 
(offering != null) { @@ -7228,7 +7226,7 @@ public AccountVO markDefaultZone(final String accountName, final long domainId, // Check if the account exists final Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null) { - s_logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); + logger.error("Unable to find account by name: " + accountName + " in domain " + domainId); throw new InvalidParameterValueException("Account by name: " + accountName + " doesn't exist in domain " + domainId); } @@ -7358,11 +7356,11 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final CloudRuntimeException e) { - s_logger.error(e); + logger.error(e); return false; } } else { - s_logger.trace("Domain id=" + domainId + " has no domain specific virtual ip ranges, nothing to release"); + logger.trace("Domain id=" + domainId + " has no domain specific virtual ip ranges, nothing to release"); } return true; } @@ -7384,11 +7382,11 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final CloudRuntimeException e) { - s_logger.error(e); + logger.error(e); return false; } } else { - s_logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); + logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); } return true; } @@ -7703,7 +7701,7 @@ public ConfigKey[] getConfigKeys() { public String getConfigurationType(final String configName) { final ConfigurationVO cfg = _configDao.findByName(configName); if (cfg == null) { - s_logger.warn("Configuration " + configName + " not found"); + logger.warn("Configuration " + configName + " not found"); return Configuration.ValueType.String.name(); } @@ -7714,10 +7712,10 @@ public String getConfigurationType(final String configName) { Class type = null; final Config c = Config.getConfig(configName); if (c == 
null) { - s_logger.warn("Configuration " + configName + " no found. Perhaps moved to ConfigDepot"); + logger.warn("Configuration " + configName + " no found. Perhaps moved to ConfigDepot"); final ConfigKey configKey = _configDepot.get(configName); if (configKey == null) { - s_logger.warn("Couldn't find configuration " + configName + " in ConfigDepot too."); + logger.warn("Couldn't find configuration " + configName + " in ConfigDepot too."); return Configuration.ValueType.String.name(); } type = configKey.type(); @@ -7759,7 +7757,7 @@ public Pair getConfigurationGroupAndSubGroup(final String config final ConfigurationVO cfg = _configDao.findByName(configName); if (cfg == null) { - s_logger.warn("Configuration " + configName + " not found"); + logger.warn("Configuration " + configName + " not found"); throw new InvalidParameterValueException("configuration with name " + configName + " doesn't exist"); } diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java index a71c692aab11..2e45b0f745bd 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java @@ -22,7 +22,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; -import org.apache.log4j.Logger; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.security.keys.KeysManager; @@ -47,7 +46,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class AgentBasedConsoleProxyManager extends ManagerBase implements ConsoleProxyManager { - private static final Logger s_logger = Logger.getLogger(AgentBasedConsoleProxyManager.class); @Inject protected HostDao _hostDao; @@ -103,8 +101,8 @@ public int getVncPort(VMInstanceVO vm) { @Override public boolean configure(String name, Map params) 
throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring AgentBasedConsoleProxyManager"); + if (logger.isInfoEnabled()) { + logger.info("Start configuring AgentBasedConsoleProxyManager"); } Map configs = _configDao.getConfiguration("management-server", params); @@ -129,8 +127,8 @@ public boolean configure(String name, Map params) throws Configu _agentMgr, _keysMgr, consoleAccessManager)); _agentMgr.registerForHostEvents(_listener, true, true, false); - if (s_logger.isInfoEnabled()) { - s_logger.info("AgentBasedConsoleProxyManager has been configured. SSL enabled: " + _sslEnabled); + if (logger.isInfoEnabled()) { + logger.info("AgentBasedConsoleProxyManager has been configured. SSL enabled: " + _sslEnabled); } return true; } @@ -143,22 +141,22 @@ HostVO findHost(VMInstanceVO vm) { public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { UserVmVO userVm = _userVmDao.findById(userVmId); if (userVm == null) { - s_logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); + logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); return null; } HostVO host = findHost(userVm); if (host != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign embedded console proxy running at " + host.getName() + " to user vm " + userVmId + " with public IP " + host.getPublicIpAddress()); + if (logger.isDebugEnabled()) { + logger.debug("Assign embedded console proxy running at " + host.getName() + " to user vm " + userVmId + " with public IP " + host.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest // of all are place-holder values String publicIp = host.getPublicIpAddress(); if (publicIp == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + host.getName() + "/" + host.getPrivateIpAddress() + + if (logger.isDebugEnabled()) { + logger.debug("Host " + host.getName() + 
"/" + host.getPrivateIpAddress() + " does not have public interface, we will return its private IP for cosole proxy."); } publicIp = host.getPrivateIpAddress(); @@ -172,7 +170,7 @@ public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - s_logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); } return null; } diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java b/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java index 70afd8ab1d92..60e2265c41c9 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentBasedStandaloneConsoleProxyManager.java @@ -18,7 +18,6 @@ import java.util.List; -import org.apache.log4j.Logger; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -30,13 +29,12 @@ * to non ACS console proxy services. The documentation that describe its use and requirements can be found in QuickCloud. 
*/ public class AgentBasedStandaloneConsoleProxyManager extends AgentBasedConsoleProxyManager { - private static final Logger s_logger = Logger.getLogger(AgentBasedStandaloneConsoleProxyManager.class); @Override public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { UserVmVO userVm = _userVmDao.findById(userVmId); if (userVm == null) { - s_logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); + logger.warn("User VM " + userVmId + " no longer exists, return a null proxy for user vm:" + userVmId); return null; } @@ -61,21 +59,21 @@ public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { } } if (allocatedHost == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to find a console proxy at host: " + host.getName() + " and in the pod: " + host.getPodId() + " to user vm " + userVmId); + if (logger.isDebugEnabled()) { + logger.debug("Failed to find a console proxy at host: " + host.getName() + " and in the pod: " + host.getPodId() + " to user vm " + userVmId); } return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign standalone console proxy running at " + allocatedHost.getName() + " to user vm " + userVmId + " with public IP " + if (logger.isDebugEnabled()) { + logger.debug("Assign standalone console proxy running at " + allocatedHost.getName() + " to user vm " + userVmId + " with public IP " + allocatedHost.getPublicIpAddress()); } // only private IP, public IP, host id have meaningful values, rest of all are place-holder values String publicIp = allocatedHost.getPublicIpAddress(); if (publicIp == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + allocatedHost.getName() + "/" + allocatedHost.getPrivateIpAddress() + if (logger.isDebugEnabled()) { + logger.debug("Host " + allocatedHost.getName() + "/" + allocatedHost.getPrivateIpAddress() + " does not have public interface, we will return its private IP for cosole proxy."); } publicIp = 
allocatedHost.getPrivateIpAddress(); @@ -88,7 +86,7 @@ public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) { return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain); } else { - s_logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); + logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable."); } return null; } diff --git a/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java b/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java index efc5a1b5d84b..fdbacb5c8c2e 100644 --- a/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java +++ b/server/src/main/java/com/cloud/consoleproxy/AgentHookBase.java @@ -22,13 +22,13 @@ import java.util.Date; import org.apache.cloudstack.consoleproxy.ConsoleAccessManager; -import org.apache.cloudstack.consoleproxy.ConsoleAccessManagerImpl; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.security.keys.KeysManager; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.AgentControlAnswer; @@ -62,7 +62,7 @@ * can reuse */ public abstract class AgentHookBase implements AgentHook { - private static final Logger s_logger = Logger.getLogger(AgentHookBase.class); + protected Logger logger = LogManager.getLogger(getClass()); VMInstanceDao _instanceDao; HostDao _hostDao; @@ -91,40 +91,40 @@ public AgentControlAnswer onConsoleAccessAuthentication(ConsoleAccessAuthenticat String sessionUuid = cmd.getSessionUuid(); if (ticketInUrl == null) { - 
s_logger.error("Access ticket could not be found, you could be running an old version of console proxy. vmId: " + cmd.getVmId()); + logger.error("Access ticket could not be found, you could be running an old version of console proxy. vmId: " + cmd.getVmId()); return new ConsoleAccessAuthenticationAnswer(cmd, false); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console authentication. Ticket in url for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticketInUrl); + if (logger.isDebugEnabled()) { + logger.debug("Console authentication. Ticket in url for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticketInUrl); } if (!cmd.isReauthenticating()) { - String ticket = ConsoleAccessManagerImpl.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), sessionUuid); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console authentication. Ticket in 1 minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticket); + String ticket = consoleAccessManager.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), sessionUuid); + if (logger.isDebugEnabled()) { + logger.debug("Console authentication. 
Ticket in 1 minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticket); } if (!consoleAccessManager.isSessionAllowed(sessionUuid)) { - s_logger.error(String.format("Session [%s] has been already used or does not exist.", sessionUuid)); + logger.error(String.format("Session [%s] has been already used or does not exist.", sessionUuid)); return new ConsoleAccessAuthenticationAnswer(cmd, false); } - s_logger.debug(String.format("Acquiring session [%s] as it was just used.", sessionUuid)); + logger.debug(String.format("Acquiring session [%s] as it was just used.", sessionUuid)); consoleAccessManager.acquireSession(sessionUuid); if (!ticket.equals(ticketInUrl)) { Date now = new Date(); // considering of minute round-up - String minuteEarlyTicket = ConsoleAccessManagerImpl.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), new Date(now.getTime() - 60 * 1000), sessionUuid); + String minuteEarlyTicket = consoleAccessManager.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), new Date(now.getTime() - 60 * 1000), sessionUuid); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console authentication. Ticket in 2-minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + + if (logger.isDebugEnabled()) { + logger.debug("Console authentication. Ticket in 2-minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + minuteEarlyTicket); } if (!minuteEarlyTicket.equals(ticketInUrl)) { - s_logger.error("Access ticket expired or has been modified. vmId: " + cmd.getVmId() + "ticket in URL: " + ticketInUrl + + logger.error("Access ticket expired or has been modified. 
vmId: " + cmd.getVmId() + "ticket in URL: " + ticketInUrl + ", tickets to check against: " + ticket + "," + minuteEarlyTicket); return new ConsoleAccessAuthenticationAnswer(cmd, false); } @@ -132,8 +132,8 @@ public AgentControlAnswer onConsoleAccessAuthentication(ConsoleAccessAuthenticat } if (cmd.getVmId() != null && cmd.getVmId().isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Invalid vm id sent from proxy(happens when proxy session has terminated)"); + if (logger.isDebugEnabled()) { + logger.debug("Invalid vm id sent from proxy(happens when proxy session has terminated)"); } return new ConsoleAccessAuthenticationAnswer(cmd, false); } @@ -143,24 +143,24 @@ public AgentControlAnswer onConsoleAccessAuthentication(ConsoleAccessAuthenticat vm = _instanceDao.findById(Long.parseLong(cmd.getVmId())); } if (vm == null) { - s_logger.error("Invalid vm id " + cmd.getVmId() + " sent from console access authentication"); + logger.error("Invalid vm id " + cmd.getVmId() + " sent from console access authentication"); return new ConsoleAccessAuthenticationAnswer(cmd, false); } if (vm.getHostId() == null) { - s_logger.warn("VM " + vmId + " lost host info, failed authentication request"); + logger.warn("VM " + vmId + " lost host info, failed authentication request"); return new ConsoleAccessAuthenticationAnswer(cmd, false); } HostVO host = _hostDao.findById(vm.getHostId()); if (host == null) { - s_logger.warn("VM " + vmId + "'s host does not exist, fail authentication request"); + logger.warn("VM " + vmId + "'s host does not exist, fail authentication request"); return new ConsoleAccessAuthenticationAnswer(cmd, false); } String sid = cmd.getSid(); if (sid == null || !sid.equals(vm.getVncPassword())) { - s_logger.warn("sid " + sid + " in url does not match stored sid."); + logger.warn("sid " + sid + " in url does not match stored sid."); return new ConsoleAccessAuthenticationAnswer(cmd, false); } @@ -168,7 +168,7 @@ public AgentControlAnswer 
onConsoleAccessAuthentication(ConsoleAccessAuthenticat ConsoleAccessAuthenticationAnswer authenticationAnswer = new ConsoleAccessAuthenticationAnswer(cmd, true); authenticationAnswer.setReauthenticating(true); - s_logger.info("Re-authentication request, ask host " + vm.getHostId() + " for new console info"); + logger.info("Re-authentication request, ask host " + vm.getHostId() + " for new console info"); GetVncPortAnswer answer = (GetVncPortAnswer)_agentMgr.easySend(vm.getHostId(), new GetVncPortCommand(vm.getId(), vm.getInstanceName())); if (answer != null && answer.getResult()) { @@ -176,19 +176,19 @@ public AgentControlAnswer onConsoleAccessAuthentication(ConsoleAccessAuthenticat if (parsedHostInfo.second() != null && parsedHostInfo.third() != null) { - s_logger.info("Re-authentication result. vm: " + vm.getId() + ", tunnel url: " + parsedHostInfo.second() + ", tunnel session: " + + logger.info("Re-authentication result. vm: " + vm.getId() + ", tunnel url: " + parsedHostInfo.second() + ", tunnel session: " + parsedHostInfo.third()); authenticationAnswer.setTunnelUrl(parsedHostInfo.second()); authenticationAnswer.setTunnelSession(parsedHostInfo.third()); } else { - s_logger.info("Re-authentication result. vm: " + vm.getId() + ", host address: " + parsedHostInfo.first() + ", port: " + answer.getPort()); + logger.info("Re-authentication result. 
vm: " + vm.getId() + ", host address: " + parsedHostInfo.first() + ", port: " + answer.getPort()); authenticationAnswer.setHost(parsedHostInfo.first()); authenticationAnswer.setPort(answer.getPort()); } } else { - s_logger.warn("Re-authentication request failed"); + logger.warn("Re-authentication request failed"); authenticationAnswer.setSuccess(false); } @@ -219,7 +219,7 @@ public void startAgentHttpHandlerInVM(StartupProxyCommand startupCmd) { ksBits = _ksMgr.getKeystoreBits(ConsoleProxyManager.CERTIFICATE_NAME, ConsoleProxyManager.CERTIFICATE_NAME, storePassword); //ks manager raises exception if ksBits are null, hence no need to explicltly handle the condition } else { - s_logger.debug("SSL is disabled for console proxy. To enable SSL, please configure consoleproxy.sslEnabled and consoleproxy.url.domain global settings."); + logger.debug("SSL is disabled for console proxy. To enable SSL, please configure consoleproxy.sslEnabled and consoleproxy.url.domain global settings."); } cmd = new StartConsoleProxyAgentHttpHandlerCommand(ksBits, storePassword); @@ -232,22 +232,22 @@ public void startAgentHttpHandlerInVM(StartupProxyCommand startupCmd) { if (consoleProxyHost != null) { Answer answer = _agentMgr.send(consoleProxyHost.getId(), cmd); if (answer == null || !answer.getResult()) { - s_logger.error("Console proxy agent reported that it failed to execute http handling startup command"); + logger.error("Console proxy agent reported that it failed to execute http handling startup command"); } else { - s_logger.info("Successfully sent out command to start HTTP handling in console proxy agent"); + logger.info("Successfully sent out command to start HTTP handling in console proxy agent"); } } }catch (NoSuchAlgorithmException e) { - s_logger.error("Unexpected exception in SecureRandom Algorithm selection ", e); + logger.error("Unexpected exception in SecureRandom Algorithm selection ", e); } catch (AgentUnavailableException e) { - s_logger.error("Unable to send http 
handling startup command to the console proxy resource for proxy:" + startupCmd.getProxyVmId(), e); + logger.error("Unable to send http handling startup command to the console proxy resource for proxy:" + startupCmd.getProxyVmId(), e); } catch (OperationTimedoutException e) { - s_logger.error("Unable to send http handling startup command(time out) to the console proxy resource for proxy:" + startupCmd.getProxyVmId(), e); + logger.error("Unable to send http handling startup command(time out) to the console proxy resource for proxy:" + startupCmd.getProxyVmId(), e); } catch (OutOfMemoryError e) { - s_logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); + logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); System.exit(1); } catch (Exception e) { - s_logger.error( + logger.error( "Unexpected exception when sending http handling startup command(time out) to the console proxy resource for proxy:" + startupCmd.getProxyVmId(), e); } } @@ -266,7 +266,7 @@ private String getEncryptorPassword() { if (keyIvPair.getIvBytes() == null || keyIvPair.getIvBytes().length != 16 || keyIvPair.getKeyBytes() == null || keyIvPair.getKeyBytes().length != 16) { - s_logger.warn("Console access AES KeyIV sanity check failed, reset and regenerate"); + logger.warn("Console access AES KeyIV sanity check failed, reset and regenerate"); _keysMgr.resetEncryptionKeyIV(); } else { break; diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index c1d4a22bf773..028ecd31b63a 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -51,7 +51,6 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; -import 
org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -167,7 +166,6 @@ **/ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxyManager, VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter, Configurable { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyManagerImpl.class); private static final int DEFAULT_CAPACITY_SCAN_INTERVAL_IN_MILLISECONDS = 30000; private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS = 180; @@ -286,8 +284,8 @@ public void onAgentDisconnect(long agentId, com.cloud.host.Status state) { HostVO host = _hostDao.findById(agentId); if (host.getType() == Type.ConsoleProxy) { String name = host.getName(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected, proxy: " + name); + if (logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected, proxy: " + name); } if (name != null && name.startsWith("v-")) { String[] tokens = name.split("-"); @@ -296,13 +294,13 @@ public void onAgentDisconnect(long agentId, com.cloud.host.Status state) { try { proxyVmId = Long.parseLong(tokenSecondElement); } catch (NumberFormatException e) { - s_logger.error(String.format("[%s] is not a valid number, unable to parse [%s].", tokenSecondElement, e.getMessage()), e); + logger.error(String.format("[%s] is not a valid number, unable to parse [%s].", tokenSecondElement, e.getMessage()), e); return; } final ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); - if (proxy == null && s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); + if (proxy == null && logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); } } else { assert (false) : "Invalid console proxy name: " + name; @@ -317,7 +315,7 @@ protected 
HostVO findConsoleProxyHost(StartupProxyCommand startupCmd) { long proxyVmId = startupCmd.getProxyVmId(); ConsoleProxyVO consoleProxy = consoleProxyDao.findById(proxyVmId); if (consoleProxy == null) { - s_logger.info("Proxy " + proxyVmId + " is no longer in DB, skip sending startup command"); + logger.info("Proxy " + proxyVmId + " is no longer in DB, skip sending startup command"); return null; } @@ -335,13 +333,13 @@ public ConsoleProxyInfo assignProxy(final long dataCenterId, final long vmId) { } if (proxy.getPublicIpAddress() == null) { - s_logger.warn(String.format("Assigned console proxy [%s] does not have a valid public IP address.", proxy.toString())); + logger.warn(String.format("Assigned console proxy [%s] does not have a valid public IP address.", proxy.toString())); return null; } KeystoreVO ksVo = _ksDao.findByName(ConsoleProxyManager.CERTIFICATE_NAME); if (proxy.isSslEnabled() && ksVo == null) { - s_logger.warn(String.format("SSL is enabled for console proxy [%s] but no server certificate found in database.", proxy.toString())); + logger.warn(String.format("SSL is enabled for console proxy [%s] but no server certificate found in database.", proxy.toString())); } ConsoleProxyInfo info; @@ -359,13 +357,13 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { VMInstanceVO vm = vmInstanceDao.findById(vmId); if (vm == null) { - s_logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); + logger.warn("VM " + vmId + " no longer exists, return a null proxy for vm:" + vmId); return null; } if (!availableVmStateOnAssignProxy.contains(vm.getState())) { - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Detected that %s is not currently in \"Starting\", \"Running\", \"Stopping\" or \"Migrating\" state, it will fail the proxy assignment.", vm.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Detected that %s is not currently in \"Starting\", \"Running\", \"Stopping\" or 
\"Migrating\" state, it will fail the proxy assignment.", vm.toString())); } return null; } @@ -377,18 +375,18 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { if (proxy != null) { if (!isInAssignableState(proxy)) { - if (s_logger.isInfoEnabled()) { - s_logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : " + vmId); + if (logger.isInfoEnabled()) { + logger.info("A previous assigned proxy is not assignable now, reassign console proxy for user vm : " + vmId); } proxy = null; } else { if (consoleProxyDao.getProxyActiveLoad(proxy.getId()) < capacityPerProxy || hasPreviousSession(proxy, vm)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign previous allocated console proxy for user vm : " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Assign previous allocated console proxy for user vm : " + vmId); } if (proxy.getActiveSession() >= capacityPerProxy) { - s_logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm : " + vmId); + logger.warn("Assign overloaded proxy to user VM as previous session exists, user vm : " + vmId); } } else { proxy = null; @@ -404,12 +402,12 @@ public ConsoleProxyVO doAssignProxy(long dataCenterId, long vmId) { allocProxyLock.unlock(); } } else { - s_logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + + logger.error("Unable to acquire synchronization lock to get/allocate proxy resource for vm :" + vmId + ". Previous console proxy allocation is taking too long"); } if (proxy == null) { - s_logger.warn("Unable to find or allocate console proxy resource"); + logger.warn("Unable to find or allocate console proxy resource"); return null; } @@ -439,7 +437,7 @@ private boolean hasPreviousSession(ConsoleProxyVO proxy, VMInstanceVO vm) { String details = detailsInBytes != null ? 
new String(detailsInBytes, Charset.forName("US-ASCII")) : null; status = parseJsonToConsoleProxyStatus(details); } catch (JsonParseException e) { - s_logger.warn(String.format("Unable to parse proxy [%s] session details [%s] due to [%s].", proxy.toString(), Arrays.toString(proxy.getSessionDetails()), e.getMessage()), e); + logger.warn(String.format("Unable to parse proxy [%s] session details [%s] due to [%s].", proxy.toString(), Arrays.toString(proxy.getSessionDetails()), e.getMessage()), e); } if (status != null && status.getConnections() != null) { @@ -450,7 +448,7 @@ private boolean hasPreviousSession(ConsoleProxyVO proxy, VMInstanceVO vm) { try { taggedVmId = Long.parseLong(connection.tag); } catch (NumberFormatException e) { - s_logger.warn(String.format("Unable to parse console proxy connection info passed through tag [%s] due to [%s].", connection.tag, e.getMessage()), e); + logger.warn(String.format("Unable to parse console proxy connection info passed through tag [%s] due to [%s].", connection.tag, e.getMessage()), e); } } @@ -461,7 +459,7 @@ private boolean hasPreviousSession(ConsoleProxyVO proxy, VMInstanceVO vm) { return DateUtil.currentGMTTime().getTime() - vm.getProxyAssignTime().getTime() < proxySessionTimeoutValue; } else { - s_logger.warn(String.format("Unable to retrieve load info from proxy [%s] on an overloaded proxy.", proxy.toString())); + logger.warn(String.format("Unable to retrieve load info from proxy [%s] on an overloaded proxy.", proxy.toString())); return false; } } @@ -485,9 +483,9 @@ public ConsoleProxyVO startProxy(long proxyVmId, boolean ignoreRestartSetting) { return proxy; } - s_logger.warn(String.format("Console proxy [%s] must be in \"Stopped\" state to start proxy. Current state [%s].", proxy.toString(), proxy.getState())); + logger.warn(String.format("Console proxy [%s] must be in \"Stopped\" state to start proxy. 
Current state [%s].", proxy.toString(), proxy.getState())); } catch ( ConcurrentOperationException | InsufficientCapacityException | OperationTimedoutException | ResourceUnavailableException ex) { - s_logger.warn(String.format("Unable to start proxy [%s] due to [%s].", proxyVmId, ex.getMessage()), ex); + logger.warn(String.format("Unable to start proxy [%s] due to [%s].", proxyVmId, ex.getMessage()), ex); } return null; @@ -495,8 +493,8 @@ public ConsoleProxyVO startProxy(long proxyVmId, boolean ignoreRestartSetting) { public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign console proxy from running pool for request from data center : " + dataCenterId); + if (logger.isDebugEnabled()) { + logger.debug("Assign console proxy from running pool for request from data center : " + dataCenterId); } ConsoleProxyAllocator allocator = getCurrentAllocator(); @@ -510,8 +508,8 @@ public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { it.remove(); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Running [%s] proxy instances [%s].", runningList.size(), runningList.stream().map(proxy -> proxy.toString()).collect(Collectors.joining(", ")))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Running [%s] proxy instances [%s].", runningList.size(), runningList.stream().map(proxy -> proxy.toString()).collect(Collectors.joining(", ")))); } List> l = consoleProxyDao.getProxyLoadMatrix(); @@ -523,8 +521,8 @@ public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { loadInfo.put(proxyId, countRunningVms); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Running proxy instance allocation {\"proxyId\": %s, \"countRunningVms\": %s}.", proxyId, countRunningVms)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Running proxy instance allocation {\"proxyId\": %s, \"countRunningVms\": %s}.", proxyId, countRunningVms)); } } @@ 
-533,14 +531,14 @@ public ConsoleProxyVO assignProxyFromRunningPool(long dataCenterId) { Long allocated = allocator.allocProxy(runningList, loadInfo, dataCenterId); if (allocated == null) { - s_logger.debug(String.format("Console proxy not found, unable to assign console proxy from running pool for request from zone [%s].", dataCenterId)); + logger.debug(String.format("Console proxy not found, unable to assign console proxy from running pool for request from zone [%s].", dataCenterId)); return null; } return consoleProxyDao.findById(allocated); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Empty running proxy pool for now in data center : " + dataCenterId); + if (logger.isDebugEnabled()) { + logger.debug("Empty running proxy pool for now in data center : " + dataCenterId); } } @@ -560,13 +558,13 @@ public ConsoleProxyVO assignProxyFromStoppedPool(long dataCenterId) { public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Assign console proxy from a newly started instance for request from data center : " + dataCenterId); + if (logger.isDebugEnabled()) { + logger.debug("Assign console proxy from a newly started instance for request from data center : " + dataCenterId); } if (!allowToLaunchNew(dataCenterId)) { String configKey = Config.ConsoleProxyLaunchMax.key(); - s_logger.warn(String.format("The number of launched console proxys on zone [%s] has reached the limit [%s]. Limit set in [%s].", dataCenterId, configurationDao.getValue(configKey), configKey)); + logger.warn(String.format("The number of launched console proxys on zone [%s] has reached the limit [%s]. 
Limit set in [%s].", dataCenterId, configurationDao.getValue(configKey), configKey)); return null; } @@ -580,8 +578,8 @@ public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationExce long proxyVmId = (Long)context.get("proxyVmId"); if (proxyVmId == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Unable to create proxy instance in zone [%s].", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Unable to create proxy instance in zone [%s].", dataCenterId)); } return null; } @@ -592,8 +590,8 @@ public ConsoleProxyVO startNew(long dataCenterId) throws ConcurrentOperationExce new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_CREATED, dataCenterId, proxy.getId(), proxy, null)); return proxy; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to allocate console proxy storage, remove the console proxy record from DB, proxy id: " + proxyVmId); + if (logger.isDebugEnabled()) { + logger.debug("Unable to allocate console proxy storage, remove the console proxy record from DB, proxy id: " + proxyVmId); } } return null; @@ -708,7 +706,7 @@ protected Map createProxyInstance(long dataCenterId, VMTemplateV virtualMachineManager.allocate(name, template, serviceOffering, networks, plan, null); } catch (InsufficientCapacityException e) { String message = String.format("Unable to allocate proxy [%s] on zone [%s] due to [%s].", proxy.toString(), dataCenterId, e.getMessage()); - s_logger.warn(message, e); + logger.warn(message, e); throw new CloudRuntimeException(message, e); } @@ -738,8 +736,8 @@ public void handleAgentDisconnect(long agentId, com.cloud.host.Status state) { HostVO host = hostDao.findById(agentId); if (host.getType() == Type.ConsoleProxy) { String name = host.getName(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected, proxy: " + name); + if (logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected, proxy: " + name); 
} if (name != null && name.startsWith("v-")) { String[] tokens = name.split("-"); @@ -747,13 +745,13 @@ public void handleAgentDisconnect(long agentId, com.cloud.host.Status state) { try { proxyVmId = Long.parseLong(tokens[1]); } catch (NumberFormatException e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); return; } final ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); - if (proxy == null && s_logger.isInfoEnabled()) { - s_logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); + if (proxy == null && logger.isInfoEnabled()) { + logger.info("Console proxy agent disconnected but corresponding console proxy VM no longer exists in DB, proxy: " + name); } } else { assert (false) : "Invalid console proxy name: " + name; @@ -779,8 +777,8 @@ private boolean isConsoleProxyVmRequired(long dcId) { private boolean allowToLaunchNew(long dcId) { if (!isConsoleProxyVmRequired(dcId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Console proxy vm not required in zone " + dcId + " not launching"); + if (logger.isDebugEnabled()) { + logger.debug("Console proxy vm not required in zone " + dcId + " not launching"); } return false; } @@ -798,8 +796,8 @@ private boolean checkCapacity(ConsoleProxyLoadInfo proxyCountInfo, ConsoleProxyL } private void allocCapacity(long dataCenterId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Allocating console proxy standby capacity for zone [%s].", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Allocating console proxy standby capacity for zone [%s].", dataCenterId)); } ConsoleProxyVO proxy = null; @@ -808,26 +806,26 @@ private void allocCapacity(long dataCenterId) { boolean consoleProxyVmFromStoppedPool = false; proxy = assignProxyFromStoppedPool(dataCenterId); if (proxy == null) { - if (s_logger.isInfoEnabled()) { - 
s_logger.info("No stopped console proxy is available, need to allocate a new console proxy"); + if (logger.isInfoEnabled()) { + logger.info("No stopped console proxy is available, need to allocate a new console proxy"); } if (allocProxyLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS)) { try { proxy = startNew(dataCenterId); } catch (ConcurrentOperationException e) { - s_logger.warn(String.format("Unable to start new console proxy on zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); + logger.warn(String.format("Unable to start new console proxy on zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); } finally { allocProxyLock.unlock(); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to acquire synchronization lock for console proxy vm allocation, wait for next scan"); + if (logger.isInfoEnabled()) { + logger.info("Unable to acquire synchronization lock for console proxy vm allocation, wait for next scan"); } } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Found a stopped console proxy, starting it. Vm id : " + proxy.getId()); + if (logger.isInfoEnabled()) { + logger.info("Found a stopped console proxy, starting it. 
Vm id : " + proxy.getId()); } consoleProxyVmFromStoppedPool = true; } @@ -837,14 +835,14 @@ private void allocCapacity(long dataCenterId) { proxy = startProxy(proxyVmId, false); if (proxy != null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy " + proxy.getHostName() + " is started"); + if (logger.isInfoEnabled()) { + logger.info("Console proxy " + proxy.getHostName() + " is started"); } SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, new ConsoleProxyAlertEventArgs(ConsoleProxyAlertEventArgs.PROXY_UP, dataCenterId, proxy.getId(), proxy, null)); } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to start console proxy vm for standby capacity, vm id : " + proxyVmId + ", will recycle it and start a new one"); + if (logger.isInfoEnabled()) { + logger.info("Unable to start console proxy vm for standby capacity, vm id : " + proxyVmId + ", will recycle it and start a new one"); } if (consoleProxyVmFromStoppedPool) { @@ -854,7 +852,7 @@ private void allocCapacity(long dataCenterId) { } } catch (Exception e) { errorString = e.getMessage(); - s_logger.warn(String.format("Unable to allocate console proxy standby capacity for zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); + logger.warn(String.format("Unable to allocate console proxy standby capacity for zone [%s] due to [%s].", dataCenterId, e.getMessage()), e); throw e; } finally { if (proxy == null || proxy.getState() != State.Running) @@ -866,8 +864,8 @@ private void allocCapacity(long dataCenterId) { public boolean isZoneReady(Map zoneHostInfoMap, long dataCenterId) { List hosts = hostDao.listByDataCenterId(dataCenterId); if (CollectionUtils.isEmpty(hosts)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); } return 
false; } @@ -875,8 +873,8 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { VMTemplateVO template = vmTemplateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); if (template == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); + if (logger.isDebugEnabled()) { + logger.debug("System vm template is not ready at data center " + dataCenterId + ", wait until it is ready to launch console proxy vm"); } return false; } @@ -893,13 +891,13 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen if (CollectionUtils.isNotEmpty(l) && l.get(0).second() > 0) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Primary storage is not ready, wait until it is ready to launch console proxy"); + if (logger.isDebugEnabled()) { + logger.debug("Primary storage is not ready, wait until it is ready to launch console proxy"); } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Zone [%s] is ready, but console proxy template [%s] is not ready on secondary storage.", dataCenterId, template.getId())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Zone [%s] is ready, but console proxy template [%s] is not ready on secondary storage.", dataCenterId, template.getId())); } } } @@ -933,8 +931,8 @@ private synchronized Map getZoneHostInfo() { @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start console proxy manager"); + if (logger.isInfoEnabled()) { + logger.info("Start console proxy manager"); } return true; @@ -942,8 +940,8 @@ public boolean start() { @Override public boolean stop() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Stop console proxy manager"); + if (logger.isInfoEnabled()) { + logger.info("Stop console proxy manager"); } loadScanner.stop(); @@ 
-956,8 +954,8 @@ public boolean stop() { public boolean stopProxy(long proxyVmId) { ConsoleProxyVO proxy = consoleProxyDao.findById(proxyVmId); if (proxy == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stopping console proxy failed: console proxy " + proxyVmId + " no longer exists"); + if (logger.isDebugEnabled()) { + logger.debug("Stopping console proxy failed: console proxy " + proxyVmId + " no longer exists"); } return false; } @@ -966,7 +964,7 @@ public boolean stopProxy(long proxyVmId) { virtualMachineManager.stop(proxy.getUuid()); return true; } catch (CloudRuntimeException | ResourceUnavailableException e) { - s_logger.warn(String.format("Unable to stop console proxy [%s] due to [%s].", proxy.toString(), e.getMessage()), e); + logger.warn(String.format("Unable to stop console proxy [%s] due to [%s].", proxy.toString(), e.getMessage()), e); return false; } } @@ -990,7 +988,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } } catch (Exception e) { - s_logger.error(String.format("Unable to set console proxy management state to [%s] due to [%s].", state, e.getMessage()), e); + logger.error(String.format("Unable to set console proxy management state to [%s] due to [%s].", state, e.getMessage()), e); } } @@ -1007,7 +1005,7 @@ public ConsoleProxyManagementState getManagementState() { } } - s_logger.error(String.format("Value [%s] set in global configuration [%s] is not a valid console proxy management state.", value, configKey)); + logger.error(String.format("Value [%s] set in global configuration [%s] is not a valid console proxy management state.", value, configKey)); return null; } @@ -1025,7 +1023,7 @@ public void resumeLastManagementState() { configurationDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), lastState.toString()); } } catch (Exception e) { - s_logger.error(String.format("Unable to resume last management state due to [%s].", e.getMessage()), e); + 
logger.error(String.format("Unable to resume last management state due to [%s].", e.getMessage()), e); } } @@ -1041,7 +1039,7 @@ private ConsoleProxyManagementState getLastManagementState() { } } - s_logger.error(String.format("Value [%s] set in global configuration [%s] is not a valid console proxy management state.", value, configKey)); + logger.error(String.format("Value [%s] set in global configuration [%s] is not a valid console proxy management state.", value, configKey)); return null; } @@ -1058,8 +1056,8 @@ public boolean rebootProxy(long proxyVmId) { final Answer answer = agentManager.easySend(proxy.getHostId(), cmd); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully reboot console proxy " + proxy.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully reboot console proxy " + proxy.getHostName()); } SubscriptionMgr.getInstance().notifySubscribers(ConsoleProxyManager.ALERT_SUBJECT, this, @@ -1067,8 +1065,8 @@ public boolean rebootProxy(long proxyVmId) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("failed to reboot console proxy : " + proxy.getHostName()); + if (logger.isDebugEnabled()) { + logger.debug("failed to reboot console proxy : " + proxy.getHostName()); } return false; @@ -1092,13 +1090,13 @@ public boolean destroyProxy(long vmId) { consoleProxyDao.remove(vmId); HostVO host = hostDao.findByTypeNameAndZoneId(proxy.getDataCenterId(), proxy.getHostName(), Host.Type.ConsoleProxy); if (host != null) { - s_logger.debug(String.format("Removing host [%s] entry for proxy [%s].", host.toString(), vmId)); + logger.debug(String.format("Removing host [%s] entry for proxy [%s].", host.toString(), vmId)); return hostDao.remove(host.getId()); } return true; } catch (ResourceUnavailableException e) { - s_logger.warn(String.format("Unable to destroy console proxy [%s] due to [%s].", proxy, e.getMessage()), e); + logger.warn(String.format("Unable to destroy 
console proxy [%s] due to [%s].", proxy, e.getMessage()), e); return false; } } @@ -1114,8 +1112,8 @@ private String getAllocProxyLockName() { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring console proxy manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring console proxy manager : " + name); } Map configs = configurationDao.getConfiguration("management-server", params); @@ -1127,7 +1125,7 @@ public boolean configure(String name, Map params) throws Configu consoleProxyUrlDomain = configs.get(Config.ConsoleProxyUrlDomain.key()); if( sslEnabled && (consoleProxyUrlDomain == null || consoleProxyUrlDomain.isEmpty())) { - s_logger.warn("Empty console proxy domain, explicitly disabling SSL"); + logger.warn("Empty console proxy domain, explicitly disabling SSL"); sslEnabled = false; } @@ -1153,9 +1151,9 @@ public boolean configure(String name, Map params) throws Configu useStorageVm = true; } - if (s_logger.isInfoEnabled()) { - s_logger.info("Console proxy max session soft limit : " + capacityPerProxy); - s_logger.info("Console proxy standby capacity : " + standbyCapacity); + if (logger.isInfoEnabled()) { + logger.info("Console proxy max session soft limit : " + capacityPerProxy); + logger.info("Console proxy standby capacity : " + standbyCapacity); } instance = configs.get("instance.name"); @@ -1180,14 +1178,14 @@ public boolean configure(String name, Map params) throws Configu serviceOfferingVO = serviceOfferingDao.findByUuid(cpvmSrvcOffIdStr); if (serviceOfferingVO == null) { try { - s_logger.debug(String.format("Unable to find a service offering by the UUID for console proxy VM with the value [%s] set in the configuration [%s]. 
Trying to find by the ID.", cpvmSrvcOffIdStr, configKey)); + logger.debug(String.format("Unable to find a service offering by the UUID for console proxy VM with the value [%s] set in the configuration [%s]. Trying to find by the ID.", cpvmSrvcOffIdStr, configKey)); serviceOfferingVO = serviceOfferingDao.findById(Long.parseLong(cpvmSrvcOffIdStr)); } catch (NumberFormatException ex) { - s_logger.warn(String.format("Unable to find a service offering by the ID for console proxy VM with the value [%s] set in the configuration [%s]. The value is not a valid integer number. Error: [%s].", cpvmSrvcOffIdStr, configKey, ex.getMessage()), ex); + logger.warn(String.format("Unable to find a service offering by the ID for console proxy VM with the value [%s] set in the configuration [%s]. The value is not a valid integer number. Error: [%s].", cpvmSrvcOffIdStr, configKey, ex.getMessage()), ex); } } if (serviceOfferingVO == null) { - s_logger.warn(String.format("Unable to find a service offering by the UUID or ID for console proxy VM with the value [%s] set in the configuration [%s]", cpvmSrvcOffIdStr, configKey)); + logger.warn(String.format("Unable to find a service offering by the UUID or ID for console proxy VM with the value [%s] set in the configuration [%s]", cpvmSrvcOffIdStr, configKey)); } } @@ -1200,7 +1198,7 @@ public boolean configure(String name, Map params) throws Configu if (offerings == null || offerings.size() < 2) { String msg = "Data integrity problem : System Offering For Console Proxy has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -1214,8 +1212,8 @@ public boolean configure(String name, Map params) throws Configu staticPort = NumbersUtil.parseInt(configurationDao.getValue("consoleproxy.static.port"), 8443); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Console Proxy Manager is configured."); + if (logger.isInfoEnabled()) { + logger.info("Console Proxy Manager is configured."); } return 
true; } @@ -1280,10 +1278,10 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl if (nic.getTrafficType() == TrafficType.Management) { String mgmt_cidr = configurationDao.getValue(Config.ManagementNetwork.key()); if (NetUtils.isValidCidrList(mgmt_cidr)) { - s_logger.debug("Management server cidr list is " + mgmt_cidr); + logger.debug("Management server cidr list is " + mgmt_cidr); buf.append(" mgmtcidr=").append(mgmt_cidr); } else { - s_logger.error("Invalid management cidr list: " + mgmt_cidr); + logger.error("Invalid management cidr list: " + mgmt_cidr); } buf.append(" localgw=").append(dest.getPod().getGateway()); } @@ -1306,8 +1304,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl } buf.append(" keystore_password=").append(VirtualMachineGuru.getEncodedString(PasswordGenerator.generateRandomPassword(16))); String bootArgs = buf.toString(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + bootArgs); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + bootArgs); } return true; @@ -1351,7 +1349,7 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof if (controlNic == null) { if (managementNic == null) { - s_logger.error("Management network doesn't exist for the console proxy vm " + profile.getVirtualMachine()); + logger.error("Management network doesn't exist for the console proxy vm " + profile.getVirtualMachine()); return false; } controlNic = managementNic; @@ -1370,7 +1368,7 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) { CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (answer == null || !answer.getResult()) { - s_logger.warn(String.format("Unable to use SSH on the VM [%s] due to [%s].", profile.toString(), answer == null 
? "null answer" : answer.getDetails())); + logger.warn(String.format("Unable to use SSH on the VM [%s] due to [%s].", profile.toString(), answer == null ? "null answer" : answer.getDetails())); return false; } @@ -1383,7 +1381,7 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command consoleProxyDao.update(consoleVm.getId(), consoleVm); } } catch (InsufficientAddressCapacityException ex) { - s_logger.warn(String.format("Unable to retrieve system IP and enable static NAT for the VM [%s] due to [%s].", profile.toString(), ex.getMessage()), ex); + logger.warn(String.format("Unable to retrieve system IP and enable static NAT for the VM [%s] due to [%s].", profile.toString(), ex.getMessage()), ex); return false; } @@ -1409,7 +1407,7 @@ public void finalizeStop(VirtualMachineProfile profile, Answer answer) { try { rulesManager.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); } catch (ResourceUnavailableException ex) { - s_logger.error(String.format("Unable to disable static NAT and release system IP [%s] as a part of VM [%s] stop due to [%s].", ip.toString(), profile.toString(), ex.getMessage()), ex); + logger.error(String.format("Unable to disable static NAT and release system IP [%s] as a part of VM [%s] stop due to [%s].", ip.toString(), profile.toString(), ex.getMessage()), ex); } } } @@ -1458,13 +1456,13 @@ private void scanManagementState() { private void handleResetSuspending() { List runningProxies = consoleProxyDao.getProxyListInStates(State.Running); for (ConsoleProxyVO proxy : runningProxies) { - s_logger.info("Stop console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); + logger.info("Stop console proxy " + proxy.getId() + " because of we are currently in ResetSuspending management mode"); stopProxy(proxy.getId()); } List proxiesInTransition = consoleProxyDao.getProxyListInStates(State.Running, State.Starting, State.Stopping); if 
(CollectionUtils.isEmpty(proxiesInTransition)) { - s_logger.info("All previous console proxy VMs in transition mode ceased the mode, we will now resume to last management state"); + logger.info("All previous console proxy VMs in transition mode ceased the mode, we will now resume to last management state"); resumeLastManagementState(); } } @@ -1474,15 +1472,15 @@ public boolean canScan() { scanManagementState(); if (!reserveStandbyCapacity()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reserving standby capacity is disabled, skip capacity scan"); + if (logger.isDebugEnabled()) { + logger.debug("Reserving standby capacity is disabled, skip capacity scan"); } return false; } List upPools = primaryDataStoreDao.listByStatus(StoragePoolStatus.Up); if (CollectionUtils.isEmpty(upPools)) { - s_logger.debug("Skip capacity scan as there is no Primary Storage in 'Up' state"); + logger.debug("Skip capacity scan as there is no Primary Storage in 'Up' state"); return false; } @@ -1492,8 +1490,8 @@ public boolean canScan() { @Override public Long[] getScannablePools() { List zoneIds = dataCenterDao.listEnabledNonEdgeZoneIds(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Enabled non-edge zones available for scan: %s", org.apache.commons.lang3.StringUtils.join(zoneIds, ","))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Enabled non-edge zones available for scan: %s", org.apache.commons.lang3.StringUtils.join(zoneIds, ","))); } return zoneIds.toArray(Long[]::new); } @@ -1501,23 +1499,23 @@ public Long[] getScannablePools() { @Override public boolean isPoolReadyForScan(Long dataCenterId) { if (!isZoneReady(zoneHostInfoMap, dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " is not ready to launch console proxy yet"); } return false; } List l = 
consoleProxyDao.getProxyListInStates(VirtualMachine.State.Starting, VirtualMachine.State.Stopping); if (l.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " has " + l.size() + " console proxy VM(s) in transition state"); } return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " is ready to launch console proxy"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " is ready to launch console proxy"); } return true; } @@ -1535,8 +1533,8 @@ public Pair scanPool(Long dataCenterId) { } if (!checkCapacity(proxyInfo, vmInfo)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expand console proxy standby capacity for zone " + proxyInfo.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Expand console proxy standby capacity for zone " + proxyInfo.getName()); } return new Pair<>(AfterScanAction.expand, null); @@ -1623,7 +1621,7 @@ protected void updateConsoleProxyStatus(String statusInfo, Long proxyVmId) { try { status = parseJsonToConsoleProxyStatus(statusInfo); } catch (JsonParseException e) { - s_logger.warn(String.format("Unable to parse load info [%s] from proxy {\"vmId\": %s} due to [%s].", statusInfo, proxyVmId, e.getMessage()), e); + logger.warn(String.format("Unable to parse load info [%s] from proxy {\"vmId\": %s} due to [%s].", statusInfo, proxyVmId, e.getMessage()), e); } int count = 0; @@ -1638,7 +1636,7 @@ protected void updateConsoleProxyStatus(String statusInfo, Long proxyVmId) { } details = statusInfo.getBytes(Charset.forName("US-ASCII")); } else { - s_logger.debug(String.format("Unable to retrieve load info from proxy {\"vmId\": %s}. Invalid load info [%s].", proxyVmId, statusInfo)); + logger.debug(String.format("Unable to retrieve load info from proxy {\"vmId\": %s}. 
Invalid load info [%s].", proxyVmId, statusInfo)); } consoleProxyDao.update(proxyVmId, count, DateUtil.currentGMTTime(), details); diff --git a/server/src/main/java/com/cloud/dc/dao/DedicatedResourceDaoImpl.java b/server/src/main/java/com/cloud/dc/dao/DedicatedResourceDaoImpl.java index 31f2e361cd59..3535baa2144c 100644 --- a/server/src/main/java/com/cloud/dc/dao/DedicatedResourceDaoImpl.java +++ b/server/src/main/java/com/cloud/dc/dao/DedicatedResourceDaoImpl.java @@ -31,7 +31,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.utils.db.Filter; import org.springframework.stereotype.Component; -import org.apache.log4j.Logger; import com.cloud.dc.DedicatedResourceVO; import com.cloud.utils.Pair; @@ -51,8 +50,6 @@ @DB public class DedicatedResourceDaoImpl extends GenericDaoBase implements DedicatedResourceDao { - public static Logger LOGGER = Logger.getLogger(DedicatedResourceDaoImpl.class.getName()); - @Inject protected HostDao hostDao; @@ -451,7 +448,7 @@ public List findHostsByZone(Long zoneId) { @Override public Map> listDomainsOfDedicatedResourcesUsedByDomainPath(String domainPath) { - LOGGER.debug(String.format("Retrieving the domains of the dedicated resources used by domain with path [%s].", domainPath)); + logger.debug(String.format("Retrieving the domains of the dedicated resources used by domain with path [%s].", domainPath)); TransactionLegacy txn = TransactionLegacy.currentTxn(); try (PreparedStatement pstmt = txn.prepareStatement(LIST_DOMAINS_OF_DEDICATED_RESOURCES_USED_BY_DOMAIN_PATH)) { @@ -472,10 +469,10 @@ public Map> listDomainsOfDedicatedResourcesUsedByDomainPath(S return domainsOfDedicatedResourcesUsedByDomainPath; } catch (SQLException e) { - LOGGER.error(String.format("Failed to retrieve the domains of the dedicated resources used by domain with path [%s] due to [%s]. Returning an empty " + logger.error(String.format("Failed to retrieve the domains of the dedicated resources used by domain with path [%s] due to [%s]. 
Returning an empty " + "list of domains.", domainPath, e.getMessage())); - LOGGER.debug(String.format("Failed to retrieve the domains of the dedicated resources used by domain with path [%s]. Returning an empty " + logger.debug(String.format("Failed to retrieve the domains of the dedicated resources used by domain with path [%s]. Returning an empty " + "list of domains.", domainPath), e); return new HashMap<>(); diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index cb22e81f3669..75b47c357bdc 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -36,6 +36,18 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.user.AccountVO; +import com.cloud.user.dao.AccountDao; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.utils.db.Filter; +import com.cloud.utils.fsm.StateMachine2; + +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; @@ -48,8 +60,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; @@ -57,9 +67,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -91,7 +98,6 @@ import com.cloud.exception.AffinityConflictException; import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -112,32 +118,26 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.AccountManager; -import com.cloud.user.AccountVO; -import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; -import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -150,7 +150,6 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener, StateListener, Configurable { - private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class); @Inject AgentManager _agentMgr; @Inject @@ -288,7 +287,7 @@ protected void avoidOtherClustersForDeploymentIfMigrationDisabled(VirtualMachine return; } final Long lastHostClusterId = lastHost.getClusterId(); - s_logger.warn(String.format("VM last host ID: %d belongs to zone ID: %s for which config - %s is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: %d from this zone to avoid list", + logger.warn(String.format("VM last host ID: %d belongs to zone ID: %s for which config - %s is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: %d from this zone to avoid list", lastHost.getId(), vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId)); List clusterIds = _clusterDao.listAllClusters(lastHost.getDataCenterId()); Set existingAvoidedClusters = avoids.getClustersToAvoid(); @@ -310,13 +309,13 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) { checkForNonDedicatedResources(vmProfile, dc, avoids); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlanner allocation algorithm: " + planner); + if (logger.isDebugEnabled()) { + logger.debug("DeploymentPlanner allocation algorithm: " + planner); - 
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + + logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + toHumanReadableSize(ram_requested)); - s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No")); + logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No")); } avoidDisabledResources(vmProfile, dc, avoids); @@ -326,25 +325,25 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym if (plan.getHostId() != null && haVmTag == null) { Long hostIdSpecified = plan.getHostId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlan has host_id specified, choosing this host: " + hostIdSpecified); + if (logger.isDebugEnabled()) { + logger.debug("DeploymentPlan has host_id specified, choosing this host: " + hostIdSpecified); } HostVO host = _hostDao.findById(hostIdSpecified); if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) { DetailVO uefiHostDetail = _hostDetailsDao.findDetail(host.getId(), Host.HOST_UEFI_ENABLE); if (uefiHostDetail == null || "false".equalsIgnoreCase(uefiHostDetail.getValue())) { - s_logger.debug("Cannot deploy to specified host as host does n't support uefi vm deployment, returning."); + logger.debug("Cannot deploy to specified host as host does n't support uefi vm deployment, returning."); return null; } } if (host == null) { - s_logger.debug("The specified host cannot be found"); + logger.debug("The specified host cannot be found"); } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The specified host is in avoid set"); + logger.debug("The specified host is in avoid set"); } else { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug( + if (logger.isDebugEnabled()) { + logger.debug( "Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); } @@ -355,7 +354,7 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } @@ -370,10 +369,10 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym _hostDao.loadDetails(host); if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { - s_logger.warn(String.format("VM's volumes require encryption support, and provided host %s can't handle it", host)); + logger.warn(String.format("VM's volumes require encryption support, and provided host %s can't handle it", host)); return null; } else { - s_logger.debug(String.format("Volume encryption requirements are met by provided host %s", host)); + logger.debug(String.format("Volume encryption requirements are met by provided host %s", host)); } // choose the potential pool for this VM for this host @@ -393,12 +392,12 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym storageVolMap.remove(vol); } DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } } } - s_logger.debug("Cannot deploy to specified host, returning."); + logger.debug("Cannot deploy to specified host, returning."); return null; } @@ -411,17 
+410,17 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); - s_logger.debug("Deploy hosts with priorities " + plan.getHostPriorities() + " , hosts have NORMAL priority by default"); + if (logger.isDebugEnabled()) { + logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); + logger.debug("Deploy hosts with priorities " + plan.getHostPriorities() + " , hosts have NORMAL priority by default"); } // call planners // DataCenter dc = _dcDao.findById(vm.getDataCenterId()); // check if datacenter is in avoid set if (avoids.shouldAvoid(dc)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + if (logger.isDebugEnabled()) { + logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } return null; } @@ -444,7 +443,7 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym boolean considerLastHost = vm.getLastHostId() != null && haVmTag == null && (considerLastHostStr == null || Boolean.TRUE.toString().equalsIgnoreCase(considerLastHostStr)); if (considerLastHost) { - s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); + logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); HostVO host = _hostDao.findById(vm.getLastHostId()); lastHost = host; @@ -452,23 +451,23 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym _hostDao.loadDetails(host); ServiceOfferingDetailsVO offeringDetails = null; if (host == 
null) { - s_logger.debug("The last host of this VM cannot be found"); + logger.debug("The last host of this VM cannot be found"); } else if (avoids.shouldAvoid(host)) { - s_logger.debug("The last host of this VM is in avoid set"); + logger.debug("The last host of this VM is in avoid set"); } else if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) { - s_logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " + plan.getClusterId()); } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - s_logger.debug("The last Host, hostId: " + host.getId() + + logger.debug("The last Host, hostId: " + host.getId() + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - s_logger.debug("The last host of this VM does not have required GPU devices available"); + logger.debug("The last host of this VM does not have required GPU devices available"); } } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { - s_logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); + logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); } else { if (host.getStatus() == Status.Up) { if (checkVmProfileAndHost(vmProfile, host)) { @@ -495,15 +494,15 @@ public DeployDestination 
planDeployment(VirtualMachineProfile vmProfile, Deploym boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); if (hostHasCapacity && hostHasCpuCapability) { - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + logger.debug("The last host of this VM is UP and has enough capacity"); + logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); Pod pod = _podDao.findById(host.getPodId()); Cluster cluster = _clusterDao.findById(host.getClusterId()); if (vm.getHypervisorType() == HypervisorType.BareMetal) { DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } @@ -536,20 +535,20 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym } DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } } } else { - s_logger.debug("The last host of this VM does not have enough capacity"); + logger.debug("The last host of this VM does not have enough capacity"); } } } else { - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + + logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + host.getResourceState()); } } - s_logger.debug("Cannot choose the last host to deploy this VM "); + logger.debug("Cannot choose the last host to deploy this VM "); } 
avoidOtherClustersForDeploymentIfMigrationDisabled(vm, lastHost, avoids); @@ -591,10 +590,10 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym avoids.addHost(dest.getHost().getId()); if (volumesRequireEncryption && !Boolean.parseBoolean(_hostDetailsDao.findDetail(hostId, Host.HOST_VOLUME_ENCRYPTION).getValue())) { - s_logger.warn(String.format("VM's volumes require encryption support, and the planner-provided host %s can't handle it", dest.getHost())); + logger.warn(String.format("VM's volumes require encryption support, and the planner-provided host %s can't handle it", dest.getHost())); continue; } else { - s_logger.debug(String.format("VM's volume encryption requirements are met by host %s", dest.getHost())); + logger.debug(String.format("VM's volume encryption requirements are met by host %s", dest.getHost())); } if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { @@ -734,7 +733,7 @@ protected boolean checkVmProfileAndHost(final VirtualMachineProfile vmProfile, f if (offering.getHostTag() != null) { _hostDao.loadHostTags(host); if (!host.checkHostServiceOfferingTags(offering)) { - s_logger.debug("Service Offering host tag does not match the last host of this VM"); + logger.debug("Service Offering host tag does not match the last host of this VM"); return false; } } @@ -746,7 +745,7 @@ protected boolean checkVmProfileAndHost(final VirtualMachineProfile vmProfile, f if (hostDetail != null) { String guestOSCategoryIdString = hostDetail.getValue(); if (String.valueOf(guestOSCategoryId) != guestOSCategoryIdString) { - s_logger.debug("The last host has different guest.os.category.id than guest os category of VM, skipping"); + logger.debug("The last host has different guest.os.category.id than guest os category of VM, skipping"); return false; } } @@ -809,16 +808,16 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC //Only when the type is instance VM and not 
explicitly dedicated. if (vm.getType() == VirtualMachine.Type.User && !isExplicit) { //add explicitly dedicated resources in avoidList - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding pods to avoid lists for non-explicit VM deployment: " + allPodsInDc); + if (logger.isDebugEnabled()) { + logger.debug("Adding pods to avoid lists for non-explicit VM deployment: " + allPodsInDc); } avoids.addPodList(allPodsInDc); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding clusters to avoid lists for non-explicit VM deployment: " + allClustersInDc); + if (logger.isDebugEnabled()) { + logger.debug("Adding clusters to avoid lists for non-explicit VM deployment: " + allClustersInDc); } avoids.addClusterList(allClustersInDc); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding hosts to avoid lists for non-explicit VM deployment: " + allHostsInDc); + if (logger.isDebugEnabled()) { + logger.debug("Adding hosts to avoid lists for non-explicit VM deployment: " + allHostsInDc); } avoids.addHostList(allHostsInDc); } @@ -898,16 +897,16 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC } //Add in avoid list or no addition if no dedication - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding pods to avoid lists: " + allPodsInDc); + if (logger.isDebugEnabled()) { + logger.debug("Adding pods to avoid lists: " + allPodsInDc); } avoids.addPodList(allPodsInDc); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding clusters to avoid lists: " + allClustersInDc); + if (logger.isDebugEnabled()) { + logger.debug("Adding clusters to avoid lists: " + allClustersInDc); } avoids.addClusterList(allClustersInDc); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding hosts to avoid lists: " + allHostsInDc); + if (logger.isDebugEnabled()) { + logger.debug("Adding hosts to avoid lists: " + allHostsInDc); } avoids.addHostList(allHostsInDc); } @@ -958,7 +957,7 @@ protected boolean checkIfHostFitsPlannerUsage(final long hostId, 
final PlannerRe if (hostResourceType == resourceUsageRequired) { return true; } else { - s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + + logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + hostResourceType); return false; } @@ -971,7 +970,7 @@ protected boolean checkIfHostFitsPlannerUsage(final long hostId, final PlannerRe public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: " + hostId); return false; } // check before updating @@ -984,7 +983,7 @@ public Boolean doInTransaction(TransactionStatus status) { if (lockedEntry.getResourceUsage() == resourceUsageRequired) { return true; } else { - s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + + logger.debug("Cannot use this host for usage: " + resourceUsageRequired + ", since this host has been reserved for planner usage : " + hostResourceTypeFinal); return false; } @@ -1009,8 +1008,8 @@ public boolean checkHostReservationRelease(final Long hostId) { // check if any VMs are starting or running on this host List vms = _vmInstanceDao.listUpByHostId(hostId); if (vms.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + hostId); } return false; } @@ -1023,8 +1022,8 @@ public boolean checkHostReservationRelease(final Long hostId) { for (VMInstanceVO stoppedVM : vmsByLastHostId) 
{ long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime().getTime()) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found VM: " + stoppedVM + " Stopped but reserved on host " + hostId); } return false; } @@ -1034,8 +1033,8 @@ public boolean checkHostReservationRelease(final Long hostId) { // check if any VMs are stopping on or migrating to this host List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, State.Stopping, State.Migrating, State.Starting); if (vmsStoppingMigratingByHostId.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found " + vmsStoppingMigratingByHostId.size() + " VMs stopping/migrating/starting on host " + hostId); } return false; } @@ -1046,14 +1045,14 @@ public boolean checkHostReservationRelease(final Long hostId) { List vmsStartingNoHost = _vmInstanceDao.listStartingWithNoHostId(); if (vmsStartingNoHost.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs starting as of now and no hostId yet stored"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot release reservation, Found " + vms.size() + " VMs starting as of now and no hostId yet stored"); } return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); } final long 
id = reservationEntry.getId(); @@ -1063,7 +1062,7 @@ public boolean checkHostReservationRelease(final Long hostId) { public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { - s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + logger.error("Unable to lock the host entry for reservation, host: " + hostId); return false; } // check before updating @@ -1086,11 +1085,11 @@ class HostReservationReleaseChecker extends ManagedContextTimerTask { @Override protected void runInContext() { try { - s_logger.debug("Checking if any host reservation can be released ... "); + logger.debug("Checking if any host reservation can be released ... "); checkHostReservations(); - s_logger.debug("Done running HostReservationReleaseChecker ... "); + logger.debug("Done running HostReservationReleaseChecker ... "); } catch (Throwable t) { - s_logger.error("Exception in HostReservationReleaseChecker", t); + logger.error("Exception in HostReservationReleaseChecker", t); } } } @@ -1184,7 +1183,7 @@ public boolean configure(final String name, final Map params) th @Override public void onPublishMessage(String senderAddress, String subject, Object obj) { VMInstanceVO vm = ((VMInstanceVO)obj); - s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + + logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + ", checking if host reservation can be released for host:" + vm.getLastHostId()); Long hostId = vm.getLastHostId(); checkHostReservationRelease(hostId); @@ -1244,20 +1243,20 @@ public void cleanupVMReservations() { private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, DataCenter dc, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) 
{ - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List to consider: " + clusterList); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List to consider: " + clusterList); } for (Long clusterId : clusterList) { ClusterVO clusterVO = _clusterDao.findById(clusterId); if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { - s_logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); + logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); avoid.addCluster(clusterVO.getId()); continue; } - s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); // search for resources(hosts and storage) under this zone, pod, // cluster. DataCenterDeployment potentialPlan = @@ -1266,7 +1265,7 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi Pod pod = _podDao.findById(clusterVO.getPodId()); if (CollectionUtils.isNotEmpty(avoid.getPodsToAvoid()) && avoid.getPodsToAvoid().contains(pod.getId())) { - s_logger.debug("The cluster is in a disabled pod : " + pod.getId()); + logger.debug("The cluster is in a disabled pod : " + pod.getId()); } else { // find suitable hosts under this cluster, need as many hosts as we // get. 
@@ -1297,14 +1296,14 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi } boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap, displayStorage); - s_logger.debug("Returning Deployment Destination: " + dest); + logger.debug("Returning Deployment Destination: " + dest); return dest; } } else { - s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + logger.debug("No suitable storagePools found under this Cluster: " + clusterId); } } else { - s_logger.debug("No suitable hosts found under this Cluster: " + clusterId); + logger.debug("No suitable hosts found under this Cluster: " + clusterId); } } @@ -1312,7 +1311,7 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi avoid.addCluster(clusterVO.getId()); } } - s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); + logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. 
"); return null; } @@ -1424,7 +1423,7 @@ private Pair findVMStorageRequirements(VirtualMachineProfile v protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools, ExcludeList avoid, PlannerResourceUsage resourceUsageRequired, List readyAndReusedVolumes, List preferredHosts, VirtualMachine vm) { - s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); + logger.debug("Trying to find a potential host and associated storage pools from the suitable host/pool lists for this VM"); boolean hostCanAccessPool = false; boolean haveEnoughSpace = false; @@ -1451,7 +1450,7 @@ public int compare(Volume v1, Volume v2) { if (deployAsIs) { storage = new HashMap<>(); // Find the common suitable pools - s_logger.debug("Trying to allocate all the VM volumes to a single storage pool"); + logger.debug("Trying to allocate all the VM volumes to a single storage pool"); Set suitablePools = new HashSet<>(); List notAllowedPools = new ArrayList<>(); for (List pools : suitableVolumeStoragePools.values()) { @@ -1461,7 +1460,7 @@ public int compare(Volume v1, Volume v2) { } else { for (StoragePool pool : pools) { if (!suitablePools.contains(pool)) { - s_logger.debug("Storage pool " + pool.getUuid() + " not allowed for this VM"); + logger.debug("Storage pool " + pool.getUuid() + " not allowed for this VM"); notAllowedPools.add(pool); } } @@ -1469,7 +1468,7 @@ public int compare(Volume v1, Volume v2) { } suitablePools.removeAll(notAllowedPools); if (CollectionUtils.isEmpty(suitablePools)) { - s_logger.debug("Could not find a storage pool to fit all the VM volumes on this host"); + logger.debug("Could not find a storage pool to fit all the VM volumes on this host"); continue; } @@ -1490,7 +1489,7 @@ public int compare(Volume v1, Volume v2) { continue; } } catch (StorageUnavailableException e) { - s_logger.warn(String.format("Could not verify storage policy complaince against storage pool
%s due to exception %s", storagePool.getUuid(), e.getMessage())); + logger.warn(String.format("Could not verify storage policy compliance against storage pool %s due to exception %s", storagePool.getUuid(), e.getMessage())); continue; } haveEnoughSpace = true; @@ -1498,7 +1497,7 @@ public int compare(Volume v1, Volume v2) { } if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck) { for (Volume vol : volumesOrderBySizeDesc) { - s_logger.debug("Found a suitable storage pool for all the VM volumes: " + storagePool.getUuid()); + logger.debug("Found a suitable storage pool for all the VM volumes: " + storagePool.getUuid()); storage.put(vol, storagePool); } break; @@ -1507,7 +1506,7 @@ public int compare(Volume v1, Volume v2) { } else { for (Volume vol : volumesOrderBySizeDesc) { haveEnoughSpace = false; - s_logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); + logger.debug("Checking if host: " + potentialHost.getId() + " can access any suitable storage pool for volume: " + vol.getVolumeType()); List volumePoolList = suitableVolumeStoragePools.get(vol); hostCanAccessPool = false; hostAffinityCheck = checkAffinity(potentialHost, preferredHosts); @@ -1529,7 +1528,7 @@ public int compare(Volume v1, Volume v2) { continue; } } catch (StorageUnavailableException e) { - s_logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", potentialSPool.getUuid(), e.getMessage())); + logger.warn(String.format("Could not verify storage policy compliance against storage pool %s due to exception %s", potentialSPool.getUuid(), e.getMessage())); continue; } } @@ -1548,11 +1547,11 @@ public int compare(Volume v1, Volume v2) { break; } if (!haveEnoughSpace) { - s_logger.warn("insufficient capacity to allocate all volumes"); + logger.warn("insufficient capacity to allocate all volumes"); break; } if (!hostAffinityCheck) { - s_logger.debug("Host
affinity check failed"); + logger.debug("Host affinity check failed"); break; } } @@ -1566,18 +1565,18 @@ public int compare(Volume v1, Volume v2) { boolean plannerUsageFits = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && plannerUsageFits) { - s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + + logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); volumeAllocationMap.clear(); return new Pair>(potentialHost, storage); } else { if (!hostMeetsEncryptionRequirements) { - s_logger.debug("Potential host " + potentialHost + " did not meet encryption requirements of all volumes"); + logger.debug("Potential host " + potentialHost + " did not meet encryption requirements of all volumes"); } avoid.addHost(potentialHost.getId()); } } - s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); + logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); return null; } @@ -1613,7 +1612,7 @@ protected boolean hostCanAccessSPool(Host host, StoragePool pool) { hostCanAccessSPool = true; } - s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: " + pool.getId()); + logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? 
" can" : " cannot") + " access pool: " + pool.getId()); return hostCanAccessSPool; } @@ -1627,7 +1626,7 @@ protected List findSuitableHosts(VirtualMachineProfile vmProfile, Deployme } if (suitableHosts.isEmpty()) { - s_logger.debug("No suitable hosts found"); + logger.debug("No suitable hosts found"); } // re-order hosts by priority @@ -1638,7 +1637,7 @@ protected List findSuitableHosts(VirtualMachineProfile vmProfile, Deployme @Override public void reorderHostsByPriority(Map priorities, List hosts) { - s_logger.info("Re-ordering hosts " + hosts + " by priorities " + priorities); + logger.info("Re-ordering hosts " + hosts + " by priorities " + priorities); hosts.removeIf(host -> DataCenterDeployment.PROHIBITED_HOST_PRIORITY.equals(getHostPriority(priorities, host.getId()))); @@ -1651,7 +1650,7 @@ public int compare(Host host1, Host host2) { } ); - s_logger.info("Hosts after re-ordering are: " + hosts); + logger.info("Hosts after re-ordering are: " + hosts); } private Integer getHostPriority(Map priorities, Long hostId) { @@ -1684,16 +1683,16 @@ protected Pair>, List> findSuitablePoolsFo Set poolsToAvoidOutput = new HashSet<>(originalAvoidPoolSet); for (VolumeVO toBeCreated : volumesTobeCreated) { - s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); + logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); if (toBeCreated.getState() == Volume.State.Allocated && toBeCreated.getPoolId() != null) { toBeCreated.setPoolId(null); if (!_volsDao.update(toBeCreated.getId(), toBeCreated)) { throw new CloudRuntimeException(String.format("Error updating volume [%s] to clear pool Id.", toBeCreated.getId())); } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { String msg = String.format("Setting pool_id to NULL for volume id=%s as it is in Allocated state", toBeCreated.getId()); - 
s_logger.debug(msg); + logger.debug(msg); } } // If the plan specifies a poolId, it means that this VM's ROOT @@ -1701,7 +1700,7 @@ protected Pair>, List> findSuitablePoolsFo // In this case, also check if rest of the volumes are ready and can // be reused. if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) { - s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); + logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); List suitablePools = new ArrayList(); StoragePool pool = null; if (toBeCreated.getPoolId() != null) { @@ -1724,12 +1723,12 @@ protected Pair>, List> findSuitablePoolsFo canReusePool = true; } } else { - s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); + logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); canReusePool = false; } if (canReusePool) { - s_logger.debug("Planner need not allocate a pool for this volume since its READY"); + logger.debug("Planner need not allocate a pool for this volume since its READY"); suitablePools.add(pool); suitableVolumeStoragePools.put(toBeCreated, suitablePools); if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { @@ -1738,21 +1737,21 @@ protected Pair>, List> findSuitablePoolsFo continue; } } else { - s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); + logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); } } else { - s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); + logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this 
volume"); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("We need to allocate new storagepool for this volume"); + if (logger.isDebugEnabled()) { + logger.debug("We need to allocate new storagepool for this volume"); } if (!isRootAdmin(vmProfile)) { if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); - s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); + if (logger.isDebugEnabled()) { + logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); + logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); } // Cannot find suitable storage pools under this cluster for // this volume since allocation_state is disabled. @@ -1764,7 +1763,7 @@ protected Pair>, List> findSuitablePoolsFo } } - s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); + logger.debug("Calling StoragePoolAllocators to find suitable pools"); DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); @@ -1776,7 +1775,7 @@ protected Pair>, List> findSuitablePoolsFo Boolean useLocalStorageForSystemVM = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(zone.getId()); if (useLocalStorageForSystemVM != null) { useLocalStorage = useLocalStorageForSystemVM.booleanValue(); - s_logger.debug("System VMs will use " + (useLocalStorage ? "local" : "shared") + " storage for zone id=" + plan.getDataCenterId()); + logger.debug("System VMs will use " + (useLocalStorage ? 
"local" : "shared") + " storage for zone id=" + plan.getDataCenterId()); } } else { useLocalStorage = diskOffering.isUseLocalStorage(); @@ -1799,7 +1798,7 @@ protected Pair>, List> findSuitablePoolsFo } if (!foundPotentialPools) { - s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); + logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); // No suitable storage pools found under this cluster for this // volume. - remove any suitable pools found for other volumes. // All volumes should get suitable pools under this cluster; @@ -1822,7 +1821,7 @@ protected Pair>, List> findSuitablePoolsFo } if (suitableVolumeStoragePools.isEmpty()) { - s_logger.debug("No suitable pools found"); + logger.debug("No suitable pools found"); } return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); @@ -1854,12 +1853,12 @@ private Optional getPreferredStoragePool(List poolList Optional storagePool = getMatchingStoragePool(accountStoragePoolUuid, poolList); if (storagePool.isPresent()) { - s_logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: " + logger.debug("A storage pool is specified for this account, so we will use this storage pool for allocation: " + storagePool.get().getUuid()); } else { String globalStoragePoolUuid = StorageManager.PreferredStoragePool.value(); storagePool = getMatchingStoragePool(globalStoragePoolUuid, poolList); - storagePool.ifPresent(pool -> s_logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: " + storagePool.ifPresent(pool -> logger.debug("A storage pool is specified in global setting, so we will use this storage pool for allocation: " + pool.getUuid())); } return storagePool; @@ -1869,19 +1868,19 @@ private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) // Check if the zone exists in 
the system DataCenterVO zone = _dcDao.findById(zoneId); if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { - s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); return false; } Pod pod = _podDao.findById(podId); if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { - s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); return false; } Cluster cluster = _clusterDao.findById(clusterId); if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { - s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); + logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); return false; } diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index c2969ecce504..eaf36162ab6b 100644 --- a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.log4j.Logger; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; @@ -73,7 +72,6 @@ import com.cloud.host.dao.HostDetailsDao; public class FirstFitPlanner extends AdapterBase implements DeploymentClusterPlanner, Configurable, DeploymentPlanner { - private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class); @Inject protected HostDao hostDao; @Inject @@ -134,8 +132,8 @@ public List orderClusters(VirtualMachineProfile 
vmProfile, DeploymentPlan //check if datacenter is in avoid set if (avoid.shouldAvoid(dc)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); + if (logger.isDebugEnabled()) { + logger.debug("DataCenter id = '" + dc.getId() + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } return null; } @@ -143,29 +141,29 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan List clusterList = new ArrayList(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); - s_logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); + logger.debug("Searching resources only under specified Cluster: " + clusterIdSpecified); ClusterVO cluster = clusterDao.findById(plan.getClusterId()); if (cluster != null) { if (avoid.shouldAvoid(cluster)) { - s_logger.debug("The specified cluster is in avoid set, returning."); + logger.debug("The specified cluster is in avoid set, returning."); } else { clusterList.add(clusterIdSpecified); removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); } } else { - s_logger.debug("The specified cluster cannot be found, returning."); + logger.debug("The specified cluster cannot be found, returning."); avoid.addCluster(plan.getClusterId()); return null; } } else if (plan.getPodId() != null) { //consider clusters under this pod only Long podIdSpecified = plan.getPodId(); - s_logger.debug("Searching resources only under specified Pod: " + podIdSpecified); + logger.debug("Searching resources only under specified Pod: " + podIdSpecified); HostPodVO pod = podDao.findById(podIdSpecified); if (pod != null) { if (avoid.shouldAvoid(pod)) { - s_logger.debug("The specified pod is in avoid set, returning."); + logger.debug("The specified pod is in avoid set, returning."); } else { clusterList = 
scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); if (clusterList == null) { @@ -173,12 +171,12 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan } } } else { - s_logger.debug("The specified Pod cannot be found, returning."); + logger.debug("The specified Pod cannot be found, returning."); avoid.addPod(plan.getPodId()); return null; } } else { - s_logger.debug("Searching all possible resources under this Zone: " + plan.getDataCenterId()); + logger.debug("Searching all possible resources under this Zone: " + plan.getDataCenterId()); boolean applyAllocationAtPods = Boolean.parseBoolean(configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key())); if (applyAllocationAtPods) { @@ -257,14 +255,14 @@ private List scanPodsForDestination(VirtualMachineProfile vmProfile, Deplo if (!podsWithCapacity.isEmpty()) { if (avoid.getPodsToAvoid() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the podId list these pods from avoid set: " + avoid.getPodsToAvoid()); + if (logger.isDebugEnabled()) { + logger.debug("Removing from the podId list these pods from avoid set: " + avoid.getPodsToAvoid()); } podsWithCapacity.removeAll(avoid.getPodsToAvoid()); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No pods found having a host with enough capacity, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No pods found having a host with enough capacity, returning."); } return null; } @@ -273,8 +271,8 @@ private List scanPodsForDestination(VirtualMachineProfile vmProfile, Deplo prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan); if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No Pods found for destination, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No Pods found for destination, returning."); } return null; } @@ -282,7 +280,7 @@ private List 
scanPodsForDestination(VirtualMachineProfile vmProfile, Deplo List clusterList = new ArrayList(); //loop over pods for (Long podId : prioritizedPodIds) { - s_logger.debug("Checking resources under Pod: " + podId); + logger.debug("Checking resources under Pod: " + podId); List clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid); if (clustersUnderPod != null) { clusterList.addAll(clustersUnderPod); @@ -290,8 +288,8 @@ private List scanPodsForDestination(VirtualMachineProfile vmProfile, Deplo } return clusterList; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); } return null; } @@ -377,7 +375,7 @@ protected void removeClustersCrossingThreshold(List clusterListForVmAlloca "Cannot allocate cluster list %s for VM creation since their allocated percentage crosses the disable capacity threshold defined at each cluster at" + " Global Settings Configuration [name: %s, value: %s] for capacity Type : %s, skipping these clusters", clustersCrossingThreshold.toString(), configurationName, String.valueOf(configurationValue), CapacityVO.getCapacityName(capacity)); - s_logger.warn(warnMessageForClusterReachedCapacityThreshold); + logger.warn(warnMessageForClusterReachedCapacityThreshold); } } @@ -396,8 +394,8 @@ private List scanClustersForDestinationInZoneOrPod(long id, boolean isZone List prioritizedClusterIds = clusterCapacityInfo.first(); if (!prioritizedClusterIds.isEmpty()) { if (avoid.getClustersToAvoid() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters from avoid set: " + avoid.getClustersToAvoid()); + if (logger.isDebugEnabled()) { + logger.debug("Removing from the clusterId list these clusters from avoid set: " + avoid.getClustersToAvoid()); } 
prioritizedClusterIds.removeAll(avoid.getClustersToAvoid()); } @@ -409,8 +407,8 @@ private List scanClustersForDestinationInZoneOrPod(long id, boolean isZone } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found having a host with enough capacity, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No clusters found having a host with enough capacity, returning."); } return null; } @@ -418,8 +416,8 @@ private List scanClustersForDestinationInZoneOrPod(long id, boolean isZone List clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan); return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); + if (logger.isDebugEnabled()) { + logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); } return null; } @@ -455,8 +453,8 @@ protected Pair, Map> listClustersByCapacity(long id, lo //although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot //we need clusters having enough cpu AND RAM to host this particular VM and order them by aggregate cluster capacity - if (s_logger.isDebugEnabled()) { - s_logger.debug("Listing clusters in order of aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this " + + if (logger.isDebugEnabled()) { + logger.debug("Listing clusters in order of aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this " + (isZone ? 
"Zone: " : "Pod: ") + id); } String capacityTypeToOrder = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key()); @@ -466,19 +464,19 @@ protected Pair, Map> listClustersByCapacity(long id, lo } List clusterIdswithEnoughCapacity = capacityDao.listClustersInZoneOrPodByHostCapacities(id, vmId, requiredCpu, requiredRam, capacityType, isZone); - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity); } Pair, Map> result = capacityDao.orderClustersByAggregateCapacity(id, vmId, capacityType, isZone); List clusterIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); } clusterIdsOrderedByAggregateCapacity.retainAll(clusterIdswithEnoughCapacity); - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); } return result; @@ -490,8 +488,8 @@ protected Pair, Map> listPodsByCapacity(long zoneId, in //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot //we need pods having enough cpu AND RAM to host this particular VM and order them by aggregate pod capacity - if (s_logger.isDebugEnabled()) { - s_logger.debug("Listing pods in order of 
aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this Zone: " + zoneId); + if (logger.isDebugEnabled()) { + logger.debug("Listing pods in order of aggregate capacity, that have (at least one host with) enough CPU and RAM capacity under this Zone: " + zoneId); } String capacityTypeToOrder = configDao.getValue(Config.HostCapacityTypeToOrderClusters.key()); short capacityType = Capacity.CAPACITY_TYPE_CPU; @@ -500,19 +498,19 @@ protected Pair, Map> listPodsByCapacity(long zoneId, in } List podIdswithEnoughCapacity = capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType); - if (s_logger.isTraceEnabled()) { - s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity); + if (logger.isTraceEnabled()) { + logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity); } Pair, Map> result = capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType); List podIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM - if (s_logger.isTraceEnabled()) { - s_logger.trace("PodId List in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("PodId List in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); } podIdsOrderedByAggregateCapacity.retainAll(podIdswithEnoughCapacity); - if (s_logger.isTraceEnabled()) { - s_logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); + if (logger.isTraceEnabled()) { + logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); } return result; @@ -525,14 +523,14 @@ private void removeClustersWithoutMatchingTag(List clusterListForVmAllocat matchingClusters.addAll(hostDao.findClustersThatMatchHostTagRule(hostTagOnOffering)); if 
(matchingClusters.isEmpty()) { - s_logger.error(String.format("No suitable host found for the following compute offering tags [%s].", hostTagOnOffering)); + logger.error(String.format("No suitable host found for the following compute offering tags [%s].", hostTagOnOffering)); throw new CloudRuntimeException("No suitable host found."); } clusterListForVmAllocation.retainAll(matchingClusters); - if (s_logger.isDebugEnabled()) { - s_logger.debug("The clusterId list for the given offering tag: " + clusterListForVmAllocation); + if (logger.isDebugEnabled()) { + logger.debug("The clusterId list for the given offering tag: " + clusterListForVmAllocation); } } diff --git a/server/src/main/java/com/cloud/event/ActionEventUtils.java b/server/src/main/java/com/cloud/event/ActionEventUtils.java index 36461d20e421..8ea936848773 100644 --- a/server/src/main/java/com/cloud/event/ActionEventUtils.java +++ b/server/src/main/java/com/cloud/event/ActionEventUtils.java @@ -36,7 +36,8 @@ import org.apache.cloudstack.framework.events.EventBusException; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import com.cloud.configuration.Config; @@ -56,7 +57,7 @@ import com.cloud.utils.db.EntityManager; public class ActionEventUtils { - private static final Logger s_logger = Logger.getLogger(ActionEventUtils.class); + protected static Logger LOGGER = LogManager.getLogger(ActionEventUtils.class); private static EventDao s_eventDao; private static AccountDao s_accountDao; @@ -236,7 +237,7 @@ private static void publishOnEventBus(long userId, long accountId, String eventC try { s_eventBus.publish(event); } catch (EventBusException e) { - s_logger.warn("Failed to publish action event on the event bus."); + LOGGER.warn("Failed to publish action event on the event bus."); 
} } @@ -256,7 +257,7 @@ private static Ternary getResourceDetailsUsingEntityClassA try { entityUuid = getEntityUuid(entityClass, param); } catch (Exception e){ - s_logger.debug("Caught exception while finding entityUUID, moving on"); + LOGGER.debug("Caught exception while finding entityUUID, moving on"); } } if (param instanceof Long) { @@ -344,7 +345,7 @@ private static Ternary updateParentResourceCases(Ternary(id, ((Identity)objVO).getUuid(), type.toString()); } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { - s_logger.debug(String.format("Parent resource for resource ID: %d, type: %s can not be found using method %s", details.first(), type, methodName)); + LOGGER.debug(String.format("Parent resource for resource ID: %d, type: %s can not be found using method %s", details.first(), type, methodName)); } return details; } @@ -371,7 +372,7 @@ private static Ternary getResourceDetails(Long resourceId, private static long getDomainId(long accountId) { AccountVO account = s_accountDao.findByIdIncludingRemoved(accountId); if (account == null) { - s_logger.error("Failed to find account(including removed ones) by id '" + accountId + "'"); + LOGGER.error("Failed to find account(including removed ones) by id '" + accountId + "'"); return 0; } return account.getDomainId(); @@ -390,7 +391,7 @@ private static void populateFirstClassEntities(Map eventDescript eventDescription.put(ReflectUtil.getEntityName(clz), uuid); } } catch (Exception e){ - s_logger.trace("Caught exception while populating first class entities for event bus, moving on"); + LOGGER.trace("Caught exception while populating first class entities for event bus, moving on"); } } diff --git a/server/src/main/java/com/cloud/event/AlertGenerator.java b/server/src/main/java/com/cloud/event/AlertGenerator.java index 9e12486db477..27698f27862c 100644 --- a/server/src/main/java/com/cloud/event/AlertGenerator.java +++ b/server/src/main/java/com/cloud/event/AlertGenerator.java @@ 
-25,7 +25,8 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; @@ -44,7 +45,7 @@ @Component public class AlertGenerator { - private static final Logger s_logger = Logger.getLogger(AlertGenerator.class); + protected static Logger LOGGER = LogManager.getLogger(AlertGenerator.class); private static DataCenterDao s_dcDao; private static HostPodDao s_podDao; protected static EventBus s_eventBus = null; @@ -109,7 +110,7 @@ public static void publishAlertOnEventBus(String alertType, long dataCenterId, L try { s_eventBus.publish(event); } catch (EventBusException e) { - s_logger.warn("Failed to publish alert on the event bus."); + LOGGER.warn("Failed to publish alert on the event bus."); } } } diff --git a/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java b/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java index 24c699a35add..f51df27a741f 100644 --- a/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/event/dao/EventJoinDaoImpl.java @@ -27,7 +27,6 @@ import org.apache.cloudstack.api.response.EventResponse; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiResponseHelper; @@ -42,7 +41,6 @@ @Component public class EventJoinDaoImpl extends GenericDaoBase implements EventJoinDao { - public static final Logger s_logger = Logger.getLogger(EventJoinDaoImpl.class); private SearchBuilder vrSearch; diff --git a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java index 147cecdc6408..b65865e732bf 100644 --- 
a/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/main/java/com/cloud/ha/AbstractInvestigatorImpl.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -40,7 +39,6 @@ import com.cloud.utils.db.SearchCriteria.Op; public abstract class AbstractInvestigatorImpl extends AdapterBase implements Investigator { - private static final Logger s_logger = Logger.getLogger(AbstractInvestigatorImpl.class); @Inject private final HostDao _hostDao = null; @@ -90,32 +88,32 @@ protected Status testIpAddress(Long hostId, String testHostIp) { try { Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(testHostIp)); if (pingTestAnswer == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + ") returns Unknown (null) answer"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + ") returns Unknown (null) answer"); } return Status.Unknown; } if (pingTestAnswer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + ") has been successfully pinged, returning that host is up"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + ") has been successfully pinged, returning that host is up"); } // computing host is available, but could not reach agent, return false return Status.Up; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + ") cannot be pinged, returning Unknown (I don't know) state"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + ") cannot be pinged, returning Unknown (I don't know) state"); } return Status.Unknown; } } catch (AgentUnavailableException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped AgentUnavailableException returning Unknown state"); + 
if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped AgentUnavailableException returning Unknown state"); } return Status.Unknown; } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped OperationTimedoutException returning Unknown state"); + if (logger.isDebugEnabled()) { + logger.debug("host (" + testHostIp + "): " + e.getLocalizedMessage() + ", trapped OperationTimedoutException returning Unknown state"); } return Status.Unknown; } diff --git a/server/src/main/java/com/cloud/ha/CheckOnAgentInvestigator.java b/server/src/main/java/com/cloud/ha/CheckOnAgentInvestigator.java index f6409a5c0bdc..d7945ef20776 100644 --- a/server/src/main/java/com/cloud/ha/CheckOnAgentInvestigator.java +++ b/server/src/main/java/com/cloud/ha/CheckOnAgentInvestigator.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.CheckVirtualMachineAnswer; @@ -32,7 +31,6 @@ import com.cloud.vm.VirtualMachine.PowerState; public class CheckOnAgentInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(CheckOnAgentInvestigator.class); @Inject AgentManager _agentMgr; @@ -50,17 +48,17 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { try { CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer)_agentMgr.send(vm.getHostId(), cmd); if (!answer.getResult()) { - s_logger.debug("Unable to get vm state on " + vm.toString()); + logger.debug("Unable to get vm state on " + vm.toString()); throw new UnknownVM(); } - s_logger.debug("Agent responded with state " + answer.getState().toString()); + logger.debug("Agent responded with state " + answer.getState().toString()); return answer.getState() == PowerState.PowerOn; } catch (AgentUnavailableException e) { - 
s_logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Unable to reach the agent for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } catch (OperationTimedoutException e) { - s_logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); + logger.debug("Operation timed out for " + vm.toString() + ": " + e.getMessage()); throw new UnknownVM(); } } diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerExtImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerExtImpl.java index 8f3e7dd48a5a..6765992ec276 100644 --- a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerExtImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerExtImpl.java @@ -62,8 +62,8 @@ public boolean start() { protected class UsageServerMonitorTask extends ManagedContextRunnable { @Override protected void runInContext() { - if (s_logger.isInfoEnabled()) { - s_logger.info("checking health of usage server"); + if (logger.isInfoEnabled()) { + logger.info("checking health of usage server"); } try { @@ -78,8 +78,8 @@ protected void runInContext() { isRunning = true; } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("usage server running? " + isRunning + ", heartbeat: " + lastHeartbeat); + if (logger.isDebugEnabled()) { + logger.debug("usage server running? 
" + isRunning + ", heartbeat: " + lastHeartbeat); } } finally { txn.close(); @@ -96,7 +96,7 @@ protected void runInContext() { _alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER, 0, 0); } } catch (Exception ex) { - s_logger.warn("Error while monitoring usage job", ex); + logger.warn("Error while monitoring usage job", ex); } } } diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index f22bcde9e84e..b815f21e2064 100644 --- a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -39,8 +39,6 @@ import org.apache.cloudstack.managed.context.ManagedContext; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.management.ManagementServerHost; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -86,6 +84,7 @@ import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.logging.log4j.ThreadContext; /** * HighAvailabilityManagerImpl coordinates the HA process. VMs are registered with the HA Manager for HA. 
The request is stored @@ -113,7 +112,6 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements Configur private static final int SECONDS_TO_MILLISECONDS_FACTOR = 1000; - protected static final Logger s_logger = Logger.getLogger(HighAvailabilityManagerImpl.class); private ConfigKey MigrationMaxRetries = new ConfigKey<>("Advanced", Integer.class, "vm.ha.migration.max.retries","5", "Total number of attempts for trying migration of a VM.", @@ -229,13 +227,13 @@ public Status investigate(final long hostId) { for (Investigator investigator : investigators) { hostState = investigator.isAgentAlive(host); if (hostState != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(investigator.getName() + " was able to determine host " + hostId + " is in " + hostState.toString()); + if (logger.isDebugEnabled()) { + logger.debug(investigator.getName() + " was able to determine host " + hostId + " is in " + hostState.toString()); } return hostState; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(investigator.getName() + " unable to determine the state of the host. Moving on."); + if (logger.isDebugEnabled()) { + logger.debug(investigator.getName() + " unable to determine the state of the host. 
Moving on."); } } @@ -250,11 +248,11 @@ public void scheduleRestartForVmsOnHost(final HostVO host, boolean investigate) } if (host.getHypervisorType() == HypervisorType.VMware || host.getHypervisorType() == HypervisorType.Hyperv) { - s_logger.info("Don't restart VMs on host " + host.getId() + " as it is a " + host.getHypervisorType().toString() + " host"); + logger.info("Don't restart VMs on host " + host.getId() + " as it is a " + host.getHypervisorType().toString() + " host"); return; } - s_logger.warn("Scheduling restart for VMs on host " + host.getId() + "-" + host.getName()); + logger.warn("Scheduling restart for VMs on host " + host.getId() + "-" + host.getName()); final List vms = _instanceDao.listByHostId(host.getId()); final DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); @@ -288,18 +286,18 @@ public void scheduleRestartForVmsOnHost(final HostVO host, boolean investigate) for (VMInstanceVO vm : reorderedVMList) { ServiceOfferingVO vmOffering = _serviceOfferingDao.findById(vm.getServiceOfferingId()); if (_itMgr.isRootVolumeOnLocalStorage(vm.getId())) { - if (s_logger.isDebugEnabled()){ - s_logger.debug("Skipping HA on vm " + vm + ", because it uses local storage. Its fate is tied to the host."); + if (logger.isDebugEnabled()){ + logger.debug("Skipping HA on vm " + vm + ", because it uses local storage. 
Its fate is tied to the host."); } continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Notifying HA Mgr of to restart vm " + vm.getId() + "-" + vm.getInstanceName()); + if (logger.isDebugEnabled()) { + logger.debug("Notifying HA Mgr of to restart vm " + vm.getId() + "-" + vm.getInstanceName()); } vm = _instanceDao.findByUuid(vm.getUuid()); Long hostId = vm.getHostId(); if (hostId != null && !hostId.equals(host.getId())) { - s_logger.debug("VM " + vm.getInstanceName() + " is not on down host " + host.getId() + " it is on other host " + logger.debug("VM " + vm.getInstanceName() + " is not on down host " + host.getId() + " it is on other host " + hostId + " VM HA is done"); continue; } @@ -312,20 +310,20 @@ public void scheduleStop(VMInstanceVO vm, long hostId, WorkType type) { assert (type == WorkType.CheckStop || type == WorkType.ForceStop || type == WorkType.Stop); if (_haDao.hasBeenScheduled(vm.getId(), type)) { - s_logger.info("There's already a job scheduled to stop " + vm); + logger.info("There's already a job scheduled to stop " + vm); return; } HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), type, Step.Scheduled, hostId, vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled " + work); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled " + work); } wakeupWorkers(); } protected void wakeupWorkers() { - s_logger.debug("Wakeup workers HA"); + logger.debug("Wakeup workers HA"); for (WorkerThread worker : _workers) { worker.wakup(); } @@ -336,7 +334,7 @@ public boolean scheduleMigration(final VMInstanceVO vm) { if (vm.getHostId() != null) { final HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), WorkType.Migration, Step.Scheduled, vm.getHostId(), vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - s_logger.info("Scheduled migration work of VM " + vm.getUuid() + " from host " + _hostDao.findById(vm.getHostId()) + " with HAWork " + work); + 
logger.info("Scheduled migration work of VM " + vm.getUuid() + " from host " + _hostDao.findById(vm.getHostId()) + " with HAWork " + work); wakeupWorkers(); } return true; @@ -344,11 +342,11 @@ public boolean scheduleMigration(final VMInstanceVO vm) { @Override public void scheduleRestart(VMInstanceVO vm, boolean investigate) { - s_logger.debug("HA schedule restart"); + logger.debug("HA schedule restart"); Long hostId = vm.getHostId(); if (hostId == null) { try { - s_logger.debug("Found a vm that is scheduled to be restarted but has no host id: " + vm); + logger.debug("Found a vm that is scheduled to be restarted but has no host id: " + vm); _itMgr.advanceStop(vm.getUuid(), true); } catch (ResourceUnavailableException e) { assert false : "How do we hit this when force is true?"; @@ -363,13 +361,13 @@ public void scheduleRestart(VMInstanceVO vm, boolean investigate) { } if (vm.getHypervisorType() == HypervisorType.VMware || vm.getHypervisorType() == HypervisorType.Hyperv) { - s_logger.info("Skip HA for VMware VM or Hyperv VM" + vm.getInstanceName()); + logger.info("Skip HA for VMware VM or Hyperv VM" + vm.getInstanceName()); return; } if (!investigate) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM does not require investigation so I'm marking it as Stopped: " + vm.toString()); + if (logger.isDebugEnabled()) { + logger.debug("VM does not require investigation so I'm marking it as Stopped: " + vm.toString()); } AlertManager.AlertType alertType = AlertManager.AlertType.ALERT_TYPE_USERVM; @@ -387,8 +385,8 @@ public void scheduleRestart(VMInstanceVO vm, boolean investigate) { ") stopped unexpectedly on host " + hostDesc, "Virtual Machine " + vm.getHostName() + " (id: " + vm.getId() + ") running on host [" + vm.getHostId() + "] stopped unexpectedly."); - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not HA enabled so we're done."); + if (logger.isDebugEnabled()) { + logger.debug("VM is not HA enabled so we're done."); } } @@ -408,7 +406,7 @@ public 
void scheduleRestart(VMInstanceVO vm, boolean investigate) { } if (vm.getHypervisorType() == HypervisorType.VMware) { - s_logger.info("Skip HA for VMware VM " + vm.getInstanceName()); + logger.info("Skip HA for VMware VM " + vm.getInstanceName()); return; } @@ -429,8 +427,8 @@ public void scheduleRestart(VMInstanceVO vm, boolean investigate) { hostId != null ? hostId : 0L, vm.getState(), timesTried, vm.getUpdated()); _haDao.persist(work); - if (s_logger.isInfoEnabled()) { - s_logger.info("Schedule vm for HA: " + vm); + if (logger.isInfoEnabled()) { + logger.info("Schedule vm for HA: " + vm); } wakeupWorkers(); @@ -438,7 +436,7 @@ public void scheduleRestart(VMInstanceVO vm, boolean investigate) { } protected Long restart(final HaWorkVO work) { - s_logger.debug("RESTART with HAWORK"); + logger.debug("RESTART with HAWORK"); List items = _haDao.listFutureHaWorkForVm(work.getInstanceId(), work.getId()); if (items.size() > 0) { StringBuilder str = new StringBuilder("Cancelling this work item because newer ones have been scheduled. 
Work Ids = ["); @@ -446,7 +444,7 @@ protected Long restart(final HaWorkVO work) { str.append(item.getId()).append(", "); } str.delete(str.length() - 2, str.length()).append("]"); - s_logger.info(str.toString()); + logger.info(str.toString()); return null; } @@ -457,7 +455,7 @@ protected Long restart(final HaWorkVO work) { str.append(item.getId()).append(", "); } str.delete(str.length() - 2, str.length()).append("]"); - s_logger.info(str.toString()); + logger.info(str.toString()); return (System.currentTimeMillis() >> 10) + _investigateRetryInterval; } @@ -465,13 +463,13 @@ protected Long restart(final HaWorkVO work) { VirtualMachine vm = _itMgr.findById(work.getInstanceId()); if (vm == null) { - s_logger.info("Unable to find vm: " + vmId); + logger.info("Unable to find vm: " + vmId); return null; } - s_logger.info("HA on " + vm); + logger.info("HA on " + vm); if (vm.getState() != work.getPreviousState() || vm.getUpdated() != work.getUpdateTime()) { - s_logger.info("VM " + vm + " has been changed. Current State = " + vm.getState() + " Previous State = " + work.getPreviousState() + " last updated = " + + logger.info("VM " + vm + " has been changed. 
Current State = " + vm.getState() + " Previous State = " + work.getPreviousState() + " last updated = " + vm.getUpdated() + " previous updated = " + work.getUpdateTime()); return null; } @@ -490,7 +488,7 @@ protected Long restart(final HaWorkVO work) { if (host == null) { host = _hostDao.findByIdIncludingRemoved(work.getHostId()); if (host != null) { - s_logger.debug("VM " + vm.toString() + " is now no longer on host " + work.getHostId() + " as the host is removed"); + logger.debug("VM " + vm.toString() + " is now no longer on host " + work.getHostId() + " as the host is removed"); isHostRemoved = true; } } @@ -503,7 +501,7 @@ protected Long restart(final HaWorkVO work) { if (work.getStep() == Step.Investigating) { if (!isHostRemoved) { if (vm.getHostId() == null || vm.getHostId() != work.getHostId()) { - s_logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId()); + logger.info("VM " + vm.toString() + " is now no longer on host " + work.getHostId()); return null; } @@ -513,19 +511,19 @@ protected Long restart(final HaWorkVO work) { try { alive = investigator.isVmAlive(vm, host); - s_logger.info(investigator.getName() + " found " + vm + " to be alive? " + alive); + logger.info(investigator.getName() + " found " + vm + " to be alive? 
" + alive); break; } catch (UnknownVM e) { - s_logger.info(investigator.getName() + " could not find " + vm); + logger.info(investigator.getName() + " could not find " + vm); } } boolean fenced = false; if (alive == null) { - s_logger.debug("Fencing off VM that we don't know the state of"); + logger.debug("Fencing off VM that we don't know the state of"); for (FenceBuilder fb : fenceBuilders) { Boolean result = fb.fenceOff(vm, host); - s_logger.info("Fencer " + fb.getName() + " returned " + result); + logger.info("Fencer " + fb.getName() + " returned " + result); if (result != null && result) { fenced = true; break; @@ -535,18 +533,18 @@ protected Long restart(final HaWorkVO work) { } else if (!alive) { fenced = true; } else { - s_logger.debug("VM " + vm.getInstanceName() + " is found to be alive by " + investigator.getName()); + logger.debug("VM " + vm.getInstanceName() + " is found to be alive by " + investigator.getName()); if (host.getStatus() == Status.Up) { - s_logger.info(vm + " is alive and host is up. No need to restart it."); + logger.info(vm + " is alive and host is up. 
No need to restart it."); return null; } else { - s_logger.debug("Rescheduling because the host is not up but the vm is alive"); + logger.debug("Rescheduling because the host is not up but the vm is alive"); return (System.currentTimeMillis() >> 10) + _investigateRetryInterval; } } if (!fenced) { - s_logger.debug("We were unable to fence off the VM " + vm); + logger.debug("We were unable to fence off the VM " + vm); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "Insufficient capacity to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); @@ -569,7 +567,7 @@ protected Long restart(final HaWorkVO work) { work.setStep(Step.Scheduled); _haDao.update(work.getId(), work); } else { - s_logger.debug("How come that HA step is Investigating and the host is removed? Calling forced Stop on Vm anyways"); + logger.debug("How come that HA step is Investigating and the host is removed? 
Calling forced Stop on Vm anyways"); try { _itMgr.advanceStop(vm.getUuid(), true); } catch (ResourceUnavailableException e) { @@ -588,16 +586,16 @@ protected Long restart(final HaWorkVO work) { vm = _itMgr.findById(vm.getId()); if (!ForceHA.value() && !vm.isHaEnabled()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not HA enabled so we're done."); + if (logger.isDebugEnabled()) { + logger.debug("VM is not HA enabled so we're done."); } return null; // VM doesn't require HA } if ((host == null || host.getRemoved() != null || host.getState() != Status.Up) && !volumeMgr.canVmRestartOnAnotherServer(vm.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM can not restart on another server."); + if (logger.isDebugEnabled()) { + logger.debug("VM can not restart on another server."); } return null; } @@ -630,7 +628,7 @@ protected Long restart(final HaWorkVO work) { // First try starting the vm with its original planner, if it doesn't succeed send HAPlanner as its an emergency. 
_itMgr.advanceStart(vm.getUuid(), params, null); }catch (InsufficientCapacityException e){ - s_logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); + logger.warn("Failed to deploy vm " + vmId + " with original planner, sending HAPlanner"); _itMgr.advanceStart(vm.getUuid(), params, _haPlanners.get(0)); } @@ -638,28 +636,28 @@ protected Long restart(final HaWorkVO work) { if (started != null && started.getState() == VirtualMachine.State.Running) { String message = String.format("HA starting VM: %s (%s)", started.getHostName(), started.getInstanceName()); HostVO hostVmHasStarted = _hostDao.findById(started.getHostId()); - s_logger.info(String.format("HA is now restarting %s on %s", started, hostVmHasStarted)); + logger.info(String.format("HA is now restarting %s on %s", started, hostVmHasStarted)); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), message, message); return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rescheduling VM " + vm.toString() + " to try again in " + _restartRetryInterval); + if (logger.isDebugEnabled()) { + logger.debug("Rescheduling VM " + vm.toString() + " to try again in " + _restartRetryInterval); } } catch (final InsufficientCapacityException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "Insufficient capacity to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (final ResourceUnavailableException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, 
vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } catch (OperationTimedoutException e) { - s_logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); + logger.warn("Unable to restart " + vm.toString() + " due to " + e.getMessage()); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "Unable to restart " + vm.getHostName() + " which was running on host " + hostDesc, "The Storage is unavailable for trying to restart VM, name: " + vm.getHostName() + ", id: " + vmId + " which was running on host " + hostDesc); } @@ -675,10 +673,10 @@ public Long migrate(final HaWorkVO work) { VMInstanceVO vm = _instanceDao.findById(vmId); if (vm == null) { - s_logger.info("Unable to find vm: " + vmId + ", skipping migrate."); + logger.info("Unable to find vm: " + vmId + ", skipping migrate."); return null; } - s_logger.info("Migration attempt: for VM " + vm.getUuid() + "from host id " + srcHostId + + logger.info("Migration attempt: for VM " + vm.getUuid() + "from host id " + srcHostId + ". 
Starting attempt: " + (1 + work.getTimesTried()) + "/" + _maxRetries + " times."); try { work.setStep(Step.Migrating); @@ -688,13 +686,13 @@ public Long migrate(final HaWorkVO work) { _itMgr.migrateAway(vm.getUuid(), srcHostId); return null; } catch (InsufficientServerCapacityException e) { - s_logger.warn("Migration attempt: Insufficient capacity for migrating a VM " + + logger.warn("Migration attempt: Insufficient capacity for migrating a VM " + vm.getUuid() + " from source host id " + srcHostId + ". Exception: " + e.getMessage()); _resourceMgr.migrateAwayFailed(srcHostId, vmId); return (System.currentTimeMillis() >> 10) + _migrateRetryInterval; } catch (Exception e) { - s_logger.warn("Migration attempt: Unexpected exception occurred when attempting migration of " + + logger.warn("Migration attempt: Unexpected exception occurred when attempting migration of " + vm.getUuid() + e.getMessage()); throw e; } @@ -704,8 +702,8 @@ public Long migrate(final HaWorkVO work) { public void scheduleDestroy(VMInstanceVO vm, long hostId) { final HaWorkVO work = new HaWorkVO(vm.getId(), vm.getType(), WorkType.Destroy, Step.Scheduled, hostId, vm.getState(), 0, vm.getUpdated()); _haDao.persist(work); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Scheduled " + work.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Scheduled " + work.toString()); } wakeupWorkers(); } @@ -722,7 +720,7 @@ private void stopVMWithCleanup(VirtualMachine vm, VirtualMachine.State state) th } private void destroyVM(VirtualMachine vm, boolean expunge) throws OperationTimedoutException, AgentUnavailableException { - s_logger.info("Destroying " + vm.toString()); + logger.info("Destroying " + vm.toString()); if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { consoleProxyManager.destroyProxy(vm.getId()); } else if (VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType())) { @@ -735,13 +733,13 @@ private void destroyVM(VirtualMachine vm, boolean expunge) throws OperationTimed 
protected Long destroyVM(final HaWorkVO work) { final VirtualMachine vm = _itMgr.findById(work.getInstanceId()); if (vm == null) { - s_logger.info("No longer can find VM " + work.getInstanceId() + ". Throwing away " + work); + logger.info("No longer can find VM " + work.getInstanceId() + ". Throwing away " + work); return null; } boolean expunge = VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType()) || VirtualMachine.Type.ConsoleProxy.equals(vm.getType()); if (!expunge && VirtualMachine.State.Destroyed.equals(work.getPreviousState())) { - s_logger.info("VM " + vm.getUuid() + " already in " + vm.getState() + " state. Throwing away " + work); + logger.info("VM " + vm.getUuid() + " already in " + vm.getState() + " state. Throwing away " + work); return null; } try { @@ -750,16 +748,16 @@ protected Long destroyVM(final HaWorkVO work) { destroyVM(vm, expunge); return null; } else { - s_logger.info("VM " + vm.getUuid() + " still in " + vm.getState() + " state."); + logger.info("VM " + vm.getUuid() + " still in " + vm.getState() + " state."); } } catch (final AgentUnavailableException e) { - s_logger.debug("Agent is not available" + e.getMessage()); + logger.debug("Agent is not available" + e.getMessage()); } catch (OperationTimedoutException e) { - s_logger.debug("operation timed out: " + e.getMessage()); + logger.debug("operation timed out: " + e.getMessage()); } catch (ConcurrentOperationException e) { - s_logger.debug("concurrent operation: " + e.getMessage()); + logger.debug("concurrent operation: " + e.getMessage()); } catch (ResourceUnavailableException e) { - s_logger.debug("Resource unavailable: " + e.getMessage()); + logger.debug("Resource unavailable: " + e.getMessage()); } return (System.currentTimeMillis() >> 10) + _stopRetryInterval; @@ -768,45 +766,45 @@ protected Long destroyVM(final HaWorkVO work) { protected Long stopVM(final HaWorkVO work) throws ConcurrentOperationException { VirtualMachine vm = _itMgr.findById(work.getInstanceId()); if (vm == 
null) { - s_logger.info("No longer can find VM " + work.getInstanceId() + ". Throwing away " + work); + logger.info("No longer can find VM " + work.getInstanceId() + ". Throwing away " + work); work.setStep(Step.Done); return null; } - s_logger.info("Stopping " + vm); + logger.info("Stopping " + vm); try { if (work.getWorkType() == WorkType.Stop) { _itMgr.advanceStop(vm.getUuid(), false); - s_logger.info("Successfully stopped " + vm); + logger.info("Successfully stopped " + vm); return null; } else if (work.getWorkType() == WorkType.CheckStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - s_logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + + logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + (vm.getHostId() != null ? vm.getHostId() : "none") + " State: " + vm.getState()); return null; } _itMgr.advanceStop(vm.getUuid(), false); - s_logger.info("Stop for " + vm + " was successful"); + logger.info("Stop for " + vm + " was successful"); return null; } else if (work.getWorkType() == WorkType.ForceStop) { if ((vm.getState() != work.getPreviousState()) || vm.getUpdated() != work.getUpdateTime() || vm.getHostId() == null || vm.getHostId().longValue() != work.getHostId()) { - s_logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + + logger.info(vm + " is different now. Scheduled Host: " + work.getHostId() + " Current Host: " + (vm.getHostId() != null ? 
vm.getHostId() : "none") + " State: " + vm.getState()); return null; } _itMgr.advanceStop(vm.getUuid(), true); - s_logger.info("Stop for " + vm + " was successful"); + logger.info("Stop for " + vm + " was successful"); return null; } else { assert false : "Who decided there's other steps but didn't modify the guy who does the work?"; } } catch (final ResourceUnavailableException e) { - s_logger.debug("Agnet is not available" + e.getMessage()); + logger.debug("Agnet is not available" + e.getMessage()); } catch (OperationTimedoutException e) { - s_logger.debug("operation timed out: " + e.getMessage()); + logger.debug("operation timed out: " + e.getMessage()); } return (System.currentTimeMillis() >> 10) + _stopRetryInterval; @@ -815,7 +813,7 @@ protected Long stopVM(final HaWorkVO work) throws ConcurrentOperationException { @Override public void cancelScheduledMigrations(final HostVO host) { WorkType type = host.getType() == HostVO.Type.Storage ? WorkType.Stop : WorkType.Migration; - s_logger.info("Canceling all scheduled migrations from host " + host.getUuid()); + logger.info("Canceling all scheduled migrations from host " + host.getUuid()); _haDao.deleteMigrationWorkItems(host.getId(), type, _serverId); } @@ -872,13 +870,13 @@ private void processWork(final HaWorkVO work) { } if (nextTime == null) { - s_logger.info("Completed work " + work + ". Took " + (work.getTimesTried() + 1) + "/" + _maxRetries + " attempts."); + logger.info("Completed work " + work + ". 
Took " + (work.getTimesTried() + 1) + "/" + _maxRetries + " attempts."); work.setStep(Step.Done); } else { rescheduleWork(work, nextTime.longValue()); } } catch (Exception e) { - s_logger.warn("Encountered unhandled exception during HA process, reschedule work", e); + logger.warn("Encountered unhandled exception during HA process, reschedule work", e); long nextTime = getRescheduleTime(wt); rescheduleWork(work, nextTime); @@ -891,10 +889,10 @@ private void processWork(final HaWorkVO work) { } finally { if (!Step.Done.equals(work.getStep())) { if (work.getTimesTried() >= _maxRetries) { - s_logger.warn("Giving up, retried max " + work.getTimesTried() + "/" + _maxRetries + " times for work: " + work); + logger.warn("Giving up, retried max " + work.getTimesTried() + "/" + _maxRetries + " times for work: " + work); work.setStep(Step.Done); } else { - s_logger.warn("Rescheduling work " + work + " to try again at " + new Date(work.getTimeToTry() << 10) + + logger.warn("Rescheduling work " + work + " to try again at " + new Date(work.getTimeToTry() << 10) + ". 
Finished attempt " + work.getTimesTried() + "/" + _maxRetries + " times."); } } @@ -967,12 +965,12 @@ public boolean stop() { protected class CleanupTask extends ManagedContextRunnable { @Override protected void runInContext() { - s_logger.info("HA Cleanup Thread Running"); + logger.info("HA Cleanup Thread Running"); try { _haDao.cleanup(System.currentTimeMillis() - _timeBetweenFailures); } catch (Exception e) { - s_logger.warn("Error while cleaning up", e); + logger.warn("Error while cleaning up", e); } } } @@ -984,7 +982,7 @@ public WorkerThread(String name) { @Override public void run() { - s_logger.info("Starting work"); + logger.info("Starting work"); while (!_stopped) { _managedContext.runWithContext(new Runnable() { @Override @@ -993,13 +991,13 @@ public void run() { } }); } - s_logger.info("Time to go home!"); + logger.info("Time to go home!"); } private void runWithContext() { HaWorkVO work = null; try { - s_logger.trace("Checking the database for work"); + logger.trace("Checking the database for work"); work = _haDao.take(_serverId); if (work == null) { try { @@ -1008,19 +1006,19 @@ private void runWithContext() { } return; } catch (final InterruptedException e) { - s_logger.info("Interrupted"); + logger.info("Interrupted"); return; } } - NDC.push("work-" + work.getId()); - s_logger.info("Processing work " + work); + ThreadContext.push("work-" + work.getId()); + logger.info("Processing work " + work); processWork(work); } catch (final Throwable th) { - s_logger.error("Caught this throwable, ", th); + logger.error("Caught this throwable, ", th); } finally { if (work != null) { - NDC.pop(); + ThreadContext.pop(); } } } @@ -1068,7 +1066,7 @@ public boolean hasPendingMigrationsWork(long vmId) { if (work.getTimesTried() <= _maxRetries) { return true; } else { - s_logger.warn("HAWork Job of migration type " + work + " found in database which has max " + + logger.warn("HAWork Job of migration type " + work + " found in database which has max " + "retries more 
than " + _maxRetries + " but still not in Done, Cancelled, or Error State"); } } diff --git a/server/src/main/java/com/cloud/ha/KVMFencer.java b/server/src/main/java/com/cloud/ha/KVMFencer.java index ea10570d6f8f..b51ed00b028c 100644 --- a/server/src/main/java/com/cloud/ha/KVMFencer.java +++ b/server/src/main/java/com/cloud/ha/KVMFencer.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -40,7 +39,6 @@ import com.cloud.vm.VirtualMachine; public class KVMFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(KVMFencer.class); @Inject HostDao _hostDao; @@ -76,7 +74,7 @@ public KVMFencer() { @Override public Boolean fenceOff(VirtualMachine vm, Host host) { if (host.getHypervisorType() != HypervisorType.KVM && host.getHypervisorType() != HypervisorType.LXC) { - s_logger.warn("Don't know how to fence non kvm hosts " + host.getHypervisorType()); + logger.warn("Don't know how to fence non kvm hosts " + host.getHypervisorType()); return null; } @@ -100,10 +98,10 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { try { answer = (FenceAnswer)_agentMgr.send(h.getId(), fence); } catch (AgentUnavailableException e) { - s_logger.info("Moving on to the next host because " + h.toString() + " is unavailable", e); + logger.info("Moving on to the next host because " + h.toString() + " is unavailable", e); continue; } catch (OperationTimedoutException e) { - s_logger.info("Moving on to the next host because " + h.toString() + " is unavailable", e); + logger.info("Moving on to the next host because " + h.toString() + " is unavailable", e); continue; } if (answer != null && answer.getResult()) { @@ -117,7 +115,7 @@ public Boolean fenceOff(VirtualMachine vm, Host host) { "Fencing off host " + host.getId() + " did not succeed after asking " + i + " hosts. 
" + "Check Agent logs for more information."); - s_logger.error("Unable to fence off " + vm.toString() + " on " + host.toString()); + logger.error("Unable to fence off " + vm.toString() + " on " + host.toString()); return false; } diff --git a/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java b/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java index ec7f4aa72df5..ce45d662082e 100644 --- a/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java +++ b/server/src/main/java/com/cloud/ha/ManagementIPSystemVMInvestigator.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -32,7 +31,6 @@ import com.cloud.vm.VirtualMachine; public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { - private static final Logger s_logger = Logger.getLogger(ManagementIPSystemVMInvestigator.class); @Inject private final HostDao _hostDao = null; @@ -42,28 +40,28 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { @Override public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { if (!vm.getType().isUsedBySystem()) { - s_logger.debug("Not a System Vm, unable to determine state of " + vm + " returning null"); + logger.debug("Not a System Vm, unable to determine state of " + vm + " returning null"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Testing if " + vm + " is alive"); + if (logger.isDebugEnabled()) { + logger.debug("Testing if " + vm + " is alive"); } if (vm.getHostId() == null) { - s_logger.debug("There's no host id for " + vm); + logger.debug("There's no host id for " + vm); throw new UnknownVM(); } HostVO vmHost = _hostDao.findById(vm.getHostId()); if (vmHost == null) { - s_logger.debug("Unable to retrieve the host by using id " + vm.getHostId()); + logger.debug("Unable to retrieve the host by using id " + vm.getHostId()); throw new UnknownVM(); } List 
nics = _networkMgr.getNicsForTraffic(vm.getId(), TrafficType.Management); if (nics.size() == 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find a management nic, cannot ping this system VM, unable to determine state of " + vm + " returning null"); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find a management nic, cannot ping this system VM, unable to determine state of " + vm + " returning null"); } throw new UnknownVM(); } @@ -79,8 +77,8 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { assert vmState != null; // In case of Status.Unknown, next host will be tried if (vmState == Status.Up) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("successfully pinged vm's private IP (" + vm.getPrivateIpAddress() + "), returning that the VM is up"); + if (logger.isDebugEnabled()) { + logger.debug("successfully pinged vm's private IP (" + vm.getPrivateIpAddress() + "), returning that the VM is up"); } return Boolean.TRUE; } else if (vmState == Status.Down) { @@ -89,8 +87,8 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { Status vmHostState = testIpAddress(otherHost, vmHost.getPrivateIpAddress()); assert vmHostState != null; if (vmHostState == Status.Up) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("successfully pinged vm's host IP (" + vmHost.getPrivateIpAddress() + + if (logger.isDebugEnabled()) { + logger.debug("successfully pinged vm's host IP (" + vmHost.getPrivateIpAddress() + "), but could not ping VM, returning that the VM is down"); } return Boolean.FALSE; @@ -99,8 +97,8 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("unable to determine state of " + vm + " returning null"); + if (logger.isDebugEnabled()) { + logger.debug("unable to determine state of " + vm + " returning null"); } throw new UnknownVM(); } diff --git a/server/src/main/java/com/cloud/ha/RecreatableFencer.java 
b/server/src/main/java/com/cloud/ha/RecreatableFencer.java index 668d13fa498e..dcd4764c3818 100644 --- a/server/src/main/java/com/cloud/ha/RecreatableFencer.java +++ b/server/src/main/java/com/cloud/ha/RecreatableFencer.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -33,7 +32,6 @@ @Component public class RecreatableFencer extends AdapterBase implements FenceBuilder { - private static final Logger s_logger = Logger.getLogger(RecreatableFencer.class); @Inject VolumeDao _volsDao; @Inject @@ -47,22 +45,22 @@ public RecreatableFencer() { public Boolean fenceOff(VirtualMachine vm, Host host) { VirtualMachine.Type type = vm.getType(); if (type != VirtualMachine.Type.ConsoleProxy && type != VirtualMachine.Type.DomainRouter && type != VirtualMachine.Type.SecondaryStorageVm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Don't know how to fence off " + type); + if (logger.isDebugEnabled()) { + logger.debug("Don't know how to fence off " + type); } return null; } List vols = _volsDao.findByInstance(vm.getId()); for (VolumeVO vol : vols) { if (!vol.isRecreatable()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off volumes that are not recreatable: " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off volumes that are not recreatable: " + vol); } return null; } if (vol.getPoolType().isShared()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off volumes that are shared: " + vol); + if (logger.isDebugEnabled()) { + logger.debug("Unable to fence off volumes that are shared: " + vol); } return null; } diff --git a/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java b/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java index 451c5d467a1e..90d34799d3d8 100644 --- a/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java 
+++ b/server/src/main/java/com/cloud/ha/UserVmDomRInvestigator.java @@ -21,7 +21,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -39,7 +38,6 @@ import com.cloud.vm.dao.UserVmDao; public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { - private static final Logger s_logger = Logger.getLogger(UserVmDomRInvestigator.class); @Inject private final UserVmDao _userVmDao = null; @@ -53,14 +51,14 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { @Override public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { if (vm.getType() != VirtualMachine.Type.User) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not a User Vm, unable to determine state of " + vm + " returning null"); + if (logger.isDebugEnabled()) { + logger.debug("Not a User Vm, unable to determine state of " + vm + " returning null"); } throw new UnknownVM(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("testing if " + vm + " is alive"); + if (logger.isDebugEnabled()) { + logger.debug("testing if " + vm + " is alive"); } // to verify that the VM is alive, we ask the domR (router) to ping the VM (private IP) UserVmVO userVm = _userVmDao.findById(vm.getId()); @@ -74,8 +72,8 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { List routers = _vnaMgr.getRoutersForNetwork(nic.getNetworkId()); if (routers == null || routers.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find a router in network " + nic.getNetworkId() + " to ping " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find a router in network " + nic.getNetworkId() + " to ping " + vm); } continue; } @@ -95,16 +93,16 @@ public boolean isVmAlive(VirtualMachine vm, Host host) throws UnknownVM { return result; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Returning null since we're unable to determine state of " + vm); + 
if (logger.isDebugEnabled()) { + logger.debug("Returning null since we're unable to determine state of " + vm); } throw new UnknownVM(); } @Override public Status isAgentAlive(Host agent) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("checking if agent (" + agent.getId() + ") is alive"); + if (logger.isDebugEnabled()) { + logger.debug("checking if agent (" + agent.getId() + ") is alive"); } if (agent.getPodId() == null) { @@ -114,29 +112,29 @@ public Status isAgentAlive(Host agent) { List otherHosts = findHostByPod(agent.getPodId(), agent.getId()); for (Long hostId : otherHosts) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")"); + if (logger.isDebugEnabled()) { + logger.debug("sending ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ")"); } Status hostState = testIpAddress(hostId, agent.getPrivateIpAddress()); assert hostState != null; // In case of Status.Unknown, next host will be tried if (hostState == Status.Up) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + + if (logger.isDebugEnabled()) { + logger.debug("ping from (" + hostId + ") to agent's host ip address (" + agent.getPrivateIpAddress() + ") successful, returning that agent is disconnected"); } return Status.Disconnected; // the computing host ip is ping-able, but the computing agent is down, report that the agent is disconnected } else if (hostState == Status.Down) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("returning host state: " + hostState); + if (logger.isDebugEnabled()) { + logger.debug("returning host state: " + hostState); } return Status.Down; } } // could not reach agent, could not reach agent's host, unclear what the problem is but it'll require more investigation... 
- if (s_logger.isDebugEnabled()) { - s_logger.debug("could not reach agent, could not reach agent's host, returning that we don't have enough information"); + if (logger.isDebugEnabled()) { + logger.debug("could not reach agent, could not reach agent's host, returning that we don't have enough information"); } return null; } @@ -165,21 +163,21 @@ private Boolean testUserVM(VirtualMachine vm, Nic nic, VirtualRouter router) { try { Answer pingTestAnswer = _agentMgr.easySend(hostId, new PingTestCommand(routerPrivateIp, privateIp)); if (pingTestAnswer != null && pingTestAnswer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("user vm's " + vm.getHostName() + " ip address " + privateIp + " has been successfully pinged from the Virtual Router " + + if (logger.isDebugEnabled()) { + logger.debug("user vm's " + vm.getHostName() + " ip address " + privateIp + " has been successfully pinged from the Virtual Router " + router.getHostName() + ", returning that vm is alive"); } return Boolean.TRUE; } } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't reach due to", e); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't reach due to", e); } continue; } } - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " could not be pinged, returning that it is unknown"); + if (logger.isDebugEnabled()) { + logger.debug(vm + " could not be pinged, returning that it is unknown"); } return null; diff --git a/server/src/main/java/com/cloud/ha/XenServerInvestigator.java b/server/src/main/java/com/cloud/ha/XenServerInvestigator.java index 896642176659..5482a7f148e6 100644 --- a/server/src/main/java/com/cloud/ha/XenServerInvestigator.java +++ b/server/src/main/java/com/cloud/ha/XenServerInvestigator.java @@ -20,7 +20,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -36,7 +35,6 @@ import com.cloud.vm.VirtualMachine; public class 
XenServerInvestigator extends AdapterBase implements Investigator { - private final static Logger s_logger = Logger.getLogger(XenServerInvestigator.class); @Inject HostDao _hostDao; @Inject @@ -63,7 +61,7 @@ public Status isAgentAlive(Host agent) { if (answer != null && answer.getResult()) { CheckOnHostAnswer ans = (CheckOnHostAnswer)answer; if (!ans.isDetermined()) { - s_logger.debug("Host " + neighbor + " couldn't determine the status of " + agent); + logger.debug("Host " + neighbor + " couldn't determine the status of " + agent); continue; } // even it returns true, that means host is up, but XAPI may not work diff --git a/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java b/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java index c7284053fb2e..357796a6a70b 100644 --- a/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java +++ b/server/src/main/java/com/cloud/ha/dao/HighAvailabilityDaoImpl.java @@ -20,7 +20,6 @@ import java.util.List; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.ha.HaWorkVO; @@ -36,7 +35,6 @@ @Component public class HighAvailabilityDaoImpl extends GenericDaoBase implements HighAvailabilityDao { - private static final Logger s_logger = Logger.getLogger(HighAvailabilityDaoImpl.class); private final SearchBuilder TBASearch; private final SearchBuilder PreviousInstanceSearch; diff --git a/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java index 8d674a5f9a82..961e11e91d74 100644 --- a/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/main/java/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -48,7 +47,6 
@@ */ @Component public class CloudZonesStartupProcessor extends AdapterBase implements StartupCommandProcessor { - private static final Logger s_logger = Logger.getLogger(CloudZonesStartupProcessor.class); @Inject private DataCenterDao _zoneDao = null; @Inject @@ -113,7 +111,7 @@ protected void updateSecondaryHost(final HostVO host, final StartupStorageComman String zoneToken = startup.getDataCenter(); if (zoneToken == null) { - s_logger.warn("No Zone Token passed in, cannot not find zone for the agent"); + logger.warn("No Zone Token passed in, cannot not find zone for the agent"); throw new AgentAuthnException("No Zone Token passed in, cannot not find zone for agent"); } @@ -132,14 +130,14 @@ protected void updateSecondaryHost(final HostVO host, final StartupStorageComman } } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully loaded the DataCenter from the zone token passed in "); + if (logger.isDebugEnabled()) { + logger.debug("Successfully loaded the DataCenter from the zone token passed in "); } HostPodVO pod = findPod(startup, zone.getId(), Host.Type.Routing); //yes, routing Long podId = null; if (pod != null) { - s_logger.debug("Found pod " + pod.getName() + " for the secondary storage host " + startup.getName()); + logger.debug("Found pod " + pod.getName() + " for the secondary storage host " + startup.getName()); podId = pod.getId(); } host.setDataCenterId(zone.getId()); diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 177fd331dee1..d0d728d4410c 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -32,7 +32,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.Command; import 
com.cloud.agent.api.to.DiskTO; @@ -69,7 +68,6 @@ import com.cloud.vm.dao.VMInstanceDao; public abstract class HypervisorGuruBase extends AdapterBase implements HypervisorGuru, Configurable { - public static final Logger s_logger = Logger.getLogger(HypervisorGuruBase.class); @Inject protected @@ -106,7 +104,7 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis private Map getNicDetails(Network network) { if (network == null) { - s_logger.debug("Unable to get NIC details as the network is null"); + logger.debug("Unable to get NIC details as the network is null"); return null; } Map details = networkOfferingDetailsDao.getNtwkOffDetails(network.getNetworkOfferingId()); @@ -165,7 +163,7 @@ public NicTO toNicTO(NicProfile profile) { } to.setNicSecIps(secIps); } else { - s_logger.warn("Unabled to load NicVO for NicProfile " + profile.getId()); + logger.warn("Unabled to load NicVO for NicProfile " + profile.getId()); //Workaround for dynamically created nics //FixMe: uuid and secondary IPs can be made part of nic profile to.setUuid(UUID.randomUUID().toString()); @@ -296,13 +294,13 @@ protected Long findClusterOfVm(VirtualMachine vm) { return host.getClusterId(); } - s_logger.debug(String.format("VM [%s] does not have a host id. Trying the last host.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "instanceName", "id", "uuid"))); + logger.debug(String.format("VM [%s] does not have a host id. 
Trying the last host.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "instanceName", "id", "uuid"))); host = hostDao.findById(vm.getLastHostId()); if (host != null) { return host.getClusterId(); } - s_logger.debug(String.format("VM [%s] does not have a last host id.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "instanceName", "id", "uuid"))); + logger.debug(String.format("VM [%s] does not have a last host id.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "instanceName", "id", "uuid"))); return null; } @@ -369,13 +367,13 @@ public ConfigKey[] getConfigKeys() { @Override public UnmanagedInstanceTO cloneHypervisorVMOutOfBand(String hostIp, String vmName, Map params) { - s_logger.error("Unsupported operation: cannot clone external VM"); + logger.error("Unsupported operation: cannot clone external VM"); return null; } @Override public boolean removeClonedHypervisorVMOutOfBand(String hostIp, String vmName, Map params) { - s_logger.error("Unsupported operation: cannot remove external VM"); + logger.error("Unsupported operation: cannot remove external VM"); return false; } } diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruManagerImpl.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruManagerImpl.java index a5f1f9fa5cb1..2c8af8cee1a0 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruManagerImpl.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruManagerImpl.java @@ -23,7 +23,6 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Command; @@ -34,7 +33,6 @@ @Component public class HypervisorGuruManagerImpl extends ManagerBase implements HypervisorGuruManager { - public static final Logger s_logger = Logger.getLogger(HypervisorGuruManagerImpl.class.getName()); @Inject HostDao _hostDao; diff --git 
a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java index 7c02d95f3eb9..ff588d064791 100644 --- a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java +++ b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java @@ -46,7 +46,6 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.math.BigDecimal; import java.math.RoundingMode; @@ -71,7 +70,6 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru { @Inject HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; - public static final Logger s_logger = Logger.getLogger(KVMGuru.class); @Override public HypervisorType getHypervisorType() { @@ -136,21 +134,21 @@ protected void setVmQuotaPercentage(VirtualMachineTO to, VirtualMachineProfile v if (host == null) { throw new CloudRuntimeException("Host with id: " + vm.getHostId() + " not found"); } - s_logger.debug("Limiting CPU usage for VM: " + vm.getUuid() + " on host: " + host.getUuid()); + logger.debug("Limiting CPU usage for VM: " + vm.getUuid() + " on host: " + host.getUuid()); double hostMaxSpeed = getHostCPUSpeed(host); double maxSpeed = getVmSpeed(to); try { BigDecimal percent = new BigDecimal(maxSpeed / hostMaxSpeed); percent = percent.setScale(2, RoundingMode.HALF_DOWN); if (percent.compareTo(new BigDecimal(1)) == 1) { - s_logger.debug("VM " + vm.getUuid() + " CPU MHz exceeded host " + host.getUuid() + " CPU MHz, limiting VM CPU to the host maximum"); + logger.debug("VM " + vm.getUuid() + " CPU MHz exceeded host " + host.getUuid() + " CPU MHz, limiting VM CPU to the host maximum"); percent = new BigDecimal(1); } to.setCpuQuotaPercentage(percent.doubleValue()); - s_logger.debug("Host: " + host.getUuid() + " max CPU speed = " + hostMaxSpeed + "MHz, 
VM: " + vm.getUuid() + + logger.debug("Host: " + host.getUuid() + " max CPU speed = " + hostMaxSpeed + "MHz, VM: " + vm.getUuid() + "max CPU speed = " + maxSpeed + "MHz. Setting CPU quota percentage as: " + percent.doubleValue()); } catch (NumberFormatException e) { - s_logger.error("Error calculating VM: " + vm.getUuid() + " quota percentage, it wll not be set. Error: " + e.getMessage(), e); + logger.error("Error calculating VM: " + vm.getUuid() + " quota percentage, it wll not be set. Error: " + e.getMessage(), e); } } } @@ -243,15 +241,15 @@ protected Pair getHostMaxMemoryAndCpuCores(HostVO host, VirtualMa } Long lastHostId = virtualMachine.getLastHostId(); - s_logger.info(String.format("%s is not running; therefore, we use the last host [%s] that the VM was running on to derive the unconstrained service offering max CPU and memory.", vmDescription, lastHostId)); + logger.info(String.format("%s is not running; therefore, we use the last host [%s] that the VM was running on to derive the unconstrained service offering max CPU and memory.", vmDescription, lastHostId)); HostVO lastHost = lastHostId == null ? null : hostDao.findById(lastHostId); if (lastHost != null) { maxHostMemory = lastHost.getTotalMemory(); maxHostCpuCore = lastHost.getCpus(); - s_logger.debug(String.format("Retrieved memory and cpu max values {\"memory\": %s, \"cpu\": %s} from %s last %s.", maxHostMemory, maxHostCpuCore, vmDescription, lastHost)); + logger.debug(String.format("Retrieved memory and cpu max values {\"memory\": %s, \"cpu\": %s} from %s last %s.", maxHostMemory, maxHostCpuCore, vmDescription, lastHost)); } else { - s_logger.warn(String.format("%s host [%s] and last host [%s] are null. Using 'Long.MAX_VALUE' [%s] and 'Integer.MAX_VALUE' [%s] as max memory and cpu cores.", vmDescription, virtualMachine.getHostId(), lastHostId, maxHostMemory, maxHostCpuCore)); + logger.warn(String.format("%s host [%s] and last host [%s] are null. 
Using 'Long.MAX_VALUE' [%s] and 'Integer.MAX_VALUE' [%s] as max memory and cpu cores.", vmDescription, virtualMachine.getHostId(), lastHostId, maxHostMemory, maxHostCpuCore)); } return new Pair<>(maxHostMemory, maxHostCpuCore); @@ -264,18 +262,18 @@ protected Long getVmMaxMemory(ServiceOfferingVO serviceOfferingVO, String vmDesc Integer customOfferingMaxMemory = NumberUtils.createInteger(serviceOfferingVO.getDetail(ApiConstants.MAX_MEMORY)); Integer maxMemoryConfig = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE.value(); if (customOfferingMaxMemory != null) { - s_logger.debug(String.format("Using 'Custom unconstrained' %s max memory value [%sMb] as %s memory.", serviceOfferingDescription, customOfferingMaxMemory, vmDescription)); + logger.debug(String.format("Using 'Custom unconstrained' %s max memory value [%sMb] as %s memory.", serviceOfferingDescription, customOfferingMaxMemory, vmDescription)); maxMemory = ByteScaleUtils.mebibytesToBytes(customOfferingMaxMemory); } else { String maxMemoryConfigKey = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE.key(); - s_logger.info(String.format("%s is a 'Custom unconstrained' service offering. Using config [%s] value [%s] as max %s memory.", + logger.info(String.format("%s is a 'Custom unconstrained' service offering. Using config [%s] value [%s] as max %s memory.", serviceOfferingDescription, maxMemoryConfigKey, maxMemoryConfig, vmDescription)); if (maxMemoryConfig > 0) { maxMemory = ByteScaleUtils.mebibytesToBytes(maxMemoryConfig); } else { - s_logger.info(String.format("Config [%s] has value less or equal '0'. Using %s host or last host max memory [%s] as VM max memory in the hypervisor.", maxMemoryConfigKey, vmDescription, maxHostMemory)); + logger.info(String.format("Config [%s] has value less or equal '0'. 
Using %s host or last host max memory [%s] as VM max memory in the hypervisor.", maxMemoryConfigKey, vmDescription, maxHostMemory)); maxMemory = maxHostMemory; } } @@ -290,18 +288,18 @@ protected Integer getVmMaxCpuCores(ServiceOfferingVO serviceOfferingVO, String v Integer maxCpuCoresConfig = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES.value(); if (customOfferingMaxCpuCores != null) { - s_logger.debug(String.format("Using 'Custom unconstrained' %s max cpu cores [%s] as %s cpu cores.", serviceOfferingDescription, customOfferingMaxCpuCores, vmDescription)); + logger.debug(String.format("Using 'Custom unconstrained' %s max cpu cores [%s] as %s cpu cores.", serviceOfferingDescription, customOfferingMaxCpuCores, vmDescription)); maxCpuCores = customOfferingMaxCpuCores; } else { String maxCpuCoreConfigKey = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES.key(); - s_logger.info(String.format("%s is a 'Custom unconstrained' service offering. Using config [%s] value [%s] as max %s cpu cores.", + logger.info(String.format("%s is a 'Custom unconstrained' service offering. Using config [%s] value [%s] as max %s cpu cores.", serviceOfferingDescription, maxCpuCoreConfigKey, maxCpuCoresConfig, vmDescription)); if (maxCpuCoresConfig > 0) { maxCpuCores = maxCpuCoresConfig; } else { - s_logger.info(String.format("Config [%s] has value less or equal '0'. Using %s host or last host max cpu cores [%s] as VM cpu cores in the hypervisor.", maxCpuCoreConfigKey, vmDescription, maxHostCpuCore)); + logger.info(String.format("Config [%s] has value less or equal '0'. 
Using %s host or last host max cpu cores [%s] as VM cpu cores in the hypervisor.", maxCpuCoreConfigKey, vmDescription, maxHostCpuCore)); maxCpuCores = maxHostCpuCore; } } @@ -344,7 +342,7 @@ public Map getClusterSettings(long vmId) { @Override public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, String vmInternalName, Backup backup) { - s_logger.debug(String.format("Trying to import VM [vmInternalName: %s] from Backup [%s].", vmInternalName, + logger.debug(String.format("Trying to import VM [vmInternalName: %s] from Backup [%s].", vmInternalName, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "id", "uuid", "vmId", "externalId", "backupType"))); VMInstanceVO vm = _instanceDao.findVMByInstanceNameIncludingRemoved(vmInternalName); diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java index 6e29de2b2ba7..af16d12884c0 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/KvmServerDiscoverer.java @@ -17,12 +17,10 @@ package com.cloud.hypervisor.kvm.discoverer; -import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; public class KvmServerDiscoverer extends LibvirtServerDiscoverer { - private static final Logger s_logger = Logger.getLogger(KvmServerDiscoverer.class); @Override public Hypervisor.HypervisorType getHypervisorType() { diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index e9f0d5f58e4e..390ea155b3c7 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -54,7 +54,6 @@ import 
org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.utils.security.KeyStoreUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -70,7 +69,6 @@ import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVICE_RESTART_KVM; public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(LibvirtServerDiscoverer.class); private final int _waitTime = 5; /* wait for 5 minutes */ private String _kvmPrivateNic; private String _kvmPublicNic; @@ -206,8 +204,8 @@ private void setupAgentSecurity(final Connection sshConnection, final String age throw new CloudRuntimeException("Failed to setup certificate in the KVM agent's keystore file, please see logs and configure manually!"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Succeeded to import certificate in the keystore for agent on the KVM host: " + agentIp + ". Agent secured and trusted."); + if (logger.isDebugEnabled()) { + logger.debug("Succeeded to import certificate in the keystore for agent on the KVM host: " + agentIp + ". 
Agent secured and trusted."); } } @@ -216,8 +214,8 @@ private void setupAgentSecurity(final Connection sshConnection, final String age find(long dcId, Long podId, Long clusterId, URI uri, String username, String password, List hostTags) throws DiscoveryException { ClusterVO cluster = _clusterDao.findById(clusterId); if (cluster == null || cluster.getHypervisorType() != getHypervisorType()) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for " + getHypervisorType() + " hypervisors"); + if (logger.isInfoEnabled()) + logger.info("invalid cluster id or cluster is not for " + getHypervisorType() + " hypervisors"); return null; } @@ -231,7 +229,7 @@ private void setupAgentSecurity(final Connection sshConnection, final String age Map details = new HashMap(); if (!uri.getScheme().equals("http")) { String msg = "urlString is not http so we're not taking care of the discovery for this: " + uri; - s_logger.debug(msg); + logger.debug(msg); return null; } Connection sshConnection = null; @@ -248,7 +246,7 @@ private void setupAgentSecurity(final Connection sshConnection, final String age for (HostVO existingHost : existingHosts) { if (existingHost.getGuid().toLowerCase().startsWith(guid.toLowerCase())) { final String msg = "Skipping host " + agentIp + " because " + guid + " is already in the database for resource " + existingHost.getGuid() + " with ID " + existingHost.getUuid(); - s_logger.debug(msg); + logger.debug(msg); throw new CloudRuntimeException(msg); } } @@ -261,20 +259,20 @@ private void setupAgentSecurity(final Connection sshConnection, final String age final String privateKey = _configDao.getValue("ssh.privatekey"); if (!SSHCmdHelper.acquireAuthorizedConnectionWithPublicKey(sshConnection, username, privateKey)) { if (org.apache.commons.lang3.StringUtils.isEmpty(password)) { - s_logger.error("Failed to authenticate with ssh key"); + logger.error("Failed to authenticate with ssh key"); throw new 
DiscoveredWithErrorException("Authentication error with ssh private key"); } - s_logger.info("Failed to authenticate with ssh key, retrying with password"); + logger.info("Failed to authenticate with ssh key, retrying with password"); if (!sshConnection.authenticateWithPassword(username, password)) { - s_logger.error("Failed to authenticate with password"); + logger.error("Failed to authenticate with password"); throw new DiscoveredWithErrorException("Authentication error with host password"); } } if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "ls /dev/kvm")) { String errorMsg = "This machine does not have KVM enabled."; - if (s_logger.isDebugEnabled()) { - s_logger.debug(errorMsg); + if (logger.isDebugEnabled()) { + logger.debug(errorMsg); } throw new DiscoveredWithErrorException(errorMsg); } @@ -334,7 +332,7 @@ private void setupAgentSecurity(final Connection sshConnection, final String age if (!SSHCmdHelper.sshExecuteCmd(sshConnection, setupAgentCommand + parameters)) { String errorMsg = String.format("CloudStack Agent setup through command [%s] with parameters [%s] failed.", setupAgentCommand, parameters); - s_logger.info(errorMsg); + logger.info(errorMsg); throw new DiscoveredWithErrorException(errorMsg); } @@ -365,13 +363,13 @@ private void setupAgentSecurity(final Connection sshConnection, final String age _hostDao.saveDetails(connectedHost); return resources; } catch (DiscoveredWithErrorException e) { - s_logger.error("DiscoveredWithErrorException caught and rethrowing, message: "+ e.getMessage()); + logger.error("DiscoveredWithErrorException caught and rethrowing, message: "+ e.getMessage()); throw e; } catch (Exception e) { String msg = " can't setup agent, due to " + e.toString() + " - " + e.getMessage(); - s_logger.warn(msg); - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg, e); + logger.warn(msg); + if (logger.isDebugEnabled()) { + logger.debug(msg, e); } throw new DiscoveredWithErrorException(msg, e); } finally { @@ -391,10 +389,10 @@ private 
HostVO waitForHostConnect(long dcId, long podId, long clusterId, String try { Thread.sleep(30000); } catch (InterruptedException e) { - s_logger.debug("Failed to sleep: " + e.toString()); + logger.debug("Failed to sleep: " + e.toString()); } } - s_logger.debug("Timeout, to wait for the host connecting to mgt svr, assuming it is failed"); + logger.debug("Timeout, to wait for the host connecting to mgt svr, assuming it is failed"); List hosts = _resourceMgr.findHostByGuid(dcId, guid); if (hosts.size() == 1) { return hosts.get(0); @@ -460,7 +458,7 @@ public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { /* KVM requires host are the same in cluster */ ClusterVO clusterVO = _clusterDao.findById(host.getClusterId()); if (clusterVO == null) { - s_logger.debug("cannot find cluster: " + host.getClusterId()); + logger.debug("cannot find cluster: " + host.getClusterId()); throw new IllegalArgumentException("cannot add host, due to can't find cluster: " + host.getClusterId()); } @@ -473,7 +471,7 @@ public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { if (!hostOsInCluster.equalsIgnoreCase(hostOs)) { String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster); if (hostOs != null && hostOs.startsWith(hostOsInCluster)) { - s_logger.warn(String.format("Adding %s. This may or may not be ok!", msg)); + logger.warn(String.format("Adding %s. 
This may or may not be ok!", msg)); } else { throw new IllegalArgumentException(String.format("Can't add %s.", msg)); } @@ -502,9 +500,9 @@ public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForc ShutdownCommand cmd = new ShutdownCommand(ShutdownCommand.DeleteHost, null, !ADD_HOST_ON_SERVICE_RESTART_KVM.value()); agentMgr.send(host.getId(), cmd); } catch (AgentUnavailableException e) { - s_logger.warn("Sending ShutdownCommand failed: ", e); + logger.warn("Sending ShutdownCommand failed: ", e); } catch (OperationTimedoutException e) { - s_logger.warn("Sending ShutdownCommand failed: ", e); + logger.warn("Sending ShutdownCommand failed: ", e); } return new DeleteHostAnswer(true); diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java index 16ac97d64dd2..8872edde38a0 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LxcServerDiscoverer.java @@ -17,12 +17,10 @@ package com.cloud.hypervisor.kvm.discoverer; -import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor; public class LxcServerDiscoverer extends LibvirtServerDiscoverer { - private static final Logger s_logger = Logger.getLogger(LxcServerDiscoverer.class); @Override public Hypervisor.HypervisorType getHypervisorType() { diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java b/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java index b69a6d498b71..361b1302b287 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/dpdk/DpdkHelperImpl.java @@ -31,7 +31,8 @@ import com.cloud.vm.dao.VMInstanceDao; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.inject.Inject; import java.util.List; @@ -47,7 +48,7 @@ public class DpdkHelperImpl implements DpdkHelper { @Inject private UserVmDetailsDao userVmDetailsDao; - public static final Logger s_logger = Logger.getLogger(DpdkHelperImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private ServiceOffering getServiceOfferingFromVMProfile(VirtualMachineProfile virtualMachineProfile) { ServiceOffering offering = virtualMachineProfile.getServiceOffering(); @@ -74,7 +75,7 @@ public void setDpdkVhostUserMode(VirtualMachineTO to, VirtualMachineProfile vm) VHostUserMode dpdKvHostUserMode = VHostUserMode.fromValue(mode); to.addExtraConfig(DPDK_VHOST_USER_MODE, dpdKvHostUserMode.toString()); } catch (IllegalArgumentException e) { - s_logger.error(String.format("DPDK vHost User mode found as a detail for service offering: %s " + + logger.error(String.format("DPDK vHost User mode found as a detail for service offering: %s " + "but value: %s is not supported. 
Supported values: %s, %s", offering.getId(), mode, VHostUserMode.CLIENT.toString(), VHostUserMode.SERVER.toString())); diff --git a/server/src/main/java/com/cloud/metadata/ResourceMetaDataManagerImpl.java b/server/src/main/java/com/cloud/metadata/ResourceMetaDataManagerImpl.java index 8291505c7700..fc2453286cbb 100644 --- a/server/src/main/java/com/cloud/metadata/ResourceMetaDataManagerImpl.java +++ b/server/src/main/java/com/cloud/metadata/ResourceMetaDataManagerImpl.java @@ -47,7 +47,6 @@ import org.apache.cloudstack.resourcedetail.dao.GuestOsDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.dao.DataCenterDetailsDao; @@ -71,7 +70,6 @@ @Component public class ResourceMetaDataManagerImpl extends ManagerBase implements ResourceMetaDataService, ResourceMetaDataManager { - public static final Logger s_logger = Logger.getLogger(ResourceMetaDataManagerImpl.class); @Inject VolumeDetailsDao _volumeDetailDao; @Inject diff --git a/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java index c3efbf627a8e..329e4b903792 100644 --- a/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalDeviceUsageManagerImpl.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -155,7 +154,6 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter ScheduledExecutorService _executor; private int _externalNetworkStatsInterval; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalDeviceUsageManagerImpl.class); 
@Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -212,24 +210,24 @@ public void updateExternalLoadBalancerNetworkUsageStats(long loadBalancerRuleId) LoadBalancerVO lb = _loadBalancerDao.findById(loadBalancerRuleId); if (lb == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot update usage stats, LB rule is not found"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot update usage stats, LB rule is not found"); } return; } long networkId = lb.getNetworkId(); Network network = _networkDao.findById(networkId); if (network == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot update usage stats, Network is not found"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot update usage stats, Network is not found"); } return; } ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cannot update usage stats, No external LB device found"); + if (logger.isDebugEnabled()) { + logger.debug("Cannot update usage stats, No external LB device found"); } return; } @@ -243,7 +241,7 @@ public void updateExternalLoadBalancerNetworkUsageStats(long loadBalancerRuleId) if (lbAnswer == null || !lbAnswer.getResult()) { String details = (lbAnswer != null) ? 
lbAnswer.getDetails() : "details unavailable"; String msg = "Unable to get external load balancer stats for network" + networkId + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); return; } } @@ -251,7 +249,7 @@ public void updateExternalLoadBalancerNetworkUsageStats(long loadBalancerRuleId) long accountId = lb.getAccountId(); AccountVO account = _accountDao.findById(accountId); if (account == null) { - s_logger.debug("Skipping stats update for external LB for account with ID " + accountId); + logger.debug("Skipping stats update for external LB for account with ID " + accountId); return; } @@ -285,7 +283,7 @@ public void updateExternalLoadBalancerNetworkUsageStats(long loadBalancerRuleId) } if (bytesSentAndReceived == null) { - s_logger.debug("Didn't get an external network usage answer for public IP " + publicIp); + logger.debug("Didn't get an external network usage answer for public IP " + publicIp); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; @@ -314,23 +312,23 @@ public void doInTransactionWithoutResult(TransactionStatus status) { userStats.setCurrentBytesSent(newCurrentBytesSent); if (oldCurrentBytesSent > newCurrentBytesSent) { - s_logger.warn(warning + "Stored bytes sent: " + toHumanReadableSize(oldCurrentBytesSent) + ", new bytes sent: " + toHumanReadableSize(newCurrentBytesSent) + "."); + logger.warn(warning + "Stored bytes sent: " + toHumanReadableSize(oldCurrentBytesSent) + ", new bytes sent: " + toHumanReadableSize(newCurrentBytesSent) + "."); userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent); } userStats.setCurrentBytesReceived(newCurrentBytesReceived); if (oldCurrentBytesReceived > newCurrentBytesReceived) { - s_logger.warn(warning + "Stored bytes received: " + toHumanReadableSize(oldCurrentBytesReceived) + ", new bytes received: " + toHumanReadableSize(newCurrentBytesReceived) + "."); + logger.warn(warning + "Stored bytes received: " + 
toHumanReadableSize(oldCurrentBytesReceived) + ", new bytes received: " + toHumanReadableSize(newCurrentBytesReceived) + "."); userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived); } if (_userStatsDao.update(userStats.getId(), userStats)) { - s_logger.debug("Successfully updated stats for " + statsEntryIdentifier); + logger.debug("Successfully updated stats for " + statsEntryIdentifier); } else { - s_logger.debug("Failed to update stats for " + statsEntryIdentifier); + logger.debug("Failed to update stats for " + statsEntryIdentifier); } } else { - s_logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); + logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); } } }); @@ -364,7 +362,7 @@ protected void runInContext() { // Skip external device usage collection if none exist if(_hostDao.listByType(Host.Type.ExternalFirewall).isEmpty() && _hostDao.listByType(Host.Type.ExternalLoadBalancer).isEmpty()){ - s_logger.debug("External devices are not used. Skipping external device usage collection"); + logger.debug("External devices are not used. 
Skipping external device usage collection"); return; } @@ -378,14 +376,14 @@ protected void runInContext() { } } } catch (Exception e) { - s_logger.warn("Problems while getting external device usage", e); + logger.warn("Problems while getting external device usage", e); } finally { scanLock.releaseRef(); } } protected void runExternalDeviceNetworkUsageTask() { - s_logger.debug("External devices stats collector is running..."); + logger.debug("External devices stats collector is running..."); for (DataCenterVO zone : _dcDao.listAll()) { List domainRoutersInZone = _routerDao.listByDataCenter(zone.getId()); @@ -400,8 +398,8 @@ protected void runExternalDeviceNetworkUsageTask() { long accountId = domainRouter.getAccountId(); if (accountsProcessed.contains(new Long(accountId))) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Networks for Account " + accountId + " are already processed for external network usage, so skipping usage check."); + if (logger.isTraceEnabled()) { + logger.trace("Networks for Account " + accountId + " are already processed for external network usage, so skipping usage check."); } continue; } @@ -415,7 +413,7 @@ protected void runExternalDeviceNetworkUsageTask() { for (NetworkVO network : networksForAccount) { if (!_networkModel.networkIsConfiguredForExternalNetworking(zoneId, network.getId())) { - s_logger.debug("Network " + network.getId() + " is not configured for external networking, so skipping usage check."); + logger.debug("Network " + network.getId() + " is not configured for external networking, so skipping usage check."); continue; } @@ -448,17 +446,17 @@ protected void runExternalDeviceNetworkUsageTask() { if (firewallAnswer == null || !firewallAnswer.getResult()) { String details = (firewallAnswer != null) ? 
firewallAnswer.getDetails() : "details unavailable"; String msg = "Unable to get external firewall stats for network" + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); } else { fwDeviceUsageAnswerMap.put(fwDeviceId, firewallAnswer); } } catch (Exception e) { String msg = "Unable to get external firewall stats for network" + zone.getName(); - s_logger.error(msg, e); + logger.error(msg, e); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); } firewallAnswer = fwDeviceUsageAnswerMap.get(fwDeviceId); } @@ -483,17 +481,17 @@ protected void runExternalDeviceNetworkUsageTask() { if (lbAnswer == null || !lbAnswer.getResult()) { String details = (lbAnswer != null) ? lbAnswer.getDetails() : "details unavailable"; String msg = "Unable to get external load balancer stats for " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); } else { lbDeviceUsageAnswerMap.put(lbDeviceId, lbAnswer); } } catch (Exception e) { String msg = "Unable to get external load balancer stats for " + zone.getName(); - s_logger.error(msg, e); + logger.error(msg, e); } } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); } lbAnswer = lbDeviceUsageAnswerMap.get(lbDeviceId); } @@ -506,7 +504,7 @@ protected void runExternalDeviceNetworkUsageTask() { AccountVO account = _accountDao.findById(accountId); if (account == null) { - s_logger.debug("Skipping stats update for account with ID " + accountId); + logger.debug("Skipping stats update for account with ID " + 
accountId); continue; } @@ -533,13 +531,13 @@ private boolean updateBytes(UserStatisticsVO userStats, long newCurrentBytesSent userStats.setCurrentBytesSent(newCurrentBytesSent); if (oldCurrentBytesSent > newCurrentBytesSent) { - s_logger.warn(warning + "Stored bytes sent: " + toHumanReadableSize(oldCurrentBytesSent) + ", new bytes sent: " + toHumanReadableSize(newCurrentBytesSent) + "."); + logger.warn(warning + "Stored bytes sent: " + toHumanReadableSize(oldCurrentBytesSent) + ", new bytes sent: " + toHumanReadableSize(newCurrentBytesSent) + "."); userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent); } userStats.setCurrentBytesReceived(newCurrentBytesReceived); if (oldCurrentBytesReceived > newCurrentBytesReceived) { - s_logger.warn(warning + "Stored bytes received: " + toHumanReadableSize(oldCurrentBytesReceived) + ", new bytes received: " + toHumanReadableSize(newCurrentBytesReceived) + "."); + logger.warn(warning + "Stored bytes received: " + toHumanReadableSize(oldCurrentBytesReceived) + ", new bytes received: " + toHumanReadableSize(newCurrentBytesReceived) + "."); userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived); } @@ -592,7 +590,7 @@ private boolean updateStatsEntry(long accountId, long zoneId, long networkId, St } if (bytesSentAndReceived == null) { - s_logger.debug("Didn't get an external network usage answer for public IP " + publicIp); + logger.debug("Didn't get an external network usage answer for public IP " + publicIp); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; @@ -600,14 +598,14 @@ private boolean updateStatsEntry(long accountId, long zoneId, long networkId, St } else { URI broadcastURI = network.getBroadcastUri(); if (broadcastURI == null) { - s_logger.debug("Not updating stats for guest network with ID " + network.getId() + " because the network is not implemented."); + logger.debug("Not updating stats for guest network with ID " + 
network.getId() + " because the network is not implemented."); return true; } else { long vlanTag = Integer.parseInt(BroadcastDomainType.getValue(broadcastURI)); long[] bytesSentAndReceived = answer.guestVlanBytes.get(String.valueOf(vlanTag)); if (bytesSentAndReceived == null) { - s_logger.warn("Didn't get an external network usage answer for guest VLAN " + vlanTag); + logger.warn("Didn't get an external network usage answer for guest VLAN " + vlanTag); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; @@ -619,15 +617,15 @@ private boolean updateStatsEntry(long accountId, long zoneId, long networkId, St try { userStats = _userStatsDao.lock(accountId, zoneId, networkId, publicIp, hostId, host.getType().toString()); } catch (Exception e) { - s_logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); + logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); return false; } if (updateBytes(userStats, newCurrentBytesSent, newCurrentBytesReceived)) { - s_logger.debug("Successfully updated stats for " + statsEntryIdentifier); + logger.debug("Successfully updated stats for " + statsEntryIdentifier); return true; } else { - s_logger.debug("Failed to update stats for " + statsEntryIdentifier); + logger.debug("Failed to update stats for " + statsEntryIdentifier); return false; } } @@ -715,7 +713,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); return true; } catch (Exception e) { - s_logger.warn("Exception: ", e); + logger.warn("Exception: ", e); return false; } } diff --git a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index 21eae27ea667..924a3b75dada 100644 --- a/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -30,7 +30,6 
@@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -174,7 +173,6 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl @Inject FirewallRulesDao _fwRulesDao; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalFirewallDeviceManagerImpl.class); private long _defaultFwCapacity; @Override @@ -219,7 +217,7 @@ public ExternalFirewallDeviceVO addExternalFirewall(long physicalNetworkId, Stri try { uri = new URI(url); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new InvalidParameterValueException(e.getMessage()); } @@ -302,7 +300,7 @@ public boolean deleteExternalFirewall(Long hostId) { _externalFirewallDeviceDao.remove(fwDeviceId); return true; } catch (Exception e) { - s_logger.debug("Failed to delete external firewall device due to " + e.getMessage()); + logger.debug("Failed to delete external firewall device due to " + e.getMessage()); return false; } } @@ -388,7 +386,7 @@ protected boolean freeFirewallForNetwork(Network network) { _networkExternalFirewallDao.remove(fwDeviceForNetwork.getId()); } } catch (Exception exception) { - s_logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); + logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); return false; } finally { deviceMapLock.unlock(); @@ -423,7 +421,7 @@ public ExternalFirewallResponse createExternalFirewallResponse(Host externalFire @Override public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network network) throws ResourceUnavailableException, InsufficientCapacityException { if 
(network.getTrafficType() != TrafficType.Guest) { - s_logger.trace("External firewall can only be used for add/remove guest networks."); + logger.trace("External firewall can only be used for add/remove guest networks."); return false; } @@ -453,7 +451,7 @@ public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network netwo } else { ExternalFirewallDeviceVO fwDeviceVO = getExternalFirewallForNetwork(network); if (fwDeviceVO == null) { - s_logger.warn("Network shutdown requested on external firewall element, which did not implement the network." + logger.warn("Network shutdown requested on external firewall element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed."); return true; } @@ -478,7 +476,7 @@ public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network netwo } if (sourceNatIp == null) { String errorMsg = "External firewall was unable to find the source NAT IP address for network " + network.getName(); - s_logger.error(errorMsg); + logger.error(errorMsg); return true; } } @@ -515,10 +513,10 @@ public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network netwo String answerDetails = (answer != null) ? 
answer.getDetails() : "answer was null"; String msg = "External firewall was unable to " + action + " the guest network on the external firewall in zone " + zone.getName() + " due to " + answerDetails; - s_logger.error(msg); + logger.error(msg); if (!add && (!reservedIpAddressesForGuestNetwork.contains(network.getGateway()))) { // If we failed the implementation as well, then just return, no complain - s_logger.error("Skip the shutdown of guest network on SRX because it seems we didn't implement it as well"); + logger.error("Skip the shutdown of guest network on SRX because it seems we didn't implement it as well"); return true; } throw new ResourceUnavailableException(msg, DataCenter.class, zoneId); @@ -545,7 +543,7 @@ public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network netwo List nics = _nicDao.listByNetworkId(network.getId()); for (NicVO nic : nics) { if (nic.getVmType() == null && ReservationStrategy.PlaceHolder.equals(nic.getReservationStrategy()) && nic.getIPv4Address().equals(network.getGateway())) { - s_logger.debug("Removing placeholder nic " + nic + " for the network " + network); + logger.debug("Removing placeholder nic " + nic + " for the network " + network); _nicDao.remove(nic.getId()); } } @@ -553,7 +551,7 @@ public boolean manageGuestNetworkWithExternalFirewall(boolean add, Network netwo } String action = add ? 
"implemented" : "shut down"; - s_logger.debug("External firewall has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + + logger.debug("External firewall has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + ") with VLAN tag " + guestVlanTag); return true; @@ -574,7 +572,7 @@ public boolean applyFirewallRules(Network network, List assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -617,7 +615,7 @@ public boolean applyStaticNatRules(Network network, List ru assert (externalFirewall != null); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + + logger.debug("External firewall was asked to apply firewall rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -645,7 +643,7 @@ protected void sendFirewallRules(List firewallRules, DataCenter if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to apply static nat rules to the SRX appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } } @@ -658,7 +656,7 @@ protected void sendStaticNatRules(List staticNatRules, DataCent if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to apply static nat rules to the SRX appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } } @@ -671,7 +669,7 @@ protected void sendPortForwardingRules(List portForwarding if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to apply port forwarding rules to the SRX appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } } @@ -713,7 +711,7 @@ public boolean manageRemoteAccessVpn(boolean create, Network network, RemoteAcce if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; String msg = "External firewall was unable to create a remote access VPN in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } @@ -749,7 +747,7 @@ public boolean manageRemoteAccessVpnUsers(Network network, RemoteAccessVpn vpn, String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); String msg = "External firewall was unable to add remote access users in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, zone.getId()); } @@ -822,7 +820,7 @@ public boolean applyPortForwardingRules(Network network, List l } else { ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { - s_logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); + logger.warn("There is no external load balancer device assigned to this network either network is not implement are already shutdown so just returning"); return true; } else { externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); @@ -933,7 +931,7 @@ public boolean applyLoadBalancerRules(Network network, List l boolean externalLoadBalancerIsInline = _networkMgr.isNetworkInlineMode(network); if (network.getState() == Network.State.Allocated) { - s_logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + + logger.debug("External load balancer was asked to apply LB rules for network with ID " + network.getId() + "; this network is not implemented. Skipping backend commands."); return true; } @@ -1001,13 +999,13 @@ public boolean applyLoadBalancerRules(Network network, List l if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to apply load balancer rules to the external load balancer appliance in zone " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } } } catch (Exception ex) { if (externalLoadBalancerIsInline) { - s_logger.error("Rollbacking static nat operation of inline mode load balancing due to error on applying LB rules!"); + logger.error("Rollbacking static nat operation of inline mode load balancing due to error on applying LB rules!"); String existedGuestIp = loadBalancersToApply.get(0).getSrcIp(); // Rollback static NAT operation in current session for (int i = 0; i < loadBalancingRules.size(); i++) { @@ -1034,7 +1032,7 @@ public boolean applyLoadBalancerRules(Network network, List l @Override public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network guestConfig) throws ResourceUnavailableException, InsufficientCapacityException { if (guestConfig.getTrafficType() != TrafficType.Guest) { - s_logger.trace("External load balancer can only be used for guest networks."); + logger.trace("External load balancer can only be used for guest networks."); return false; } @@ -1051,17 +1049,17 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g lbDeviceVO = allocateLoadBalancerForNetwork(guestConfig); if (lbDeviceVO == null) { String msg = "failed to alloacate a external load balancer for the network " + guestConfig.getId(); - s_logger.error(msg); + logger.error(msg); throw new InsufficientNetworkCapacityException(msg, DataCenter.class, guestConfig.getDataCenterId()); } } externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); - s_logger.debug("Allocated external load balancer device:" + lbDeviceVO.getId() + " for the network: " + guestConfig.getId()); + logger.debug("Allocated external load balancer device:" + lbDeviceVO.getId() + 
" for the network: " + guestConfig.getId()); } else { // find the load balancer device allocated for the network ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(guestConfig); if (lbDeviceVO == null) { - s_logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed. So just returning."); return true; } @@ -1087,14 +1085,14 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g selfIp = _ipAddrMgr.acquireGuestIpAddress(guestConfig, null); if (selfIp == null) { String msg = "failed to acquire guest IP address so not implementing the network on the external load balancer "; - s_logger.error(msg); + logger.error(msg); throw new InsufficientNetworkCapacityException(msg, Network.class, guestConfig.getId()); } } else { // get the self-ip used by the load balancer Nic selfipNic = getPlaceholderNic(guestConfig); if (selfipNic == null) { - s_logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + logger.warn("Network shutdwon requested on external load balancer element, which did not implement the network." + " Either network implement failed half way through or already network shutdown is completed. So just returning."); return true; } @@ -1115,7 +1113,7 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g String answerDetails = (answer != null) ? answer.getDetails() : null; answerDetails = (answerDetails != null) ? 
" due to " + answerDetails : ""; String msg = "External load balancer was unable to " + action + " the guest network on the external load balancer in zone " + zone.getName() + answerDetails; - s_logger.error(msg); + logger.error(msg); throw new ResourceUnavailableException(msg, Network.class, guestConfig.getId()); } @@ -1131,14 +1129,14 @@ public boolean manageGuestNetworkWithExternalLoadBalancer(boolean add, Network g boolean releasedLB = freeLoadBalancerForNetwork(guestConfig); if (!releasedLB) { String msg = "Failed to release the external load balancer used for the network: " + guestConfig.getId(); - s_logger.error(msg); + logger.error(msg); } } - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { Account account = _accountDao.findByIdIncludingRemoved(guestConfig.getAccountId()); String action = add ? "implemented" : "shut down"; - s_logger.debug("External load balancer has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + + logger.debug("External load balancer has " + action + " the guest network for account " + account.getAccountName() + "(id = " + account.getAccountId() + ") with VLAN tag " + guestVlanTag); } @@ -1196,20 +1194,20 @@ protected IpDeployer getIpDeployerForInlineMode(Network network) { List providers = _networkMgr.getProvidersForServiceInNetwork(network, Service.Firewall); //Only support one provider now if (providers == null) { - s_logger.error("Cannot find firewall provider for network " + network.getId()); + logger.error("Cannot find firewall provider for network " + network.getId()); return null; } if (providers.size() != 1) { - s_logger.error("Found " + providers.size() + " firewall provider for network " + network.getId()); + logger.error("Found " + providers.size() + " firewall provider for network " + network.getId()); return null; } NetworkElement element = _networkModel.getElementImplementingProvider(providers.get(0).getName()); if (!(element instanceof 
IpDeployer)) { - s_logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); + logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); return null; } - s_logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); + logger.info("Let " + element.getName() + " handle ip association for " + getName() + " in network " + network.getId()); return (IpDeployer)element; } @@ -1231,7 +1229,7 @@ public List getLBHealthChecks(Network network, List getLBHealthChecks(Network network, List getLBHealthChecks(Network network, List listNetworkDevice(Long zoneId, Long physicalNetworkId, Long p // if (devs.size() == 1) { // res.add(devs.get(0)); // } else { -// s_logger.debug("List " + type + ": " + devs.size() + " found"); +// logger.debug("List " + type + ": " + devs.size() + " found"); // } // } else { // List devs = _hostDao.listBy(type, zoneId); diff --git a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java index b5908935350f..6bf0487e018f 100644 --- a/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/IpAddressManagerImpl.java @@ -54,7 +54,6 @@ import org.apache.cloudstack.region.PortableIpVO; import org.apache.cloudstack.region.Region; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -190,7 +189,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class IpAddressManagerImpl extends ManagerBase implements IpAddressManager, Configurable { - private static final Logger s_logger = Logger.getLogger(IpAddressManagerImpl.class); @Inject NetworkOrchestrationService _networkMgr; @@ -333,7 +331,7 @@ public class IpAddressManagerImpl extends ManagerBase 
implements IpAddressManage private List getIpv6SupportingVlanRangeIds(long dcId) throws InsufficientAddressCapacityException { List vlans = _vlanDao.listIpv6SupportingVlansByZone(dcId); if (CollectionUtils.isEmpty(vlans)) { - s_logger.error("Unable to find VLAN IP range that support both IPv4 and IPv6"); + logger.error("Unable to find VLAN IP range that support both IPv4 and IPv6"); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, dcId); ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); throw ex; @@ -380,7 +378,7 @@ private IPAddressVO assignAndAllocateIpAddressEntry(final Account owner, final V } if (finalAddress == null) { - s_logger.error("Failed to fetch any free public IP address"); + logger.error("Failed to fetch any free public IP address"); throw new CloudRuntimeException("Failed to fetch any free public IP address"); } @@ -390,7 +388,7 @@ private IPAddressVO assignAndAllocateIpAddressEntry(final Account owner, final V final State expectedAddressState = allocate ? 
State.Allocated : State.Allocating; if (finalAddress.getState() != expectedAddressState) { - s_logger.error("Failed to fetch new public IP and get in expected state=" + expectedAddressState); + logger.error("Failed to fetch new public IP and get in expected state=" + expectedAddressState); throw new CloudRuntimeException("Failed to fetch new public IP with expected state " + expectedAddressState); } return finalAddress; @@ -528,7 +526,7 @@ public boolean configure(String name, Map params) { rulesContinueOnErrFlag = RulesContinueOnError.value(); } - s_logger.info("IPAddress Manager is configured."); + logger.info("IPAddress Manager is configured."); return true; } @@ -579,7 +577,7 @@ boolean checkIfIpAssocRequired(Network network, boolean postApplyRules, List rules, FirewallRule.Purpose purpose, NetworkRuleApplier applier, boolean continueOnError) throws ResourceUnavailableException { if (rules == null || rules.size() == 0) { - s_logger.debug("There are no rules to forward to the network elements"); + logger.debug("There are no rules to forward to the network elements"); return true; } @@ -641,7 +639,7 @@ public boolean applyRules(List rules, FirewallRule.Purpo if (!continueOnError) { throw e; } - s_logger.warn("Problems with applying " + purpose + " rules but pushing on", e); + logger.warn("Problems with applying " + purpose + " rules but pushing on", e); success = false; } @@ -659,31 +657,31 @@ protected boolean cleanupIpResources(long ipId, long userId, Account caller) { // Revoke all firewall rules for the ip try { - s_logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of public IP id=" + ipId + " release..."); if (!_firewallMgr.revokeFirewallRulesForIp(ipId, userId, caller)) { - s_logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the firewall rules 
for ip id=" + ipId + " as a part of ip release"); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); success = false; } // Revoke all PF/Static nat rules for the ip try { - s_logger.debug("Revoking all " + Purpose.PortForwarding + "/" + Purpose.StaticNat + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all " + Purpose.PortForwarding + "/" + Purpose.StaticNat + " rules as a part of public IP id=" + ipId + " release..."); if (!_rulesMgr.revokeAllPFAndStaticNatRulesForIp(ipId, userId, caller)) { - s_logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release"); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all the port forwarding rules for ip id=" + ipId + " as a part of ip release", e); success = false; } - s_logger.debug("Revoking all " + Purpose.LoadBalancing + " rules as a part of public IP id=" + ipId + " release..."); + logger.debug("Revoking all " + Purpose.LoadBalancing + " rules as a part of public IP id=" + ipId + " release..."); if (!_lbMgr.removeAllLoadBalanacersForIp(ipId, caller, userId)) { - s_logger.warn("Unable to revoke all the load balancer rules for ip id=" + ipId + " as a part of ip release"); + logger.warn("Unable to revoke all the load balancer rules for ip id=" + ipId + " as a part of ip release"); success = false; } @@ -691,11 +689,11 @@ protected boolean cleanupIpResources(long ipId, long userId, Account caller) { // conditions // only when ip address failed to be 
cleaned up as a part of account destroy and was marked as Releasing, this part of // the code would be triggered - s_logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); + logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); try { _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller,false); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); success = false; } @@ -713,7 +711,7 @@ public boolean disassociatePublicIpAddress(long addrId, long userId, Account cal // Cleanup all ip address resources - PF/LB/Static nat rules if (!cleanupIpResources(addrId, userId, caller)) { success = false; - s_logger.warn("Failed to release resources for ip address id=" + addrId); + logger.warn("Failed to release resources for ip address id=" + addrId); } IPAddressVO ip = markIpAsUnavailable(addrId); @@ -721,15 +719,15 @@ public boolean disassociatePublicIpAddress(long addrId, long userId, Account cal return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); + if (logger.isDebugEnabled()) { + logger.debug("Releasing ip id=" + addrId + "; sourceNat = " + ip.isSourceNat()); } if (ip.getAssociatedWithNetworkId() != null) { Network network = _networksDao.findById(ip.getAssociatedWithNetworkId()); try { if (!applyIpAssociations(network, rulesContinueOnErrFlag)) { - s_logger.warn("Unable to apply ip address associations for " + network); + logger.warn("Unable to apply ip address associations for " + network); success = false; } } catch (ResourceUnavailableException e) { @@ -746,7 +744,7 @@ public boolean disassociatePublicIpAddress(long addrId, long userId, Account cal if (ip.isPortable()) { releasePortableIpAddress(addrId); } - 
s_logger.debug("Released a public ip id=" + addrId); + logger.debug("Released a public ip id=" + addrId); } else if (publicIpQuarantine != null) { removePublicIpAddressFromQuarantine(publicIpQuarantine.getId(), "Public IP address removed from quarantine as there was an error while disassociating it."); } @@ -941,7 +939,7 @@ public List listAvailablePublicIps(final long dcId, final Long podI ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid()); throw ex; } - s_logger.warn(errorMessage.toString()); + logger.warn(errorMessage.toString()); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, dcId); ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); throw ex; @@ -977,7 +975,7 @@ public List listAvailablePublicIps(final long dcId, final Long podI ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid()); throw ex; } - s_logger.warn(errorMessage.toString()); + logger.warn(errorMessage.toString()); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, dcId); ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); throw ex; @@ -999,7 +997,7 @@ public List listAvailablePublicIps(final long dcId, final Long podI try { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); } catch (ResourceAllocationException ex) { - s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); + logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); } } @@ -1036,11 +1034,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } } else { - s_logger.error("Failed to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); + 
logger.error("Failed to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); } } } else { - s_logger.error("Failed to acquire row lock to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); + logger.error("Failed to acquire row lock to mark public IP as allocated with id=" + addr.getId() + " address=" + addr.getAddress()); } } }); @@ -1092,8 +1090,8 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd ConcurrentOperationException ex = new ConcurrentOperationException("Unable to lock account"); throw ex; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("lock account " + ownerId + " is acquired"); + if (logger.isDebugEnabled()) { + logger.debug("lock account " + ownerId + " is acquired"); } List vlanDbIds = null; boolean displayIp = true; @@ -1114,19 +1112,19 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd } }); if (ip.getState() != State.Allocated) { - s_logger.error("Failed to fetch new IP and allocate it for ip with id=" + ip.getId() + ", address=" + ip.getAddress()); + logger.error("Failed to fetch new IP and allocate it for ip with id=" + ip.getId() + ", address=" + ip.getAddress()); } return ip; } finally { if (owner != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing lock account " + ownerId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing lock account " + ownerId); } _accountDao.releaseFromLockTable(ownerId); } if (ip == null) { - s_logger.error("Unable to get source nat ip address for account " + ownerId); + logger.error("Unable to get source nat ip address for account " + ownerId); } } } @@ -1151,7 +1149,7 @@ public boolean applyIpAssociations(Network network, boolean continueOnError) thr messageBus.publish(_name, MESSAGE_RELEASE_IPADDR_EVENT, PublishScope.LOCAL, addr); } else { success = false; - s_logger.warn("Failed to release resources for ip address id=" + addr.getId()); + 
logger.warn("Failed to release resources for ip address id=" + addr.getId()); } } } @@ -1198,7 +1196,7 @@ public boolean applyIpAssociations(Network network, boolean postApplyRules, bool if (!continueOnError) { throw e; } else { - s_logger.debug("Resource is not available: " + provider.getName(), e); + logger.debug("Resource is not available: " + provider.getName(), e); } } } @@ -1260,7 +1258,7 @@ public void releasePodIp(Long id) throws CloudRuntimeException { } if (ipVO.getTakenAt() == null) { - s_logger.debug("Ip Address with id= " + id + " is not allocated, so do nothing."); + logger.debug("Ip Address with id= " + id + " is not allocated, so do nothing."); throw new CloudRuntimeException("Ip Address with id= " + id + " is not allocated, so do nothing."); } // Verify permission @@ -1295,17 +1293,17 @@ public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Accou Account accountToLock = null; try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); } accountToLock = _accountDao.acquireInLockTable(ipOwner.getId()); if (accountToLock == null) { - s_logger.warn("Unable to lock account: " + ipOwner.getId()); + logger.warn("Unable to lock account: " + ipOwner.getId()); throw new ConcurrentOperationException("Unable to acquire account lock"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address lock acquired"); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address lock acquired"); } if (ipaddress != null) { @@ -1330,7 +1328,7 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd CallContext.current().setEventDetails("Ip Id: " + ip.getId()); Ip ipAddress = ip.getAddress(); - s_logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + 
" in zone " + zone.getId()); + logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + " in zone " + zone.getId()); return ip; } @@ -1340,11 +1338,11 @@ public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAdd } finally { if (accountToLock != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing lock account " + ipOwner); + if (logger.isDebugEnabled()) { + logger.debug("Releasing lock account " + ipOwner); } _accountDao.releaseFromLockTable(ipOwner.getId()); - s_logger.debug("Associate IP address lock released"); + logger.debug("Associate IP address lock released"); } } return ip; @@ -1474,12 +1472,12 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean } owner = _accountMgr.getAccount(ipToAssoc.getAllocatedToAccountId()); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } if (ipToAssoc.getAssociatedWithNetworkId() != null) { - s_logger.debug("IP " + ipToAssoc + " is already associated with network id=" + networkId); + logger.debug("IP " + ipToAssoc + " is already associated with network id=" + networkId); return ipToAssoc; } @@ -1487,7 +1485,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean if (network != null) { _accountMgr.checkAccess(owner, AccessType.UseEntry, false, network); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } @@ -1504,7 +1502,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean // In Advance zone allow to do IP assoc only for Isolated networks with source nat service enabled if (network.getGuestType() == GuestType.Isolated && !(_networkModel.areServicesSupportedInNetwork(network.getId(), Service.SourceNat))) { if (releaseOnFailure && ipToAssoc != null) { - s_logger.warn("Failed to associate ip 
address, so unassigning ip from the database " + ipToAssoc); + logger.warn("Failed to associate ip address, so unassigning ip from the database " + ipToAssoc); _ipAddressDao.unassignIpAddress(ipToAssoc.getId()); } throw new InvalidParameterValueException("In zone of type " + NetworkType.Advanced + " ip address can be associated only to the network of guest type " @@ -1514,7 +1512,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean // In Advance zone allow to do IP assoc only for shared networks with source nat/static nat/lb/pf services enabled if (network.getGuestType() == GuestType.Shared && !isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { if (releaseOnFailure && ipToAssoc != null) { - s_logger.warn("Failed to associate ip address, so unassigning ip from the database " + ipToAssoc); + logger.warn("Failed to associate ip address, so unassigning ip from the database " + ipToAssoc); _ipAddressDao.unassignIpAddress(ipToAssoc.getId()); } throw new InvalidParameterValueException("In zone of type " + NetworkType.Advanced + " ip address can be associated with network of guest type " + GuestType.Shared @@ -1525,7 +1523,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean boolean isSourceNat = isSourceNatAvailableForNetwork(owner, ipToAssoc, network); - s_logger.debug("Associating ip " + ipToAssoc + " to network " + network); + logger.debug("Associating ip " + ipToAssoc + " to network " + network); IPAddressVO ip = _ipAddressDao.findById(ipId); //update ip address with networkId @@ -1537,16 +1535,16 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean try { success = applyIpAssociations(network, false); if (success) { - s_logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); + logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); } else { - 
s_logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); + logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); } return _ipAddressDao.findById(ipId); } finally { if (!success && releaseOnFailure) { if (ip != null) { try { - s_logger.warn("Failed to associate ip address, so releasing ip from the database " + ip); + logger.warn("Failed to associate ip address, so releasing ip from the database " + ip); _ipAddressDao.markAsUnavailable(ip.getId()); if (!applyIpAssociations(network, true)) { // if fail to apply ip associations again, unassign ip address without updating resource @@ -1554,7 +1552,7 @@ public IPAddressVO associateIPToGuestNetwork(long ipId, long networkId, boolean _ipAddressDao.unassignIpAddress(ip.getId()); } } catch (Exception e) { - s_logger.warn("Unable to disassociate ip address for recovery", e); + logger.warn("Unable to disassociate ip address for recovery", e); } } } @@ -1652,7 +1650,7 @@ public IPAddressVO disassociatePortableIPToGuestNetwork(long ipId, long networkI } owner = _accountMgr.getAccount(ipToAssoc.getAllocatedToAccountId()); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } @@ -1679,9 +1677,9 @@ public IPAddressVO disassociatePortableIPToGuestNetwork(long ipId, long networkI try { boolean success = applyIpAssociations(network, false); if (success) { - s_logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); + logger.debug("Successfully associated ip address " + ip.getAddress().addr() + " to network " + network); } else { - s_logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); + logger.warn("Failed to associate ip address " + ip.getAddress().addr() + " to network " + network); } return ip; } finally { @@ -1844,14 +1842,14 @@ public Ternary, Network> 
doInTransaction(Transa + requiredOfferings.get(0).getTags()); } - s_logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of createVlanIpRange process"); guestNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zoneId, ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null); if (guestNetwork == null) { - s_logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); + logger.warn("Failed to create default Virtual network for the account " + accountId + "in zone " + zoneId); throw new CloudRuntimeException("Failed to create a Guest Isolated Networks with SourceNAT " + "service enabled as a part of createVlanIpRange, for the account " + accountId + "in zone " + zoneId); } @@ -1908,19 +1906,19 @@ public Ternary, Network> doInTransaction(Transa DeployDestination dest = new DeployDestination(zone, null, null, null); Account callerAccount = CallContext.current().getCallingAccount(); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + guestNetwork, s_logger); + Journal journal = new Journal.LogJournal("Implementing " + guestNetwork, logger); ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, callerAccount); - s_logger.debug("Implementing network " + guestNetwork + " as a part of network provision for persistent network"); + logger.debug("Implementing network " + guestNetwork + " as a part of network provision for persistent network"); try { Pair implementedNetwork = 
_networkMgr.implementNetwork(guestNetwork.getId(), dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - s_logger.warn("Failed to implement the network " + guestNetwork); + logger.warn("Failed to implement the network " + guestNetwork); } if (implementedNetwork != null) { guestNetwork = implementedNetwork.second(); } } catch (Exception ex) { - s_logger.warn("Failed to implement network " + guestNetwork + " elements and resources as a part of" + " network provision due to ", ex); + logger.warn("Failed to implement network " + guestNetwork + " elements and resources as a part of" + " network provision due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id)" + " elements and resources as a part of network provision for persistent network"); e.addProxyObject(guestNetwork.getUuid(), "networkId"); @@ -1936,7 +1934,7 @@ public IPAddressVO markIpAsUnavailable(final long addrId) { final IPAddressVO ip = _ipAddressDao.findById(addrId); if (ip.getAllocatedToAccountId() == null && ip.getAllocatedTime() == null) { - s_logger.trace("Ip address id=" + addrId + " is already released"); + logger.trace("Ip address id=" + addrId + " is already released"); return ip; } @@ -1972,22 +1970,22 @@ public IPAddressVO doInTransaction(TransactionStatus status) { protected boolean checkIfIpResourceCountShouldBeUpdated(IPAddressVO ip) { boolean isDirectIp = ip.getAssociatedWithNetworkId() == null && ip.getVpcId() == null; if (isDirectIp) { - s_logger.debug(String.format("IP address [%s] is direct; therefore, the resource count should not be updated.", ip)); + logger.debug(String.format("IP address [%s] is direct; therefore, the resource count should not be updated.", ip)); return false; } if (isIpDedicated(ip)) { - s_logger.debug(String.format("IP address [%s] is dedicated; therefore, the resource count should not be updated.", ip)); + logger.debug(String.format("IP address [%s] is dedicated; 
therefore, the resource count should not be updated.", ip)); return false; } boolean isReservedIp = ip.getState() == IpAddress.State.Reserved; if (isReservedIp) { - s_logger.debug(String.format("IP address [%s] is reserved; therefore, the resource count should not be updated.", ip)); + logger.debug(String.format("IP address [%s] is reserved; therefore, the resource count should not be updated.", ip)); return false; } - s_logger.debug(String.format("IP address [%s] is not direct, dedicated or reserved; therefore, the resource count should be updated.", ip)); + logger.debug(String.format("IP address [%s] is not direct, dedicated or reserved; therefore, the resource count should be updated.", ip)); return true; } @@ -1995,7 +1993,7 @@ protected boolean checkIfIpResourceCountShouldBeUpdated(IPAddressVO ip) { @DB public String acquireGuestIpAddress(Network network, String requestedIp) { if (requestedIp != null && requestedIp.equals(network.getGateway())) { - s_logger.warn("Requested ip address " + requestedIp + " is used as a gateway address in network " + network); + logger.warn("Requested ip address " + requestedIp + " is used as a gateway address in network " + network); return null; } @@ -2006,7 +2004,7 @@ public String acquireGuestIpAddress(Network network, String requestedIp) { Set availableIps = _networkModel.getAvailableIps(network, requestedIp); if (availableIps == null || availableIps.isEmpty()) { - s_logger.debug("There are no free ips in the network " + network); + logger.debug("There are no free ips in the network " + network); return null; } @@ -2017,10 +2015,10 @@ public String acquireGuestIpAddress(Network network, String requestedIp) { String[] cidr = network.getCidr().split("/"); boolean isSameCidr = NetUtils.sameSubnetCIDR(requestedIp, NetUtils.long2Ip(array[0]), Integer.parseInt(cidr[1])); if (!isSameCidr) { - s_logger.warn("Requested ip address " + requestedIp + " doesn't belong to the network " + network + " cidr"); + logger.warn("Requested ip 
address " + requestedIp + " doesn't belong to the network " + network + " cidr"); return null; } else if (NetUtils.IsIpEqualToNetworkOrBroadCastIp(requestedIp, cidr[0], Integer.parseInt(cidr[1]))) { - s_logger.warn("Requested ip address " + requestedIp + " is equal to the to the network/broadcast ip of the network" + network); + logger.warn("Requested ip address " + requestedIp + " is equal to the to the network/broadcast ip of the network" + network); return null; } return requestedIp; @@ -2036,7 +2034,7 @@ public String acquireFirstGuestIpAddress(Network network) { } Set availableIps = _networkModel.getAvailableIps(network, null); if (availableIps == null || availableIps.isEmpty()) { - s_logger.debug("There are no free ips in the network " + network); + logger.debug("There are no free ips in the network " + network); return null; } return NetUtils.long2Ip(availableIps.iterator().next()); @@ -2049,7 +2047,7 @@ public String acquireLastGuestIpAddress(Network network) { } Set availableIps = _networkModel.getAvailableIps(network, null); if (availableIps == null || availableIps.isEmpty()) { - s_logger.debug("There are no free ips in the network " + network); + logger.debug("There are no free ips in the network " + network); return null; } @@ -2094,7 +2092,7 @@ public List getStaticNatSourceIps(List staticN @Override public boolean applyStaticNats(List staticNats, boolean continueOnError, boolean forRevoke) throws ResourceUnavailableException { if (staticNats == null || staticNats.size() == 0) { - s_logger.debug("There are no static nat rules for the network elements"); + logger.debug("There are no static nat rules for the network elements"); return true; } @@ -2103,7 +2101,7 @@ public boolean applyStaticNats(List staticNats, boolean con // Check if the StaticNat service is supported if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.StaticNat)) { - s_logger.debug("StaticNat service is not supported in specified network id"); + 
logger.debug("StaticNat service is not supported in specified network id"); return true; } @@ -2131,7 +2129,7 @@ public boolean applyStaticNats(List staticNats, boolean con if (!continueOnError) { throw e; } - s_logger.warn("Problems with " + element.getName() + " but pushing on", e); + logger.warn("Problems with " + element.getName() + " but pushing on", e); success = false; } @@ -2192,7 +2190,7 @@ public IpAddress assignSystemIp(long networkId, Account owner, boolean forElasti if ((off.isElasticLb() && forElasticLb) || (off.isElasticIp() && forElasticIp)) { try { - s_logger.debug("Allocating system IP address for load balancer rule..."); + logger.debug("Allocating system IP address for load balancer rule..."); // allocate ip ip = allocateIP(owner, true, guestNetwork.getDataCenterId()); // apply ip associations @@ -2222,10 +2220,10 @@ public boolean handleSystemIpRelease(IpAddress ip) { if (ip.getSystem()) { CallContext ctx = CallContext.current(); if (!disassociatePublicIpAddress(ip.getId(), ctx.getCallingUserId(), ctx.getCallingAccount())) { - s_logger.warn("Unable to release system ip address id=" + ip.getId()); + logger.warn("Unable to release system ip address id=" + ip.getId()); success = false; } else { - s_logger.warn("Successfully released system ip address id=" + ip.getId()); + logger.warn("Successfully released system ip address id=" + ip.getId()); } } } @@ -2251,7 +2249,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff if (placeholderNic != null) { IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIPv4Address()); ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - s_logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); + logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); } } @@ 
-2301,7 +2299,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff //Get ip address from the placeholder and don't allocate a new one if (requestedIpv4 != null && vm.getType() == VirtualMachine.Type.DomainRouter) { - s_logger.debug("There won't be nic assignment for VR id " + vm.getId() + " in this network " + network); + logger.debug("There won't be nic assignment for VR id " + vm.getId() + " in this network " + network); } @@ -2348,7 +2346,7 @@ public int getRuleCountForIp(Long addressId, FirewallRule.Purpose purpose, Firew public String allocatePublicIpForGuestNic(Network network, Long podId, Account owner, String requestedIp) throws InsufficientAddressCapacityException { PublicIp ip = assignPublicIpAddress(network.getDataCenterId(), podId, owner, VlanType.DirectAttached, network.getId(), requestedIp, false, false); if (ip == null) { - s_logger.debug("There is no free public ip address"); + logger.debug("There is no free public ip address"); return null; } Ip ipAddr = ip.getAddress(); @@ -2407,25 +2405,25 @@ public boolean canPublicIpAddressBeAllocated(IpAddress ip, Account newOwner) { PublicIpQuarantineVO publicIpQuarantineVO = publicIpQuarantineDao.findByPublicIpAddressId(ip.getId()); if (publicIpQuarantineVO == null) { - s_logger.debug(String.format("Public IP address [%s] is not in quarantine; therefore, it is allowed to be allocated.", ip)); + logger.debug(String.format("Public IP address [%s] is not in quarantine; therefore, it is allowed to be allocated.", ip)); return true; } if (!isPublicIpAddressStillInQuarantine(publicIpQuarantineVO, new Date())) { - s_logger.debug(String.format("Public IP address [%s] is no longer in quarantine; therefore, it is allowed to be allocated.", ip)); + logger.debug(String.format("Public IP address [%s] is no longer in quarantine; therefore, it is allowed to be allocated.", ip)); return true; } Account previousOwner = _accountMgr.getAccount(publicIpQuarantineVO.getPreviousOwnerId()); if 
(Objects.equals(previousOwner.getUuid(), newOwner.getUuid())) { - s_logger.debug(String.format("Public IP address [%s] is in quarantine; however, the Public IP previous owner [%s] is the same as the new owner [%s]; therefore the IP" + + logger.debug(String.format("Public IP address [%s] is in quarantine; however, the Public IP previous owner [%s] is the same as the new owner [%s]; therefore the IP" + " can be allocated. The public IP address will be removed from quarantine.", ip, previousOwner, newOwner)); removePublicIpAddressFromQuarantine(publicIpQuarantineVO.getId(), "IP was removed from quarantine because it has been allocated by the previous owner"); return true; } - s_logger.error(String.format("Public IP address [%s] is in quarantine and the previous owner [%s] is different than the new owner [%s]; therefore, the IP cannot be " + + logger.error(String.format("Public IP address [%s] is in quarantine and the previous owner [%s] is different than the new owner [%s]; therefore, the IP cannot be " + "allocated.", ip, previousOwner, newOwner)); return false; } @@ -2443,7 +2441,7 @@ public boolean isPublicIpAddressStillInQuarantine(PublicIpQuarantineVO publicIpQ public PublicIpQuarantine addPublicIpAddressToQuarantine(IpAddress publicIpAddress, Long domainId) { Integer quarantineDuration = PUBLIC_IP_ADDRESS_QUARANTINE_DURATION.valueInDomain(domainId); if (quarantineDuration <= 0) { - s_logger.debug(String.format("Not adding IP [%s] to quarantine because configuration [%s] has value equal or less to 0.", publicIpAddress.getAddress(), + logger.debug(String.format("Not adding IP [%s] to quarantine because configuration [%s] has value equal or less to 0.", publicIpAddress.getAddress(), PUBLIC_IP_ADDRESS_QUARANTINE_DURATION.key())); return null; } @@ -2452,7 +2450,7 @@ public PublicIpQuarantine addPublicIpAddressToQuarantine(IpAddress publicIpAddre long accountId = publicIpAddress.getAccountId(); if (accountId == Account.ACCOUNT_ID_SYSTEM) { - 
s_logger.debug(String.format("Not adding IP [%s] to quarantine because it belongs to the system account.", publicIpAddress.getAddress())); + logger.debug(String.format("Not adding IP [%s] to quarantine because it belongs to the system account.", publicIpAddress.getAddress())); return null; } @@ -2462,7 +2460,7 @@ public PublicIpQuarantine addPublicIpAddressToQuarantine(IpAddress publicIpAddre quarantineEndDate.add(Calendar.MINUTE, quarantineDuration); PublicIpQuarantineVO publicIpQuarantine = new PublicIpQuarantineVO(ipId, accountId, currentDate, quarantineEndDate.getTime()); - s_logger.debug(String.format("Adding public IP Address [%s] to quarantine for the duration of [%s] minute(s).", publicIpAddress.getAddress(), quarantineDuration)); + logger.debug(String.format("Adding public IP Address [%s] to quarantine for the duration of [%s] minute(s).", publicIpAddress.getAddress(), quarantineDuration)); return publicIpQuarantineDao.persist(publicIpQuarantine); } @@ -2477,7 +2475,7 @@ public void removePublicIpAddressFromQuarantine(Long quarantineProcessId, String publicIpQuarantineVO.setRemovalReason(removalReason); publicIpQuarantineVO.setRemoverAccountId(removerAccountId); - s_logger.debug(String.format("Removing public IP Address [%s] from quarantine by updating the removed date to [%s].", ipAddress, removedDate)); + logger.debug(String.format("Removing public IP Address [%s] from quarantine by updating the removed date to [%s].", ipAddress, removedDate)); publicIpQuarantineDao.persist(publicIpQuarantineVO); } @@ -2489,7 +2487,7 @@ public PublicIpQuarantine updatePublicIpAddressInQuarantine(Long quarantineProce publicIpQuarantineVO.setEndDate(newEndDate); - s_logger.debug(String.format("Updating the end date for the quarantine of the public IP Address [%s] from [%s] to [%s].", ipAddress, currentEndDate, newEndDate)); + logger.debug(String.format("Updating the end date for the quarantine of the public IP Address [%s] from [%s] to [%s].", ipAddress, currentEndDate, 
newEndDate)); publicIpQuarantineDao.persist(publicIpQuarantineVO); return publicIpQuarantineVO; } diff --git a/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java b/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java index 52096f97954c..4cee7423cbfc 100644 --- a/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6AddressManagerImpl.java @@ -23,7 +23,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.dc.DataCenter; @@ -52,7 +51,6 @@ import com.googlecode.ipv6.IPv6Address; public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressManager { - public static final Logger s_logger = Logger.getLogger(Ipv6AddressManagerImpl.class.getName()); String _name = null; int _ipv6RetryMax = 0; @@ -205,14 +203,14 @@ protected boolean isIp6Taken(Network network, String requestedIpv6) { public void setNicIp6Address(final NicProfile nic, final DataCenter dc, final Network network) throws InsufficientAddressCapacityException { if (network.getIp6Gateway() != null) { if (nic.getIPv6Address() == null) { - s_logger.debug("Found IPv6 CIDR " + network.getIp6Cidr() + " for Network " + network); + logger.debug("Found IPv6 CIDR " + network.getIp6Cidr() + " for Network " + network); nic.setIPv6Cidr(network.getIp6Cidr()); nic.setIPv6Gateway(network.getIp6Gateway()); setNicPropertiesFromNetwork(nic, network); IPv6Address ipv6addr = NetUtils.EUI64Address(network.getIp6Cidr(), nic.getMacAddress()); - s_logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); + logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); nic.setIPv6Address(ipv6addr.toString()); if (nic.getIPv4Address() != null) { diff --git 
a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java index 68b32e577db4..a3432a8633a8 100644 --- a/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java +++ b/server/src/main/java/com/cloud/network/Ipv6ServiceImpl.java @@ -52,7 +52,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Resource; @@ -110,7 +109,6 @@ public class Ipv6ServiceImpl extends ComponentLifecycleBase implements Ipv6Service { - public static final Logger s_logger = Logger.getLogger(Ipv6ServiceImpl.class.getName()); private static final String s_publicNetworkReserver = PublicNetworkGuru.class.getSimpleName(); ScheduledExecutorService _ipv6GuestPrefixSubnetNetworkMapStateScanner; @@ -161,7 +159,7 @@ private boolean isPublicIpv6PlaceholderNic(NicVO nic) { NicVO nic = nicOptional.get(); Optional vlanOptional = ranges.stream().filter(v -> nic.getIPv6Cidr().equals(v.getIp6Cidr()) && nic.getIPv6Gateway().equals(v.getIp6Gateway())).findFirst(); if (vlanOptional.isEmpty()) { - s_logger.error(String.format("Public IPv6 placeholder NIC with cidr: %s, gateway: %s for network ID: %d is not present in the allocated VLAN: %s", + logger.error(String.format("Public IPv6 placeholder NIC with cidr: %s, gateway: %s for network ID: %d is not present in the allocated VLAN: %s", nic.getIPv6Cidr(), nic.getIPv6Gateway(),network.getId(), ranges.get(0).getVlanTag())); return null; } @@ -207,7 +205,7 @@ private void processPublicIpv6AddressUpdateForVpcTier(final Network network, fin private Pair assignPublicIpv6ToNetworkInternal(Network network, String vlanId, String nicMacAddress) throws InsufficientAddressCapacityException { final List ranges = vlanDao.listIpv6RangeByZoneIdAndVlanId(network.getDataCenterId(), vlanId); if (CollectionUtils.isEmpty(ranges)) 
{ - s_logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlanId)); + logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlanId)); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, network.getDataCenterId()); ex.addProxyObject(ApiDBUtils.findZoneById(network.getDataCenterId()).getUuid()); throw ex; @@ -336,7 +334,7 @@ public Pair preAllocateIpv6SubnetForNetwork(long zoneId) throws return Transaction.execute((TransactionCallbackWithException, ResourceAllocationException>) status -> { List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(zoneId); if (CollectionUtils.isEmpty(prefixes)) { - s_logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", zoneId)); + logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", zoneId)); throw new ResourceAllocationException("Unable to allocate IPv6 network", Resource.ResourceType.network); } Ipv6GuestPrefixSubnetNetworkMapVO ip6Subnet = null; @@ -494,7 +492,7 @@ public void updateIpv6RoutesForVpcResponse(Vpc vpc, VpcResponse response) { public void checkNetworkIpv6Upgrade(Network network) throws InsufficientAddressCapacityException, ResourceAllocationException { List prefixes = dataCenterGuestIpv6PrefixDao.listByDataCenterId(network.getDataCenterId()); if (CollectionUtils.isEmpty(prefixes)) { - s_logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", network.getDataCenterId())); + logger.error(String.format("IPv6 prefixes not found for the zone ID: %d", network.getDataCenterId())); throw new ResourceAllocationException("Unable to allocate IPv6 network", Resource.ResourceType.network); } List addresses = network.getVpcId() == null ? 
@@ -504,7 +502,7 @@ public void checkNetworkIpv6Upgrade(Network network) throws InsufficientAddressC VlanVO vlan = vlanDao.findById(address.getVlanId()); final List ranges = vlanDao.listIpv6RangeByZoneIdAndVlanId(network.getDataCenterId(), vlan.getVlanTag()); if (CollectionUtils.isEmpty(ranges)) { - s_logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlan.getVlanTag())); + logger.error(String.format("Unable to find IPv6 address for zone ID: %d, physical network ID: %d, VLAN: %s", network.getDataCenterId(), network.getPhysicalNetworkId(), vlan.getVlanTag())); InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Insufficient address capacity", DataCenter.class, network.getDataCenterId()); ex.addProxyObject(ApiDBUtils.findZoneById(network.getDataCenterId()).getUuid()); throw ex; @@ -653,13 +651,13 @@ public FirewallRule getIpv6FirewallRule(Long entityId) { public boolean applyIpv6FirewallRule(long id) { FirewallRuleVO rule = firewallDao.findById(id); if (rule == null) { - s_logger.error(String.format("Unable to find IPv6 firewall rule with ID: %d", id)); + logger.error(String.format("Unable to find IPv6 firewall rule with ID: %d", id)); return false; } if (!FirewallRule.Purpose.Ipv6Firewall.equals(rule.getPurpose())) { - s_logger.error(String.format("Cannot apply IPv6 firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), FirewallRule.Purpose.Ipv6Firewall)); + logger.error(String.format("Cannot apply IPv6 firewall rule with ID: %d as purpose %s is not %s", id, rule.getPurpose(), FirewallRule.Purpose.Ipv6Firewall)); } - s_logger.debug(String.format("Applying IPv6 firewall rules for rule with ID: %s", rule.getUuid())); + logger.debug(String.format("Applying IPv6 firewall rules for rule with ID: %s", rule.getUuid())); List rules = firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), 
rule.getPurpose(), FirewallRule.TrafficType.Egress); rules.addAll(firewallDao.listByNetworkPurposeTrafficType(rule.getNetworkId(), FirewallRule.Purpose.Ipv6Firewall, FirewallRule.TrafficType.Ingress)); return firewallManager.applyFirewallRules(rules, false, CallContext.current().getCallingAccount()); @@ -675,7 +673,7 @@ public void removePublicIpv6PlaceholderNics(Network network) { @Override public void doInTransactionWithoutResult(TransactionStatus status) { for (Nic nic : nics) { - s_logger.debug("Removing placeholder nic " + nic); + logger.debug("Removing placeholder nic " + nic); nicDao.remove(nic.getId()); publishPublicIpv6ReleaseActionEvent(network, nic.getIPv6Address()); } @@ -684,7 +682,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } catch (Exception e) { String msg = String.format("IPv6 Placeholder Nics trash. Exception: %s", e.getMessage()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } } @@ -710,8 +708,8 @@ public void reallyRun() { try { List subnets = ipv6GuestPrefixSubnetNetworkMapDao.findPrefixesInStates(Ipv6GuestPrefixSubnetNetworkMap.State.Allocating); for (Ipv6GuestPrefixSubnetNetworkMapVO subnet : subnets) { - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Running state scanned on Ipv6GuestPrefixSubnetNetworkMap : %s", subnet.getSubnet())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Running state scanned on Ipv6GuestPrefixSubnetNetworkMap : %s", subnet.getSubnet())); } try { if ((new Date()).getTime() - subnet.getUpdated().getTime() < Ipv6PrefixSubnetCleanupInterval.value()*1000) { @@ -719,11 +717,11 @@ public void reallyRun() { } releaseIpv6Subnet(subnet.getId()); } catch (CloudRuntimeException e) { - s_logger.warn(String.format("Failed to release IPv6 guest prefix subnet : %s during state scan", subnet.getSubnet()), e); + logger.warn(String.format("Failed to release IPv6 guest prefix subnet : %s during state scan", subnet.getSubnet()), e); } 
} } catch (Exception e) { - s_logger.warn("Caught exception while running Ipv6GuestPrefixSubnetNetworkMap state scanner: ", e); + logger.warn("Caught exception while running Ipv6GuestPrefixSubnetNetworkMap state scanner: ", e); } } } diff --git a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java index 0cfd6e6021b2..b8c464cfe3e9 100644 --- a/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkMigrationManagerImpl.java @@ -21,7 +21,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.cloud.entity.api.db.VMNetworkMapVO; @@ -107,7 +108,7 @@ import com.cloud.vm.dao.UserVmDao; public class NetworkMigrationManagerImpl implements NetworkMigrationManager { - public static final Logger s_logger = Logger.getLogger(NetworkMigrationManagerImpl.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private DataCenterDao _dcDao = null; @@ -181,8 +182,8 @@ public class NetworkMigrationManagerImpl implements NetworkMigrationManager { private ResourceTagDao _resourceTagDao = null; @Override public long makeCopyOfNetwork(Network network, NetworkOffering networkOffering, Long vpcId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Making a copy of network with uuid " + network.getUuid() + " and id " + network.getId() + " for migration."); + if (logger.isDebugEnabled()) { + logger.debug("Making a copy of network with uuid " + network.getUuid() + " and id " + network.getId() + " for migration."); } long originalNetworkId = network.getId(); NetworkDomainVO domainNetworkMapByNetworkId = _networkDomainDao.getDomainNetworkMapByNetworkId(originalNetworkId); @@ -238,8 +239,8 @@ public class 
NetworkMigrationManagerImpl implements NetworkMigrationManager { assignUserNicsToNewNetwork(originalNetworkId, networkCopyId); assignRouterNicsToNewNetwork(network.getId(), networkCopyId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully created a copy of network " + originalNetwork.getName() + "(" + originalNetwork.getUuid() + ") id is " + originalNetwork.getId() + " for migration. The network copy has uuid " + network.getUuid() + " and id " + network.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully created a copy of network " + originalNetwork.getName() + "(" + originalNetwork.getUuid() + ") id is " + originalNetwork.getId() + " for migration. The network copy has uuid " + network.getUuid() + " and id " + network.getId()); } return networkCopyId; } @@ -285,8 +286,8 @@ private void assignUserNicsToNewNetwork(long srcNetworkId, long dstNetworkId) { @Override public Long makeCopyOfVpc(long vpcId, long vpcOfferingId) { VpcVO vpc = _vpcDao.findById(vpcId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Making a copy of vpc with uuid " + vpc.getUuid() + " and id " + vpc.getId() + " for migration."); + if (logger.isDebugEnabled()) { + logger.debug("Making a copy of vpc with uuid " + vpc.getUuid() + " and id " + vpc.getId() + " for migration."); } if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Specified vpc id doesn't exist in the system"); @@ -313,8 +314,8 @@ public Long makeCopyOfVpc(long vpcId, long vpcOfferingId) { copyVpcDetails(vpcId, copyOfVpcId); reassignGatewayToNewVpc(vpcId, copyOfVpcId); copyVpcResourceTagsToNewVpc(vpcId, copyOfVpcId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully created a copy of network " + vpc.getName() + "(" + vpc.getUuid() + ") id is " + vpc.getId() + " for migration. 
The network copy has uuid " + copyVpcVO.getUuid() + " and id " + copyOfVpc.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully created a copy of network " + vpc.getName() + "(" + vpc.getUuid() + ") id is " + vpc.getId() + " for migration. The network copy has uuid " + copyVpcVO.getUuid() + " and id " + copyOfVpc.getId()); } } catch (ResourceAllocationException e) { throw new CloudRuntimeException(e.getMessage()); @@ -329,7 +330,7 @@ public void startVpc(Vpc vpc) { try { _vpcService.startVpc(vpc.getId(), true); } catch (ResourceUnavailableException | InsufficientCapacityException e) { - s_logger.error("Vpc can not be started. Aborting migration process"); + logger.error("Vpc can not be started. Aborting migration process"); throw new CloudRuntimeException("Vpc can not be started.", e); } } @@ -395,8 +396,8 @@ private void copyVpcResourceTagsToNewVpc(long srcVpcId, long dstVpcId){ private void copyFirewallRulesToNewNetwork(Network srcNetwork, long dstNetworkId) { List firewallRules = _firewallDao.listByNetworkPurposeTrafficType(srcNetwork.getId(), FirewallRule.Purpose.Firewall, FirewallRule.TrafficType.Egress); firewallRules.addAll(_firewallDao.listByNetworkPurposeTrafficType(srcNetwork.getId(), FirewallRule.Purpose.Firewall, FirewallRule.TrafficType.Ingress)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Copying firewall rules from network with id " + srcNetwork.getId() + " to network with id " + dstNetworkId); + if (logger.isDebugEnabled()) { + logger.debug("Copying firewall rules from network with id " + srcNetwork.getId() + " to network with id " + dstNetworkId); } //Loop over all the firewall rules in the original network and copy all values to a new firewall rule @@ -450,7 +451,7 @@ private void assignRouterNicsToNewNetwork(long srcNetworkId, long dstNetworkId) @Override public Network upgradeNetworkToNewNetworkOffering(long networkId, long newPhysicalNetworkId, long networkOfferingId, Long vpcId) { - s_logger.debug("upgrading 
network to network with new offering."); + logger.debug("upgrading network to network with new offering."); NetworkVO network = _networksDao.findById(networkId); NetworkOffering newOffering = _networkOfferingDao.findByIdIncludingRemoved(networkOfferingId); long gurusImplementing = 0; @@ -492,7 +493,7 @@ public void deleteCopyOfNetwork(long networkCopyId, long originalNetworkId) { NicVO userNic = _nicDao.findByNetworkIdAndType(networkCopyId, VirtualMachine.Type.User); if (userNic != null) { - s_logger.error("Something went wrong while migrating nics from the old network to the new network. Failed to delete copy of network. There are still user nics present in the network."); + logger.error("Something went wrong while migrating nics from the old network to the new network. Failed to delete copy of network. There are still user nics present in the network."); throw new CloudRuntimeException("Failed to delete copy of network. There are still user nics present in the network."); } @@ -530,7 +531,7 @@ public void deleteCopyOfVpc(long vpcCopyId, long originalVpcId) { } private Boolean migrateNicsInDB(NicVO originalNic, Network networkInNewPhysicalNet, DataCenter dc, ReservationContext context) { - s_logger.debug("migrating nics in database."); + logger.debug("migrating nics in database."); UserVmVO vmVO = _vmDao.findById(originalNic.getInstanceId()); VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vmVO, null, null, null, null); NicProfile nicProfile = new NicProfile(originalNic, networkInNewPhysicalNet, null, null, null, _networkModel.isSecurityGroupSupportedInNetwork(networkInNewPhysicalNet), null); @@ -569,8 +570,8 @@ private Boolean migrateNicsInDB(NicVO originalNic, Network networkInNewPhysicalN markAsNonDefault(originalNic); _networkMgr.removeNic(vmProfile, originalNic); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Nic is migrated successfully for vm " + vmVO + " to " + networkInNewPhysicalNet); + if (logger.isDebugEnabled()) { + 
logger.debug("Nic is migrated successfully for vm " + vmVO + " to " + networkInNewPhysicalNet); } return true; } diff --git a/server/src/main/java/com/cloud/network/NetworkModelImpl.java b/server/src/main/java/com/cloud/network/NetworkModelImpl.java index 86000205bec0..af0d25c4c1db 100644 --- a/server/src/main/java/com/cloud/network/NetworkModelImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkModelImpl.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.network.dao.NetworkPermissionDao; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Config; @@ -146,7 +145,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class NetworkModelImpl extends ManagerBase implements NetworkModel, Configurable { - static final Logger s_logger = Logger.getLogger(NetworkModelImpl.class); public static final String UNABLE_TO_USE_NETWORK = "Unable to use network with id= %s, permission denied"; @Inject EntityManager _entityMgr; @@ -365,7 +363,7 @@ public Map> getIpToServices(List 0; @@ -804,7 +802,7 @@ public NetworkVO getNetworkWithSGWithFreeIPs(Long zoneId) { } } if (ret_network == null) { - s_logger.debug("Can not find network with security group enabled with free IPs"); + logger.debug("Can not find network with security group enabled with free IPs"); } return ret_network; } @@ -817,7 +815,7 @@ public NetworkVO getNetworkWithSecurityGroupEnabled(Long zoneId) { } if (networks.size() > 1) { - s_logger.debug("There are multiple network with security group enabled? select one of them..."); + logger.debug("There are multiple network with security group enabled? 
select one of them..."); } return networks.get(0); } @@ -911,12 +909,12 @@ public Nic getDefaultNic(long vmId) { } } } else { - s_logger.debug("Unable to find default network for the vm; vm doesn't have any nics"); + logger.debug("Unable to find default network for the vm; vm doesn't have any nics"); return null; } if (defaultNic == null) { - s_logger.debug("Unable to find default network for the vm; vm doesn't have default nic"); + logger.debug("Unable to find default network for the vm; vm doesn't have default nic"); } return defaultNic; @@ -928,7 +926,7 @@ public UserDataServiceProvider getUserDataUpdateProvider(Network network) { String userDataProvider = _ntwkSrvcDao.getProviderForServiceInNetwork(network.getId(), Service.UserData); if (userDataProvider == null) { - s_logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); + logger.debug("Network " + network + " doesn't support service " + Service.UserData.getName()); return null; } @@ -970,7 +968,7 @@ public String getIpOfNetworkElementInVirtualNetwork(long accountId, long dataCen List virtualNetworks = _networksDao.listByZoneAndGuestType(accountId, dataCenterId, GuestType.Isolated, false); if (virtualNetworks.isEmpty()) { - s_logger.trace("Unable to find default Virtual network account id=" + accountId); + logger.trace("Unable to find default Virtual network account id=" + accountId); return null; } @@ -981,7 +979,7 @@ public String getIpOfNetworkElementInVirtualNetwork(long accountId, long dataCen if (networkElementNic != null) { return networkElementNic.getIPv4Address(); } else { - s_logger.warn("Unable to set find network element for the network id=" + virtualNetwork.getId()); + logger.warn("Unable to set find network element for the network id=" + virtualNetwork.getId()); return null; } } @@ -1210,13 +1208,13 @@ private Long getPhysicalNetworkId(long zoneId, List pNtwks, S Long pNtwkId = null; for (PhysicalNetwork pNtwk : pNtwks) { if (tag == null && 
pNtwk.getTags().isEmpty()) { - s_logger.debug("Found physical network id=" + pNtwk.getId() + " with null tag"); + logger.debug("Found physical network id=" + pNtwk.getId() + " with null tag"); if (pNtwkId != null) { throw new CloudRuntimeException("There is more than 1 physical network with empty tag in the zone id=" + zoneId); } pNtwkId = pNtwk.getId(); } else if (tag != null && pNtwk.getTags().contains(tag)) { - s_logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); + logger.debug("Found physical network id=" + pNtwk.getId() + " based on requested tags " + tag); pNtwkId = pNtwk.getId(); break; } @@ -1250,7 +1248,7 @@ public List listNetworkOfferingsForUpgrade(long networkId) { @Override public boolean isSecurityGroupSupportedInNetwork(Network network) { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.trace("Security group can be enabled for Guest networks only; and network " + network + " has a diff traffic type"); + logger.trace("Security group can be enabled for Guest networks only; and network " + network + " has a diff traffic type"); return false; } @@ -1318,8 +1316,8 @@ public String getDefaultManagementTrafficLabel(long zoneId, HypervisorType hyper return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrive the default label for management traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + ex.getMessage()); } } @@ -1353,8 +1351,8 @@ public String getDefaultStorageTrafficLabel(long zoneId, HypervisorType hypervis return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrive the default label for storage traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + + if 
(logger.isDebugEnabled()) { + logger.debug("Failed to retrive the default label for storage traffic:" + "zone: " + zoneId + " hypervisor: " + hypervisorType + " due to:" + ex.getMessage()); } } @@ -1391,7 +1389,7 @@ public List getPhysicalNetworkInfo(long dcId, Hypervis public boolean isProviderEnabledInPhysicalNetwork(long physicalNetowrkId, String providerName) { PhysicalNetworkServiceProviderVO ntwkSvcProvider = _pNSPDao.findByServiceProvider(physicalNetowrkId, providerName); if (ntwkSvcProvider == null) { - s_logger.warn("Unable to find provider " + providerName + " in physical network id=" + physicalNetowrkId); + logger.warn("Unable to find provider " + providerName + " in physical network id=" + physicalNetowrkId); return false; } return isProviderEnabled(ntwkSvcProvider); @@ -1433,7 +1431,7 @@ public String getNetworkTag(HypervisorType hType, Network network) { if (physicalNetworkId == null) { assert (false) : "Can't get the physical network"; - s_logger.warn("Can't get the physical network"); + logger.warn("Can't get the physical network"); return null; } @@ -1684,14 +1682,14 @@ public void checkCapabilityForProvider(Set providers, Service service, @Override public final void checkNetworkPermissions(Account caller, Network network) { if (_accountMgr.isRootAdmin(caller.getAccountId()) && Boolean.TRUE.equals(AdminIsAllowedToDeployAnywhere.value())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("root admin is permitted to do stuff on every network"); + if (logger.isDebugEnabled()) { + logger.debug("root admin is permitted to do stuff on every network"); } } else { if (network == null) { throw new CloudRuntimeException("cannot check permissions on (Network) "); } - s_logger.info(String.format("Checking permission for account %s (%s) on network %s (%s)", caller.getAccountName(), caller.getUuid(), network.getName(), network.getUuid())); + logger.info(String.format("Checking permission for account %s (%s) on network %s (%s)", caller.getAccountName(), 
caller.getUuid(), network.getName(), network.getUuid())); if (network.getGuestType() != GuestType.Shared || network.getAclType() == ACLType.Account) { checkAccountNetworkPermissions(caller, network); @@ -1768,7 +1766,7 @@ public void checkRouterPermissions(Account owner, VirtualRouter router) { _accountMgr.checkAccess(owner, null, true, account); return; } catch (PermissionDeniedException ex) { - s_logger.info("Account " + owner + " do not have permission on router owner " + account); + logger.info("Account " + owner + " do not have permission on router owner " + account); } List routerNics = _nicDao.listByVmId(router.getId()); for (final Nic routerNic : routerNics) { @@ -1878,8 +1876,8 @@ public String getDefaultPublicTrafficLabel(long dcId, HypervisorType hypervisorT return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrieve the default label for public traffic." + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to: " + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrieve the default label for public traffic." 
+ "zone: " + dcId + " hypervisor: " + hypervisorType + " due to: " + ex.getMessage()); } } @@ -1913,8 +1911,8 @@ public String getDefaultGuestTrafficLabel(long dcId, HypervisorType hypervisorTy return label; } } catch (Exception ex) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to retrive the default label for guest traffic:" + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to:" + + if (logger.isDebugEnabled()) { + logger.debug("Failed to retrive the default label for guest traffic:" + "zone: " + dcId + " hypervisor: " + hypervisorType + " due to:" + ex.getMessage()); } } @@ -1987,13 +1985,13 @@ public boolean isNetworkAvailableInDomain(long networkId, long domainId) { Long networkDomainId = null; Network network = getNetwork(networkId); if (network.getGuestType() != GuestType.Shared) { - s_logger.trace("Network id=" + networkId + " is not shared"); + logger.trace("Network id=" + networkId + " is not shared"); return false; } NetworkDomainVO networkDomainMap = _networkDomainDao.getDomainNetworkMapByNetworkId(networkId); if (networkDomainMap == null) { - s_logger.trace("Network id=" + networkId + " is shared, but not domain specific"); + logger.trace("Network id=" + networkId + " is shared, but not domain specific"); return true; } else { networkDomainId = networkDomainMap.getDomainId(); @@ -2025,7 +2023,7 @@ public Set getAvailableIps(Network network, String requestedIp) { for (String ip : ips) { if (requestedIp != null && requestedIp.equals(ip)) { - s_logger.warn("Requested ip address " + requestedIp + " is already in use in network" + network); + logger.warn("Requested ip address " + requestedIp + " is already in use in network" + network); return null; } @@ -2086,14 +2084,14 @@ boolean isProviderEnabled(PhysicalNetworkServiceProvider provider) { boolean isServiceEnabledInNetwork(long physicalNetworkId, long networkId, Service service) { // check if the service is supported in the network if (!areServicesSupportedInNetwork(networkId, 
service)) { - s_logger.debug("Service " + service.getName() + " is not supported in the network id=" + networkId); + logger.debug("Service " + service.getName() + " is not supported in the network id=" + networkId); return false; } // get provider for the service and check if all of them are supported String provider = _ntwkSrvcDao.getProviderForServiceInNetwork(networkId, service); if (!isProviderEnabledInPhysicalNetwork(physicalNetworkId, provider)) { - s_logger.debug("Provider " + provider + " is not enabled in physical network id=" + physicalNetworkId); + logger.debug("Provider " + provider + " is not enabled in physical network id=" + physicalNetworkId); return false; } @@ -2117,7 +2115,7 @@ PhysicalNetwork getOnePhysicalNetworkByZoneAndTrafficType(long zoneId, TrafficTy } if (networkList.size() > 1) { - s_logger.info("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType + ". "); + logger.info("More than one physical networks exist in zone id=" + zoneId + " with traffic type=" + trafficType + ". 
"); } return networkList.get(0); @@ -2272,7 +2270,7 @@ public boolean configure(String name, Map params) throws Configu networkSearch.and("traffictype", networkSearch.entity().getTrafficType(), Op.EQ); NicForTrafficTypeSearch.done(); - s_logger.info("Network Model is configured."); + logger.info("Network Model is configured."); return true; } @@ -2286,11 +2284,11 @@ public boolean start() { Provider implementedProvider = element.getProvider(); if (implementedProvider != null) { if (s_providerToNetworkElementMap.containsKey(implementedProvider.getName())) { - s_logger.error("Cannot start NetworkModel: Provider <-> NetworkElement must be a one-to-one map, " + "multiple NetworkElements found for Provider: " + + logger.error("Cannot start NetworkModel: Provider <-> NetworkElement must be a one-to-one map, " + "multiple NetworkElements found for Provider: " + implementedProvider.getName()); continue; } - s_logger.info("Add provider <-> element map entry. " + implementedProvider.getName() + "-" + element.getName() + "-" + element.getClass().getSimpleName()); + logger.info("Add provider <-> element map entry. " + implementedProvider.getName() + "-" + element.getName() + "-" + element.getClass().getSimpleName()); s_providerToNetworkElementMap.put(implementedProvider.getName(), element.getName()); } if (capabilities != null && implementedProvider != null) { @@ -2310,7 +2308,7 @@ public boolean start() { //After network elements are configured correctly, verify ConfigDrive entries on enabled zones verifyDisabledConfigDriveEntriesOnEnabledZones(); - s_logger.info("Started Network Model"); + logger.info("Started Network Model"); return true; } @@ -2563,7 +2561,7 @@ public boolean isNetworkReadyForGc(long networkId) { // The active nics count (nics_count in op_networks table) might be wrong due to some reasons, should check the state of vms as well. 
// (nics for Starting VMs might not be allocated yet as Starting state also used when vm is being Created) if (_nicDao.countNicsForNonStoppedVms(networkId) > 0 || _nicDao.countNicsForNonStoppedRunningVrs(networkId) > 0) { - s_logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are not Stopped at the moment"); + logger.debug("Network id=" + networkId + " is not ready for GC as it has vms that are not Stopped at the moment"); return false; } @@ -2651,7 +2649,7 @@ public List generateVmData(String userData, String userDataDetails, St try { md5 = MessageDigest.getInstance("MD5"); } catch (NoSuchAlgorithmException e) { - s_logger.error("Unexpected exception " + e.getMessage(), e); + logger.error("Unexpected exception " + e.getMessage(), e); throw new CloudRuntimeException("Unable to get MD5 MessageDigest", e); } md5.reset(); @@ -2669,7 +2667,7 @@ public List generateVmData(String userData, String userDataDetails, St Domain domain = _domainDao.findById(vm.getDomainId()); if (domain != null && VirtualMachineManager.AllowExposeDomainInMetadata.valueIn(domain.getId())) { - s_logger.debug("Adding domain info to cloud metadata"); + logger.debug("Adding domain info to cloud metadata"); vmData.add(new String[]{METATDATA_DIR, CLOUD_DOMAIN_FILE, domain.getName()}); vmData.add(new String[]{METATDATA_DIR, CLOUD_DOMAIN_ID_FILE, domain.getUuid()}); } diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index 18f98e6f99f1..9705f231e8b8 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -82,7 +82,6 @@ import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import 
org.springframework.beans.factory.annotation.Autowired; @@ -259,7 +258,6 @@ * NetworkServiceImpl implements NetworkService. */ public class NetworkServiceImpl extends ManagerBase implements NetworkService, Configurable { - private static final Logger s_logger = Logger.getLogger(NetworkServiceImpl.class); private static final ConfigKey AllowDuplicateNetworkName = new ConfigKey<>("Advanced", Boolean.class, "allow.duplicate.networkname", "true", "Allow creating networks with same name in account", true, ConfigKey.Scope.Account); @@ -716,8 +714,8 @@ public IpAddress allocateIP(Account ipOwner, long zoneId, Long networkId, Boolea if (zone.getNetworkType() == NetworkType.Advanced) { if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); } return _ipAddrMgr.allocateIp(ipOwner, false, caller, callerUserId, zone, displayIp, ipaddress); } else { @@ -763,8 +761,8 @@ public IpAddress allocatePortableIP(Account ipOwner, int regionId, Long zoneId, if (zone.getNetworkType() == NetworkType.Advanced) { if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { _accountMgr.checkAccess(caller, AccessType.UseEntry, false, network); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); } return _ipAddrMgr.allocatePortableIp(ipOwner, caller, zoneId, networkId, null); } else { @@ -806,7 +804,7 @@ public boolean configure(final String name, final Map params) th 
_allowSubdomainNetworkAccess = Boolean.valueOf(_configs.get(Config.SubDomainNetworkAccess.key())); - s_logger.info("Network Service is configured."); + logger.info("Network Service is configured."); return true; } @@ -835,7 +833,7 @@ public boolean configureNicSecondaryIp(NicSecondaryIp secIp, boolean isZoneSgEna if (isZoneSgEnabled) { success = _securityGroupService.securityGroupRulesForVmSecIp(secIp.getNicId(), secondaryIp, true); - s_logger.info("Associated ip address to NIC : " + secIp.getIp4Address()); + logger.info("Associated ip address to NIC : " + secIp.getIp4Address()); } else { success = true; } @@ -882,11 +880,11 @@ public NicSecondaryIp allocateSecondaryGuestIP(final long nicId, IpAddresses req int maxAllowedIpsPerNic = NumbersUtil.parseInt(_configDao.getValue(Config.MaxNumberOfSecondaryIPsPerNIC.key()), 10); Long nicWiseIpCount = _nicSecondaryIpDao.countByNicId(nicId); if (nicWiseIpCount.intValue() >= maxAllowedIpsPerNic) { - s_logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"" + maxAllowedIpsPerNic + " per Nic has been crossed for the nic " + nicId + "."); + logger.error("Maximum Number of Ips \"vm.network.nic.max.secondary.ipaddresses = \"" + maxAllowedIpsPerNic + " per Nic has been crossed for the nic " + nicId + "."); throw new InsufficientAddressCapacityException("Maximum Number of Ips per Nic has been crossed.", Nic.class, nicId); } - s_logger.debug("Calling the ip allocation ..."); + logger.debug("Calling the ip allocation ..."); String ipaddr = null; String ip6addr = null; //Isolated network can exist in Basic zone only, so no need to verify the zone type @@ -920,11 +918,11 @@ public NicSecondaryIp allocateSecondaryGuestIP(final long nicId, IpAddresses req throw new InvalidParameterValueException("Allocating ip to guest nic " + nicId + " failed"); } } catch (InsufficientAddressCapacityException e) { - s_logger.error("Allocating ip to guest nic " + nicId + " failed"); + logger.error("Allocating ip to guest nic 
" + nicId + " failed"); return null; } } else { - s_logger.error("AddIpToVMNic is not supported in this network..."); + logger.error("AddIpToVMNic is not supported in this network..."); return null; } @@ -939,11 +937,11 @@ public Long doInTransaction(TransactionStatus status) { if (!nicSecondaryIpSet) { nicVO.setSecondaryIp(true); // commit when previously set ?? - s_logger.debug("Setting nics table ..."); + logger.debug("Setting nics table ..."); _nicDao.update(nicId, nicVO); } - s_logger.debug("Setting nic_secondary_ip table ..."); + logger.debug("Setting nic_secondary_ip table ..."); Long vmId = nicVO.getInstanceId(); NicSecondaryIpVO secondaryIpVO = new NicSecondaryIpVO(nicId, ip4AddrFinal, ip6AddrFinal, vmId, ipOwner.getId(), ipOwner.getDomainId(), networkId); _nicSecondaryIpDao.persist(secondaryIpVO); @@ -989,7 +987,7 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(network.getNetworkOfferingId()); Long nicId = secIpVO.getNicId(); - s_logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); + logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); //check is this the last secondary ip for NIC List ipList = _nicSecondaryIpDao.listByNicId(nicId); boolean lastIp = false; @@ -1003,7 +1001,7 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { throw new InvalidParameterValueException("Invalid zone Id is given"); } - s_logger.debug("Calling secondary ip " + secIpVO.getIp4Address() + " release "); + logger.debug("Calling secondary ip " + secIpVO.getIp4Address() + " release "); if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == Network.GuestType.Isolated) { //check PF or static NAT is configured on this ip address String secondaryIp = secIpVO.getIp4Address(); @@ -1012,7 +1010,7 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { if (fwRulesList.size() != 0) { for (FirewallRuleVO rule : fwRulesList) { if 
(_portForwardingDao.findByIdAndIp(rule.getId(), secondaryIp) != null) { - s_logger.debug("VM nic IP " + secondaryIp + " is associated with the port forwarding rule"); + logger.debug("VM nic IP " + secondaryIp + " is associated with the port forwarding rule"); throw new InvalidParameterValueException("Can't remove the secondary ip " + secondaryIp + " is associate with the port forwarding rule"); } } @@ -1020,12 +1018,12 @@ public boolean releaseSecondaryIpFromNic(long ipAddressId) { //check if the secondary ip associated with any static nat rule IPAddressVO publicIpVO = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secondaryIp); if (publicIpVO != null) { - s_logger.debug("VM nic IP " + secondaryIp + " is associated with the static NAT rule public IP address id " + publicIpVO.getId()); + logger.debug("VM nic IP " + secondaryIp + " is associated with the static NAT rule public IP address id " + publicIpVO.getId()); throw new InvalidParameterValueException("Can' remove the ip " + secondaryIp + "is associate with static NAT rule public IP address id " + publicIpVO.getId()); } if (_loadBalancerDao.isLoadBalancerRulesMappedToVmGuestIp(vm.getId(), secondaryIp, network.getId())) { - s_logger.debug("VM nic IP " + secondaryIp + " is mapped to load balancing rule"); + logger.debug("VM nic IP " + secondaryIp + " is mapped to load balancing rule"); throw new InvalidParameterValueException("Can't remove the secondary ip " + secondaryIp + " is mapped to load balancing rule"); } @@ -1057,11 +1055,11 @@ boolean removeNicSecondaryIP(final NicSecondaryIpVO ipVO, final boolean lastIp) public void doInTransactionWithoutResult(TransactionStatus status) { if (lastIp) { nic.setSecondaryIp(false); - s_logger.debug("Setting nics secondary ip to false ..."); + logger.debug("Setting nics secondary ip to false ..."); _nicDao.update(nicId, nic); } - s_logger.debug("Revoving nic secondary ip entry ..."); + logger.debug("Revoving nic secondary ip entry ..."); 
_nicSecondaryIpDao.remove(ipVO.getId()); } }); @@ -1099,7 +1097,7 @@ public IpAddress reserveIpAddress(Account account, Boolean displayIp, Long ipAdd } if (State.Reserved.equals(ipVO.getState())) { if (account.getId() == ipVO.getAccountId()) { - s_logger.info(String.format("IP address %s has already been reserved for account %s", ipVO.getAddress(), account)); + logger.info(String.format("IP address %s has already been reserved for account %s", ipVO.getAddress(), account)); return ipVO; } throw new InvalidParameterValueException("Unable to reserve a IP because it has already been reserved for another account."); @@ -1120,7 +1118,7 @@ public IpAddress reserveIpAddress(Account account, Boolean displayIp, Long ipAdd try { _resourceLimitMgr.checkResourceLimit(account, Resource.ResourceType.public_ip); } catch (ResourceAllocationException ex) { - s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + account); + logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + account); throw new AccountLimitException("Maximum number of public IP addresses for account: " + account.getAccountName() + " has been exceeded."); } } @@ -1189,7 +1187,7 @@ private boolean releaseIpAddressInternal(long ipAddressId) throws InsufficientAd } if (ipVO.getAllocatedTime() == null) { - s_logger.debug("Ip Address id= " + ipAddressId + " is not allocated, so do nothing."); + logger.debug("Ip Address id= " + ipAddressId + " is not allocated, so do nothing."); return true; } @@ -1243,7 +1241,7 @@ private boolean releaseIpAddressInternal(long ipAddressId) throws InsufficientAd } } } else { - s_logger.warn("Failed to release public ip address id=" + ipAddressId); + logger.warn("Failed to release public ip address id=" + ipAddressId); } return success; } @@ -1461,7 +1459,7 @@ public Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapac ipv4 = true; } } catch (UnknownHostException e) { - 
s_logger.error("Unable to convert gateway IP to a InetAddress", e); + logger.error("Unable to convert gateway IP to a InetAddress", e); throw new InvalidParameterValueException("Gateway parameter is invalid"); } } @@ -1493,8 +1491,8 @@ public Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapac } if (gateway != null && netmask != null) { if (NetUtils.isNetworkorBroadcastIP(gateway, netmask)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("The gateway IP provided is " + gateway + " and netmask is " + netmask + ". The IP is either broadcast or network IP."); + if (logger.isDebugEnabled()) { + logger.debug("The gateway IP provided is " + gateway + " and netmask is " + netmask + ". The IP is either broadcast or network IP."); } throw new InvalidParameterValueException("Invalid gateway IP provided. Either the IP is broadcast or network IP."); } @@ -1675,7 +1673,7 @@ public Network createGuestNetwork(CreateNetworkCmd cmd) throws InsufficientCapac void checkAndSetRouterSourceNatIp(Account owner, CreateNetworkCmd cmd, Network network) throws InsufficientAddressCapacityException, ResourceAllocationException { String sourceNatIp = cmd.getSourceNatIP(); if (sourceNatIp == null) { - s_logger.debug(String.format("no source nat ip given for create network %s command, using something arbitrary.", cmd.getNetworkName())); + logger.debug(String.format("no source nat ip given for create network %s command, using something arbitrary.", cmd.getNetworkName())); return; // nothing to try } IpAddress ip = allocateIP(owner, cmd.getZoneId(), network.getId(), null, sourceNatIp); @@ -1683,7 +1681,7 @@ void checkAndSetRouterSourceNatIp(Account owner, CreateNetworkCmd cmd, Network n associateIPToNetwork(ip.getId(), network.getId()); } catch (ResourceUnavailableException e) { String msg = String.format("can´t use %s as sourcenat IP address for network %s/%s as it is un available", sourceNatIp, network.getName(), network.getUuid()); - s_logger.error(msg); + 
logger.error(msg); throw new CloudRuntimeException(msg,e); } } @@ -1706,7 +1704,7 @@ private boolean checkAndUpdateRouterSourceNatIp(UpdateNetworkCmd cmd, Network ne } catch (Exception e) { // pokemon execption from transaction String msg = String.format("Update of source NAT ip to %s for network \"%s\"/%s failed due to %s", requestedIp.getAddress().addr(), network.getName(), network.getUuid(), e.getLocalizedMessage()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } } @@ -1717,21 +1715,21 @@ private boolean checkAndUpdateRouterSourceNatIp(UpdateNetworkCmd cmd, Network ne private IPAddressVO checkSourceNatIpAddressForUpdate(UpdateNetworkCmd cmd, Network network) { String sourceNatIp = cmd.getSourceNatIP(); if (sourceNatIp == null) { - s_logger.trace(String.format("no source NAT ip given to update network %s with.", cmd.getNetworkName())); + logger.trace(String.format("no source NAT ip given to update network %s with.", cmd.getNetworkName())); return null; } else { - s_logger.info(String.format("updating network %s to have source NAT ip %s", cmd.getNetworkName(), sourceNatIp)); + logger.info(String.format("updating network %s to have source NAT ip %s", cmd.getNetworkName(), sourceNatIp)); } // check if the address is already aqcuired for this network IPAddressVO requestedIp = _ipAddressDao.findByIp(sourceNatIp); if (requestedIp == null || requestedIp.getAssociatedWithNetworkId() == null || ! requestedIp.getAssociatedWithNetworkId().equals(network.getId())) { - s_logger.warn(String.format("Source NAT IP %s is not associated with network %s/%s. It cannot be used as source NAT IP.", + logger.warn(String.format("Source NAT IP %s is not associated with network %s/%s. It cannot be used as source NAT IP.", sourceNatIp, network.getName(), network.getUuid())); return null; } // check if it is the current source NAT address if (requestedIp.isSourceNat()) { - s_logger.info(String.format("IP address %s is allready the source Nat address. 
Not updating!", sourceNatIp)); + logger.info(String.format("IP address %s is allready the source Nat address. Not updating!", sourceNatIp)); return null; } return requestedIp; @@ -1860,7 +1858,7 @@ private Account getOwningAccount(CreateNetworkCmd cmd, Account caller) { if ((cmd.getAccountName() != null && domainId != null) || cmd.getProjectId() != null) { owner = _accountMgr.finalizeOwner(caller, cmd.getAccountName(), domainId, cmd.getProjectId()); } else { - s_logger.info(String.format("Assigning the network to caller:%s because either projectId or accountname and domainId are not provided", caller.getAccountName())); + logger.info(String.format("Assigning the network to caller:%s because either projectId or accountname and domainId are not provided", caller.getAccountName())); owner = caller; } return owner; @@ -1885,7 +1883,7 @@ protected void mtuCheckForVpcNetwork(Long vpcId, Pair interfac if (vpc == null) { throw new CloudRuntimeException(String.format("VPC with id %s not found", vpcId)); } - s_logger.warn(String.format("VPC public MTU already set at VPC creation phase to: %s. Ignoring public MTU " + + logger.warn(String.format("VPC public MTU already set at VPC creation phase to: %s. Ignoring public MTU " + "passed during VPC network tier creation ", vpc.getPublicMtu())); interfaceMTUs.set(vpc.getPublicMtu(), privateMtu); } @@ -1904,13 +1902,13 @@ protected Pair validateMtuConfig(Integer publicMtu, Integer pr String subject = "Incorrect MTU configured on network for public interfaces of the VR"; String message = String.format("Configured MTU for network VR's public interfaces exceeds the upper limit " + "enforced by zone level setting: %s. 
VR's public interfaces can be configured with a maximum MTU of %s", VRPublicInterfaceMtu.key(), VRPublicInterfaceMtu.valueIn(zoneId)); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); publicMtu = vrMaxMtuForPublicIfaces; } else if (publicMtu < MINIMUM_MTU) { String subject = "Incorrect MTU configured on network for public interfaces of the VR"; String message = String.format("Configured MTU for network VR's public interfaces is lesser than the supported minimum of %s.", MINIMUM_MTU); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); publicMtu = MINIMUM_MTU; } @@ -1919,13 +1917,13 @@ protected Pair validateMtuConfig(Integer publicMtu, Integer pr String subject = "Incorrect MTU configured on network for private interface of the VR"; String message = String.format("Configured MTU for network VR's public interfaces exceeds the upper limit " + "enforced by zone level setting: %s. 
VR's public interfaces can be configured with a maximum MTU of %s", VRPublicInterfaceMtu.key(), VRPublicInterfaceMtu.valueIn(zoneId)); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PRIVATE_IFACE_MTU, zoneId, null, subject, message); privateMtu = vrMaxMtuForPrivateIfaces; } else if (privateMtu < MINIMUM_MTU) { String subject = "Incorrect MTU configured on network for private interfaces of the VR"; String message = String.format("Configured MTU for network VR's private interfaces is lesser than the supported minimum of %s.", MINIMUM_MTU); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PRIVATE_IFACE_MTU, zoneId, null, subject, message); privateMtu = MINIMUM_MTU; } @@ -1970,16 +1968,16 @@ private Network implementedNetworkInCreation(final Account caller, final DataCen try { DeployDestination dest = new DeployDestination(zone, null, null, null); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + network, s_logger); + Journal journal = new Journal.LogJournal("Implementing " + network, logger); ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, caller); - s_logger.debug("Implementing network " + network + " as a part of network provision for persistent network"); + logger.debug("Implementing network " + network + " as a part of network provision for persistent network"); Pair implementedNetwork = _networkMgr.implementNetwork(network.getId(), dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - s_logger.warn("Failed to provision the network " + network); + logger.warn("Failed to provision the network " + network); } return implementedNetwork.second(); } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to implement persistent guest network " + network 
+ "due to ", ex); + logger.warn("Failed to implement persistent guest network " + network + "due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement persistent guest network"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -1991,10 +1989,10 @@ private void validateNetworkOfferingForNonRootAdminUser(NetworkOffering ntwkOff) throw new InvalidParameterValueException("This user can only create a Guest network"); } if (ntwkOff.getGuestType() == GuestType.L2 || ntwkOff.getGuestType() == GuestType.Isolated) { - s_logger.debug(String.format("Creating a network from network offerings having traffic type [%s] and network type [%s].", + logger.debug(String.format("Creating a network from network offerings having traffic type [%s] and network type [%s].", TrafficType.Guest, ntwkOff.getGuestType())); } else if (ntwkOff.getGuestType() == GuestType.Shared && ! ntwkOff.isSpecifyVlan()) { - s_logger.debug(String.format("Creating a network from network offerings having traffic type [%s] and network type [%s] with specifyVlan=%s.", + logger.debug(String.format("Creating a network from network offerings having traffic type [%s] and network type [%s] with specifyVlan=%s.", TrafficType.Guest, GuestType.Shared, ntwkOff.isSpecifyVlan())); } else { throw new InvalidParameterValueException( @@ -2660,7 +2658,7 @@ public boolean deleteNetwork(long networkId, boolean forced) { NetworkVO associatedNetwork = _networksDao.findById(networkDetailVO.getResourceId()); if (associatedNetwork != null) { String msg = String.format("Cannot delete network %s which is associated to another network %s", network.getUuid(), associatedNetwork.getUuid()); - s_logger.debug(msg); + logger.debug(msg); throw new InvalidParameterValueException(msg); } } @@ -2724,9 +2722,9 @@ public boolean restartNetwork(NetworkVO network, boolean cleanup, boolean makeRe long id = network.getId(); boolean success = _networkMgr.restartNetwork(id, callerAccount, user, cleanup, livePatch); 
if (success) { - s_logger.debug(String.format("Network id=%d is restarted successfully.",id)); + logger.debug(String.format("Network id=%d is restarted successfully.",id)); } else { - s_logger.warn(String.format("Network id=%d failed to restart.",id)); + logger.warn(String.format("Network id=%d failed to restart.",id)); } return success; @@ -2843,19 +2841,19 @@ private void replugNicsForUpdatedNetwork(NetworkVO network) throws ResourceUnava long vmId = nic.getInstanceId(); VMInstanceVO vm = _vmDao.findById(vmId); if (vm == null) { - s_logger.error(String.format("Cannot replug NIC: %s as VM for it is not found with ID: %d", nic, vmId)); + logger.error(String.format("Cannot replug NIC: %s as VM for it is not found with ID: %d", nic, vmId)); continue; } if (!Hypervisor.HypervisorType.VMware.equals(vm.getHypervisorType())) { - s_logger.debug(String.format("Cannot replug NIC: %s for VM: %s as it is not on VMware", nic, vm)); + logger.debug(String.format("Cannot replug NIC: %s for VM: %s as it is not on VMware", nic, vm)); continue; } if (!VirtualMachine.Type.User.equals(vm.getType())) { - s_logger.debug(String.format("Cannot replug NIC: %s for VM: %s as it is not a user VM", nic, vm)); + logger.debug(String.format("Cannot replug NIC: %s for VM: %s as it is not a user VM", nic, vm)); continue; } if (!VirtualMachine.State.Running.equals(vm.getState())) { - s_logger.debug(String.format("Cannot replug NIC: %s for VM: %s as it is not in running state", nic, vm)); + logger.debug(String.format("Cannot replug NIC: %s for VM: %s as it is not in running state", nic, vm)); continue; } Host host = _hostDao.findById(vm.getHostId()); @@ -3098,11 +3096,11 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { List nicsPresent = _nicDao.listByNetworkId(networkId); String cidrIpRange[] = NetUtils.getIpRangeFromCidr(guestVmCidrPair[0], size); - s_logger.info("The start IP of the specified guest vm cidr is: " + cidrIpRange[0] + " and end IP is: " + cidrIpRange[1]); + 
logger.info("The start IP of the specified guest vm cidr is: " + cidrIpRange[0] + " and end IP is: " + cidrIpRange[1]); long startIp = NetUtils.ip2Long(cidrIpRange[0]); long endIp = NetUtils.ip2Long(cidrIpRange[1]); long range = endIp - startIp + 1; - s_logger.info("The specified guest vm cidr has " + range + " IPs"); + logger.info("The specified guest vm cidr has " + range + " IPs"); for (NicVO nic : nicsPresent) { if (nic.getIPv4Address() == null) { @@ -3141,14 +3139,14 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { // Condition for IP Reservation reset : guestVmCidr and network CIDR are same if (network.getNetworkCidr().equals(guestVmCidr)) { - s_logger.warn("Guest VM CIDR and Network CIDR both are same, reservation will reset."); + logger.warn("Guest VM CIDR and Network CIDR both are same, reservation will reset."); network.setNetworkCidr(null); } // Finally update "cidr" with the guestVmCidr // which becomes the effective address space for CloudStack guest VMs network.setCidr(guestVmCidr); _networksDao.update(networkId, network); - s_logger.info("IP Reservation has been applied. The new CIDR for Guests Vms is " + guestVmCidr); + logger.info("IP Reservation has been applied. 
The new CIDR for Guests Vms is " + guestVmCidr); } Pair mtus = validateMtuOnUpdate(network, dc.getId(), publicMtu, privateMtu); @@ -3224,7 +3222,7 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { _networkMgr.cleanupConfigForServicesInNetwork(servicesNotInNewOffering, network); } } catch (Exception e) { // old pokemon catch that used to catch throwable - s_logger.debug("failed to cleanup config related to unused services error:" + e.getMessage()); + logger.debug("failed to cleanup config related to unused services error:" + e.getMessage()); } boolean validStateToShutdown = (network.getState() == Network.State.Implemented || network.getState() == Network.State.Setup || network.getState() == Network.State.Allocated); @@ -3234,21 +3232,21 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { if (restartNetwork) { if (validStateToShutdown) { if (!changeCidr) { - s_logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { - s_logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); + logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network elements and resources as a part of update to network of specified id"); ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } else { // We need to shutdown the network, since we want to re-implement the network. 
- s_logger.debug("Shutting down network id=" + networkId + " as a part of network update"); + logger.debug("Shutting down network id=" + networkId + " as a part of network update"); //check if network has reservation if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { - s_logger.warn( + logger.warn( "Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation."); //set cidr to the newtork cidr network.setCidr(network.getNetworkCidr()); @@ -3257,7 +3255,7 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { } if (!_networkMgr.shutdownNetwork(network.getId(), context, true)) { - s_logger.warn("Failed to shutdown the network as a part of update to network with specified id"); + logger.warn("Failed to shutdown the network as a part of update to network with specified id"); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network as a part of update of specified network id"); ex.addProxyObject(network.getUuid(), "networkId"); throw ex; @@ -3301,7 +3299,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { long vmId = nic.getInstanceId(); VMInstanceVO vm = _vmDao.findById(vmId); if (vm == null) { - s_logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); + logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); continue; } long isDefault = (nic.isDefaultNic()) ? 
1 : 0; @@ -3326,7 +3324,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (restartNetwork) { if (network.getState() != Network.State.Allocated) { DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); - s_logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); + logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); try { if (!changeCidr) { _networkMgr.implementNetworkElementsAndResources(dest, context, network, _networkOfferingDao.findById(network.getNetworkOfferingId())); @@ -3334,7 +3332,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _networkMgr.implementNetwork(network.getId(), dest, context); } } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id) elements and resources as a part of network update"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -3353,7 +3351,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); _networkMgr.implementNetwork(network.getId(), dest, context); } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part o" + "f network update due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part o" + "f network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified" + " id) elements and resources as a 
part of network update"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -3388,7 +3386,7 @@ protected Pair validateMtuOnUpdate(NetworkVO network, Long zon } else if (publicMtu < MINIMUM_MTU) { String subject = "Incorrect MTU configured on network for public interfaces of the VR"; String message = String.format("Configured MTU for network VR's public interfaces is lesser than the supported minimum of %s.", MINIMUM_MTU); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); publicMtu = MINIMUM_MTU; } @@ -3400,14 +3398,14 @@ protected Pair validateMtuOnUpdate(NetworkVO network, Long zon } else if (privateMtu < MINIMUM_MTU) { String subject = "Incorrect MTU configured on network for private interfaces of the VR"; String message = String.format("Configured MTU for network VR's private interfaces is lesser than the supported minimum of %s.", MINIMUM_MTU); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PRIVATE_IFACE_MTU, zoneId, null, subject, message); privateMtu = MINIMUM_MTU; } } if (publicMtu != null && network.getVpcId() != null) { - s_logger.warn("Cannot update VPC public interface MTU via network tiers. " + + logger.warn("Cannot update VPC public interface MTU via network tiers. " + "Please update the public interface MTU via the VPC. Skipping.. 
"); publicMtu = null; } @@ -3445,7 +3443,7 @@ protected boolean updateMtuOnVr(Map> routersToIpList) { Long routerId = routerEntrySet.getKey(); DomainRouterVO router = routerDao.findById(routerId); if (router == null) { - s_logger.error(String.format("Failed to find router with id: %s", routerId)); + logger.error(String.format("Failed to find router with id: %s", routerId)); continue; } Commands cmds = new Commands(Command.OnError.Stop); @@ -3458,12 +3456,12 @@ protected boolean updateMtuOnVr(Map> routersToIpList) { networkHelper.sendCommandsToRouter(router, cmds); Answer updateNetworkAnswer = cmds.getAnswer("updateNetwork"); if (!(updateNetworkAnswer != null && updateNetworkAnswer.getResult())) { - s_logger.warn("Unable to update guest network on router " + router); + logger.warn("Unable to update guest network on router " + router); throw new CloudRuntimeException("Failed to update guest network with new MTU"); } success = true; } catch (ResourceUnavailableException e) { - s_logger.error(String.format("Failed to update network MTU for router %s due to %s", router, e.getMessage())); + logger.error(String.format("Failed to update network MTU for router %s due to %s", router, e.getMessage())); success = false; } } @@ -3521,12 +3519,12 @@ public Network migrateGuestNetwork(long networkId, long networkOfferingId, Accou //perform below validation if the network is vpc network if (network.getVpcId() != null) { - s_logger.warn("Failed to migrate network as the specified network is a vpc tier. Use migrateVpc."); + logger.warn("Failed to migrate network as the specified network is a vpc tier. Use migrateVpc."); throw new InvalidParameterValueException("Failed to migrate network as the specified network is a vpc tier. 
Use migrateVpc."); } if (_configMgr.isOfferingForVpc(newNtwkOff)) { - s_logger.warn("Failed to migrate network as the specified network offering is a VPC offering"); + logger.warn("Failed to migrate network as the specified network offering is a VPC offering"); throw new InvalidParameterValueException("Failed to migrate network as the specified network offering is a VPC offering"); } @@ -3539,14 +3537,14 @@ public Network migrateGuestNetwork(long networkId, long networkOfferingId, Accou NetworkOffering oldNtwkOff = _networkOfferingDao.findByIdIncludingRemoved(oldNetworkOfferingId); if (!resume && network.getRelated() != network.getId()) { - s_logger.warn("Related network is not equal to network id. You might want to re-run migration with resume = true command."); + logger.warn("Related network is not equal to network id. You might want to re-run migration with resume = true command."); throw new CloudRuntimeException("Failed to migrate network as previous migration left this network in transient condition. 
Specify resume as true."); } if (networkNeedsMigration(network, newPhysicalNetworkId, oldNtwkOff, newNtwkOff)) { return migrateNetworkToPhysicalNetwork(network, oldNtwkOff, newNtwkOff, null, null, newPhysicalNetworkId, callerAccount, callerUser); } else { - s_logger.info("Network does not need migration."); + logger.info("Network does not need migration."); return network; } } @@ -3600,11 +3598,11 @@ private Network migrateNetworkToPhysicalNetwork(Network network, NetworkOffering if (shouldImplement) { DeployDestination dest = new DeployDestination(zone, null, null, null); - s_logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); + logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); try { networkInNewPhysicalNet = _networkMgr.implementNetwork(networkInNewPhysicalNet.getId(), dest, context).second(); } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); + logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id) elements and resources as a part of network update"); e.addProxyObject(network.getUuid(), "networkId"); throw e; @@ -3644,7 +3642,7 @@ public Vpc migrateVpcNetwork(long vpcId, long vpcOfferingId, Map //let's check if the user did not change the vpcoffering opposed to the last failed run. verifyAlreadyMigratedTiers(vpcCopyId, vpcOfferingId, networkToOffering); } else { - s_logger.warn("This vpc has a migration row in the resource details table. You might want to re-run migration with resume = true command."); + logger.warn("This vpc has a migration row in the resource details table. 
You might want to re-run migration with resume = true command."); throw new CloudRuntimeException("Failed to migrate VPC as previous migration left this VPC in transient condition. Specify resume as true."); } } @@ -3686,7 +3684,7 @@ public Vpc migrateVpcNetwork(long vpcId, long vpcOfferingId, Map try { _vpcMgr.validateNtwkOffForNtwkInVpc(networkId, newNtwkOff.getId(), tier.getCidr(), tier.getNetworkDomain(), copyOfVpc, tier.getGateway(), networkAccount, tier.getNetworkACLId()); } catch (InvalidParameterValueException e) { - s_logger.error("Specified network offering can not be used in combination with specified vpc offering. Aborting migration. You can re-run with resume = true and the correct uuid."); + logger.error("Specified network offering can not be used in combination with specified vpc offering. Aborting migration. You can re-run with resume = true and the correct uuid."); throw e; } @@ -3740,7 +3738,7 @@ private void vpcTiersCanBeMigrated(List tiersInVpc, Account a private void verifyAlreadyMigratedTiers(long migratedVpcId, long vpcOfferingId, Map networkToOffering) { Vpc migratedVpc = _vpcDao.findById(migratedVpcId); if (migratedVpc.getVpcOfferingId() != vpcOfferingId) { - s_logger.error("The vpc is already partially migrated in a previous run. The provided vpc offering is not the same as the one used during the first migration process."); + logger.error("The vpc is already partially migrated in a previous run. 
The provided vpc offering is not the same as the one used during the first migration process."); throw new InvalidParameterValueException("Failed to resume migrating VPC as VPC offering does not match previously specified VPC offering (" + migratedVpc.getVpcOfferingId() + ")"); } @@ -3827,7 +3825,7 @@ private void verifyNetworkCanBeMigrated(Account callerAccount, Network network) boolean validateNetworkReadyToMigrate = (network.getState() == Network.State.Implemented || network.getState() == Network.State.Setup || network.getState() == Network.State.Allocated); if (!validateNetworkReadyToMigrate) { - s_logger.error("Failed to migrate network as it is in invalid state."); + logger.error("Failed to migrate network as it is in invalid state."); CloudRuntimeException ex = new CloudRuntimeException("Failed to migrate network as it is in invalid state."); ex.addProxyObject(network.getUuid(), "networkId"); throw ex; @@ -3845,19 +3843,19 @@ private boolean canMoveToPhysicalNetwork(Network network, long oldNetworkOfferin // Type of the network should be the same if (oldNetworkOffering.getGuestType() != newNetworkOffering.getGuestType()) { - s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " are of different types, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " are of different types, can't upgrade"); return false; } // Traffic types should be the same if (oldNetworkOffering.getTrafficType() != newNetworkOffering.getTrafficType()) { - s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different traffic types, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different traffic types, can't upgrade"); return false; } // specify ipRanges should be the same if (oldNetworkOffering.isSpecifyIpRanges() != newNetworkOffering.isSpecifyIpRanges()) { - 
s_logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyIpRangess, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyIpRangess, can't upgrade"); return false; } @@ -3892,26 +3890,26 @@ protected boolean canUpgrade(Network network, long oldNetworkOfferingId, long ne // security group service should be the same if (areServicesSupportedByNetworkOffering(oldNetworkOfferingId, Service.SecurityGroup) != areServicesSupportedByNetworkOffering(newNetworkOfferingId, Service.SecurityGroup)) { - s_logger.debug("Offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different securityGroupProperty, can't upgrade"); + logger.debug("Offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different securityGroupProperty, can't upgrade"); return false; } // tags should be the same if (newNetworkOffering.getTags() != null) { if (oldNetworkOffering.getTags() == null) { - s_logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); + logger.debug("New network offering id=" + newNetworkOfferingId + " has tags and old network offering id=" + oldNetworkOfferingId + " doesn't, can't upgrade"); return false; } if (!com.cloud.utils.StringUtils.areTagsEqual(oldNetworkOffering.getTags(), newNetworkOffering.getTags())) { - s_logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); + logger.debug("Network offerings " + newNetworkOffering.getUuid() + " and " + oldNetworkOffering.getUuid() + " have different tags, can't upgrade"); return false; } } // specify vlan should be the same if (oldNetworkOffering.isSpecifyVlan() != newNetworkOffering.isSpecifyVlan()) { - s_logger.debug("Network offerings " + 
newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyVlan, can't upgrade"); + logger.debug("Network offerings " + newNetworkOfferingId + " and " + oldNetworkOfferingId + " have different values for specifyVlan, can't upgrade"); return false; } @@ -4032,7 +4030,7 @@ public PhysicalNetworkVO doInTransaction(TransactionStatus status) { } }); } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to create a physical network"); } } @@ -4185,13 +4183,13 @@ public void addOrRemoveVnets(String[] listOfRanges, final PhysicalNetworkVO netw @Override public void doInTransactionWithoutResult(TransactionStatus status) { if (addVnetsFinal != null) { - s_logger.debug("Adding vnet range " + addVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + logger.debug("Adding vnet range " + addVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + " as a part of updatePhysicalNetwork call"); //add vnet takes a list of strings to be added. each string is a vnet. _dcDao.addVnet(network.getDataCenterId(), network.getId(), addVnetsFinal); } if (removeVnetsFinal != null) { - s_logger.debug("removing vnet range " + removeVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + logger.debug("removing vnet range " + removeVnetsFinal.toString() + " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + " as a part of updatePhysicalNetwork call"); //deleteVnets takes a list of strings to be removed. each string is a vnet. 
_dcVnetDao.deleteVnets(TransactionLegacy.currentTxn(), network.getDataCenterId(), network.getId(), removeVnetsFinal); @@ -4218,7 +4216,7 @@ private List> validateVlanRange(PhysicalNetworkVO network // for GRE phynets allow up to 32bits // TODO: Not happy about this test. // What about guru-like objects for physical networs? - s_logger.debug("ISOLATION METHODS:" + network.getIsolationMethods()); + logger.debug("ISOLATION METHODS:" + network.getIsolationMethods()); // Java does not have unsigned types... if (network.getIsolationMethods().contains("GRE")) { minVnet = MIN_GRE_KEY; @@ -4229,12 +4227,12 @@ private List> validateVlanRange(PhysicalNetworkVO network // fail if zone already contains VNI, need to be unique per zone. // since adding a range adds each VNI to the database, need only check min/max for (String vnet : VnetRange) { - s_logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); + logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); List vnis = _dcVnetDao.findVnet(network.getDataCenterId(), vnet); if (vnis != null && !vnis.isEmpty()) { for (DataCenterVnetVO vni : vnis) { if (vni.getPhysicalNetworkId() != network.getId()) { - s_logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); + logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); throw new InvalidParameterValueException("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); } } @@ -4257,7 +4255,7 @@ private List> validateVlanRange(PhysicalNetworkVO network StartVnet = Integer.parseInt(VnetRange[0]); EndVnet = Integer.parseInt(VnetRange[1]); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse vnet range:", e); + logger.warn("Unable to parse vnet range:", e); throw new InvalidParameterValueException("Please 
provide valid vnet range. The vnet range should be a comma separated list example 2001-2012,3000-3005." + rangeMessage); } if (StartVnet < minVnet || EndVnet > maxVnet) { @@ -4274,7 +4272,7 @@ private List> validateVlanRange(PhysicalNetworkVO network } public void validateIfServiceOfferingIsActiveAndSystemVmTypeIsDomainRouter(final Long serviceOfferingId) { - s_logger.debug(String.format("Validating if service offering [%s] is active, and if system VM is of Domain Router type.", serviceOfferingId)); + logger.debug(String.format("Validating if service offering [%s] is active, and if system VM is of Domain Router type.", serviceOfferingId)); final ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(serviceOfferingId); if (serviceOffering == null) { @@ -4426,10 +4424,10 @@ private boolean deleteProviders() { try { deleteNetworkServiceProvider(provider.getId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); + logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); + logger.warn("Unable to complete destroy of the physical network provider: " + provider.getProviderName() + ", id: " + provider.getId(), e); return false; } } @@ -4583,7 +4581,7 @@ public GuestVlanRange dedicateGuestVlanRange(DedicateGuestVlanRangeCmd cmd) { startVlan = Integer.parseInt(vlanRange[0]); endVlan = Integer.parseInt(vlanRange[1]); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse guest vlan range:", e); + logger.warn("Unable to parse guest vlan range:", e); throw new InvalidParameterValueException("Please provide valid guest vlan range"); } @@ 
-4686,7 +4684,7 @@ private List getVlanFromRange(String vlanRange) { tokens.add(startVlan); tokens.add(endVlan); } catch (NumberFormatException e) { - s_logger.warn("Unable to parse guest vlan range:", e); + logger.warn("Unable to parse guest vlan range:", e); throw new InvalidParameterValueException("Please provide valid guest vlan range"); } return tokens; @@ -4893,7 +4891,7 @@ public PhysicalNetworkServiceProvider addProviderToPhysicalNetwork(Long physical return nsp; } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to add a provider to physical network"); } @@ -4948,8 +4946,8 @@ public PhysicalNetworkServiceProvider updateNetworkServiceProvider(Long id, Stri boolean update = false; if (state != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("trying to update the state of the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId() + " to state: " + stateStr); + if (logger.isDebugEnabled()) { + logger.debug("trying to update the state of the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId() + " to state: " + stateStr); } switch (state) { case Enabled: @@ -5015,8 +5013,8 @@ public boolean deleteNetworkServiceProvider(Long id) throws ConcurrentOperationE Account callerAccount = _accountMgr.getActiveAccountById(callerUser.getAccountId()); // shutdown the provider instances ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shutting down the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId()); + if (logger.isDebugEnabled()) { + logger.debug("Shutting down the service provider id=" + id + " on physical network: " + provider.getPhysicalNetworkId()); } NetworkElement element = _networkModel.getElementImplementingProvider(provider.getProviderName()); if (element == null) { 
@@ -5075,7 +5073,7 @@ private void checkForPhysicalNetworksWithoutTag(PhysicalNetworkVO physicalNetwor } } if (networkWithoutTagCount > 0) { - s_logger.error("Number of physical networks without tags are " + networkWithoutTagCount); + logger.error("Number of physical networks without tags are " + networkWithoutTagCount); throw new CloudRuntimeException("There are more than 1 physical network without tags in the zone= " + physicalNetwork.getDataCenterId()); } @@ -5153,7 +5151,7 @@ public PhysicalNetworkTrafficType addTrafficTypeToPhysicalNetwork(Long physicalN // find row in networks table that is defined as 'Public', created when zone was deployed NetworkVO publicNetwork = _networksDao.listByZoneAndTrafficType(network.getDataCenterId(), TrafficType.Public).get(0); if (publicNetwork != null) { - s_logger.debug("setting public network " + publicNetwork + " to broadcast type vxlan"); + logger.debug("setting public network " + publicNetwork + " to broadcast type vxlan"); publicNetwork.setBroadcastDomainType(BroadcastDomainType.Vxlan); _networksDao.persist(publicNetwork); } @@ -5162,7 +5160,7 @@ public PhysicalNetworkTrafficType addTrafficTypeToPhysicalNetwork(Long physicalN return pNetworktrafficType; } catch (Exception ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to add a traffic type to physical network"); } @@ -5318,7 +5316,7 @@ private PhysicalNetworkServiceProvider addDefaultOvsToPhysicalNetwork(long physi } OvsProviderVO element = _ovsProviderDao.findByNspId(nsp.getId()); if (element != null) { - s_logger.debug("There is already a Ovs element with service provider id " + nsp.getId()); + logger.debug("There is already a Ovs element with service provider id " + nsp.getId()); return nsp; } element = new OvsProviderVO(nsp.getId()); @@ -5557,7 +5555,7 @@ public Network doInTransaction(TransactionStatus status) throws ResourceAllocati //create Guest network privateNetwork = 
_networkMgr.createPrivateNetwork(ntwkOffFinal.getId(), networkName, displayText, gateway, cidr, uriString, bypassVlanOverlapCheck, owner, pNtwk, vpcId); if (privateNetwork != null) { - s_logger.debug("Successfully created guest network " + privateNetwork); + logger.debug("Successfully created guest network " + privateNetwork); if (associatedNetworkId != null) { _networkDetailsDao.persist(new NetworkDetailVO(privateNetwork.getId(), Network.AssociatedNetworkId, String.valueOf(associatedNetworkId), true)); } @@ -5565,7 +5563,7 @@ public Network doInTransaction(TransactionStatus status) throws ResourceAllocati throw new CloudRuntimeException("Creating guest network failed"); } } else { - s_logger.debug("Private network already exists: " + privateNetwork); + logger.debug("Private network already exists: " + privateNetwork); //Do not allow multiple private gateways with same Vlan within a VPC throw new InvalidParameterValueException("Private network for the vlan: " + uriString + " and cidr " + cidr + " already exists " + "for Vpc " + vpcId + " in zone " + _entityMgr.findById(DataCenter.class, pNtwk.getDataCenterId()).getName()); @@ -5585,7 +5583,7 @@ public Network doInTransaction(TransactionStatus status) throws ResourceAllocati _dcDao.update(dc.getId(), dc); } - s_logger.debug("Private network " + privateNetwork + " is created"); + logger.debug("Private network " + privateNetwork + " is created"); return privateNetwork; } @@ -5696,8 +5694,8 @@ public AcquirePodIpCmdResponse allocatePodIp(Account ipOwner, String zoneId, Str if (_accountMgr.checkAccessAndSpecifyAuthority(caller, zone.getId()) != zone.getId()) { throw new InvalidParameterValueException("Caller does not have permission for this Zone" + "(" + zoneId + ")"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Associate IP address called by the user " + callerUserId + 
" account " + ipOwner.getId()); } return _ipAddrMgr.allocatePodIp(zoneId, podId); @@ -5986,7 +5984,7 @@ public void removePublicIpAddressFromQuarantine(RemoveQuarantinedIpCmd cmd) thro String removalReason = cmd.getRemovalReason(); if (StringUtils.isBlank(removalReason)) { - s_logger.error("The removalReason parameter cannot be blank."); + logger.error("The removalReason parameter cannot be blank."); ipAddress = ObjectUtils.defaultIfNull(ipAddress, _ipAddressDao.findById(publicIpQuarantine.getPublicIpAddressId()).getAddress().toString()); throw new CloudRuntimeException(String.format("The given reason for removing the public IP address [%s] from quarantine is blank.", ipAddress)); } @@ -6003,10 +6001,10 @@ public void removePublicIpAddressFromQuarantine(RemoveQuarantinedIpCmd cmd) thro protected PublicIpQuarantine retrievePublicIpQuarantine(Long ipId, String ipAddress) throws CloudRuntimeException { PublicIpQuarantine publicIpQuarantine; if (ipId != null) { - s_logger.debug("The ID of the IP in quarantine was informed; therefore, the `ipAddress` parameter will be ignored."); + logger.debug("The ID of the IP in quarantine was informed; therefore, the `ipAddress` parameter will be ignored."); publicIpQuarantine = publicIpQuarantineDao.findById(ipId); } else if (ipAddress != null) { - s_logger.debug("The address of the IP in quarantine was informed, it will be used to fetch its metadata."); + logger.debug("The address of the IP in quarantine was informed, it will be used to fetch its metadata."); publicIpQuarantine = publicIpQuarantineDao.findByIpAddress(ipAddress); } else { throw new CloudRuntimeException("Either the ID or the address of the IP in quarantine must be informed."); diff --git a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java index b7adecda0dd2..59e21dc9c770 100644 --- a/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java +++ 
b/server/src/main/java/com/cloud/network/NetworkUsageManagerImpl.java @@ -28,7 +28,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.usage.AddTrafficMonitorCmd; @@ -97,7 +96,6 @@ public enum NetworkUsageResourceName { TrafficSentinel; } - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(NetworkUsageManagerImpl.class); @Inject HostDao _hostDao; @Inject @@ -150,7 +148,7 @@ public Host addTrafficMonitor(AddTrafficMonitorCmd cmd) { try { uri = new URI(cmd.getUrl()); } catch (Exception e) { - s_logger.debug(e); + logger.debug(e); throw new InvalidParameterValueException(e.getMessage()); } @@ -276,11 +274,11 @@ public boolean processAnswers(long agentId, long seq, Answer[] answers) { HostVO host = _hostDao.findById(agentId); if (host != null) { if ((host.getManagementServerId() == null) || (mgmtSrvrId != host.getManagementServerId())) { - s_logger.warn("Not the owner. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Not the owner. Not collecting Direct Network usage from TrafficMonitor : " + agentId); return false; } } else { - s_logger.warn("Agent not found. Not collecting Direct Network usage from TrafficMonitor : " + agentId); + logger.warn("Agent not found. 
Not collecting Direct Network usage from TrafficMonitor : " + agentId); return false; } @@ -300,12 +298,12 @@ public boolean processAnswers(long agentId, long seq, Answer[] answers) { } private boolean collectDirectNetworkUsage(final HostVO host) { - s_logger.debug("Direct Network Usage stats collector is running..."); + logger.debug("Direct Network Usage stats collector is running..."); final long zoneId = host.getDataCenterId(); final DetailVO lastCollectDetail = _detailsDao.findDetail(host.getId(), "last_collection"); if (lastCollectDetail == null) { - s_logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: " + host.getId()); + logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: " + host.getId()); return false; } Date lastCollection = new Date(Long.parseLong(lastCollectDetail.getValue())); @@ -321,7 +319,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { final Date now = rightNow.getTime(); if (lastCollection.after(now)) { - s_logger.debug("Current time is less than 2 hours after last collection time : " + lastCollection.toString() + + logger.debug("Current time is less than 2 hours after last collection time : " + lastCollection.toString() + ". Skipping direct network usage collection"); return false; } @@ -380,7 +378,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); return false; } else { for (UsageIPAddressVO usageIp : fullDurationIpUsage) { @@ -389,11 +387,11 @@ private boolean collectDirectNetworkUsage(final HostVO host) { Long bytesSent = bytesSentRcvd[0]; Long bytesRcvd = bytesSentRcvd[1]; if (bytesSent == null || bytesRcvd == null) { - s_logger.debug("Incorrect bytes for IP: " + publicIp); + logger.debug("Incorrect bytes for IP: " + publicIp); continue; } if (bytesSent == 0L && bytesRcvd == 0L) { - s_logger.trace("Ignore zero bytes for IP: " + publicIp); + logger.trace("Ignore zero bytes for IP: " + publicIp); continue; } UserStatisticsVO stats = new UserStatisticsVO(usageIp.getAccountId(), zoneId, null, null, null, null); @@ -413,7 +411,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { if (answer == null || !answer.getResult()) { String details = (answer != null) ? 
answer.getDetails() : "details unavailable"; String msg = "Unable to get network usage stats from " + host.getId() + " due to: " + details + "."; - s_logger.error(msg); + logger.error(msg); return false; } else { String publicIp = usageIp.getAddress(); @@ -421,11 +419,11 @@ private boolean collectDirectNetworkUsage(final HostVO host) { Long bytesSent = bytesSentRcvd[0]; Long bytesRcvd = bytesSentRcvd[1]; if (bytesSent == null || bytesRcvd == null) { - s_logger.debug("Incorrect bytes for IP: " + publicIp); + logger.debug("Incorrect bytes for IP: " + publicIp); continue; } if (bytesSent == 0L && bytesRcvd == 0L) { - s_logger.trace("Ignore zero bytes for IP: " + publicIp); + logger.trace("Ignore zero bytes for IP: " + publicIp); continue; } UserStatisticsVO stats = new UserStatisticsVO(usageIp.getAccountId(), zoneId, null, null, null, null); @@ -437,7 +435,7 @@ private boolean collectDirectNetworkUsage(final HostVO host) { } if (collectedStats.size() == 0) { - s_logger.debug("No new direct network stats. No need to persist"); + logger.debug("No new direct network stats. 
No need to persist"); return false; } //Persist all the stats and last_collection time in a single transaction @@ -477,8 +475,8 @@ public AgentControlAnswer processControlCommand(long agentId, AgentControlComman @Override public boolean processDisconnect(long agentId, Status state) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Disconnected called on " + agentId + " with status " + state.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Disconnected called on " + agentId + " with status " + state.toString()); } return true; } @@ -491,12 +489,12 @@ public void processHostAdded(long hostId) { public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) { if (cmd instanceof StartupTrafficMonitorCommand) { long agentId = agent.getId(); - s_logger.debug("Sending RecurringNetworkUsageCommand to " + agentId); + logger.debug("Sending RecurringNetworkUsageCommand to " + agentId); RecurringNetworkUsageCommand watch = new RecurringNetworkUsageCommand(_interval); try { _agentMgr.send(agentId, new Commands(watch), this); } catch (AgentUnavailableException e) { - s_logger.debug("Can not process connect for host " + agentId, e); + logger.debug("Can not process connect for host " + agentId, e); } } return; diff --git a/server/src/main/java/com/cloud/network/PortProfileManagerImpl.java b/server/src/main/java/com/cloud/network/PortProfileManagerImpl.java index 50d8d3b443ae..b656ae6e8a23 100644 --- a/server/src/main/java/com/cloud/network/PortProfileManagerImpl.java +++ b/server/src/main/java/com/cloud/network/PortProfileManagerImpl.java @@ -16,7 +16,8 @@ // under the License. 
package com.cloud.network; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.dao.PortProfileDaoImpl; @@ -29,7 +30,7 @@ public class PortProfileManagerImpl { private PortProfileDaoImpl _portProfileDao; - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(PortProfileManagerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); public PortProfileManagerImpl() { _portProfileDao = new PortProfileDaoImpl(); @@ -42,7 +43,7 @@ public PortProfileVO addPortProfile(String portProfName, long vsmId, int vlanId, // First, check if a port profile with the given name already exists. If it does, throw an exception. if (_portProfileDao.findByName(portProfName) != null) { - s_logger.info("Port Profile with specified name: " + portProfName + " already exists"); + logger.info("Port Profile with specified name: " + portProfName + " already exists"); throw new InvalidParameterValueException("Port Profile with specified name: " + portProfName + " already exists"); } // Check if the VSM id is a valid one. @@ -67,7 +68,7 @@ public PortProfileVO addPortProfile(String portProfName, long vsmId, int lowVlan portProfileObj = _portProfileDao.findByName(portProfName); if (portProfileObj != null) { - s_logger.info("Port Profile with specified name: " + portProfName + " already exists"); + logger.info("Port Profile with specified name: " + portProfName + " already exists"); throw new InvalidParameterValueException("Port Profile with specified name: " + portProfName + " already exists"); } @@ -75,7 +76,7 @@ public PortProfileVO addPortProfile(String portProfName, long vsmId, int lowVlan // range passed to this function. If so, throw an exception. 
if (_portProfileDao.doesVlanRangeClash(lowVlanId, highVlanId) == true) { - s_logger.info("Port Profile's vlanId range clashes with an existing Port Profile's"); + logger.info("Port Profile's vlanId range clashes with an existing Port Profile's"); throw new InvalidParameterValueException("Port Profile's vlanId range clashes with an existing Port Profile's"); } diff --git a/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java b/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java index e263c548cdcd..d922f8d0018e 100644 --- a/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java +++ b/server/src/main/java/com/cloud/network/SshKeysDistriMonitor.java @@ -16,7 +16,8 @@ // under the License. package com.cloud.network; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -38,7 +39,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; public class SshKeysDistriMonitor implements Listener { - private static final Logger s_logger = Logger.getLogger(SshKeysDistriMonitor.class); + protected Logger logger = LogManager.getLogger(getClass()); AgentManager _agentMgr; private ConfigurationDao _configDao; @@ -59,8 +60,8 @@ public synchronized boolean processAnswers(long agentId, long seq, Answer[] resp @Override public synchronized boolean processDisconnect(long agentId, Status state) { - if (s_logger.isTraceEnabled()) - s_logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". Will notify waiters"); + if (logger.isTraceEnabled()) + logger.trace("Agent disconnected, agent id: " + agentId + ", state: " + state + ". 
Will notify waiters"); return true; } @@ -92,7 +93,7 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) Commands c = new Commands(cmds); _agentMgr.send(host.getId(), c, this); } catch (AgentUnavailableException e) { - s_logger.debug("Failed to send keys to agent: " + host.getId()); + logger.debug("Failed to send keys to agent: " + host.getId()); } } } diff --git a/server/src/main/java/com/cloud/network/StorageNetworkManagerImpl.java b/server/src/main/java/com/cloud/network/StorageNetworkManagerImpl.java index ac43c11fbe2f..2bd7f880fbe8 100644 --- a/server/src/main/java/com/cloud/network/StorageNetworkManagerImpl.java +++ b/server/src/main/java/com/cloud/network/StorageNetworkManagerImpl.java @@ -23,7 +23,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.network.CreateStorageNetworkIpRangeCmd; @@ -59,7 +58,6 @@ @Component public class StorageNetworkManagerImpl extends ManagerBase implements StorageNetworkManager, StorageNetworkService { - private static final Logger s_logger = Logger.getLogger(StorageNetworkManagerImpl.class); @Inject StorageNetworkIpAddressDao _sNwIpDao; @@ -246,7 +244,7 @@ public StorageNetworkIpRangeVO doInTransaction(TransactionStatus status) throws err.append("endIp=" + endIpFinal); err.append("netmask=" + netmask); err.append("zoneId=" + zoneId); - s_logger.debug(err.toString(), e); + logger.debug(err.toString(), e); throw e; } @@ -286,7 +284,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { range = _sNwIpRangeDao.acquireInLockTable(rangeId); if (range == null) { String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } /* @@ -338,7 +336,7 @@ public StorageNetworkIpAddressVO acquireIpAddress(long podId) { r = _sNwIpRangeDao.acquireInLockTable(rangeId); if 
(r == null) { String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index c10ff89fa3d5..00bce4306d0d 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -74,7 +74,6 @@ import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.PerformanceMonitorAnswer; @@ -191,7 +190,6 @@ import com.google.gson.reflect.TypeToken; public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManager, AutoScaleService, Configurable { - private static final Logger s_logger = Logger.getLogger(AutoScaleManagerImpl.class); @Inject protected DispatchChainFactory dispatchChainFactory = null; @@ -304,10 +302,10 @@ public boolean start() { // create thread pool and blocking queue final int workersCount = AutoScaleStatsWorker.value(); groupExecutor = Executors.newFixedThreadPool(workersCount); - s_logger.info("AutoScale Manager created a thread pool to check autoscale vm groups. The pool size is : " + workersCount); + logger.info("AutoScale Manager created a thread pool to check autoscale vm groups. The pool size is : " + workersCount); final BlockingQueue>> queue = new LinkedBlockingQueue<>(workersCount); - s_logger.info("AutoScale Manager created a blocking queue to check autoscale vm groups. The queue size is : " + workersCount); + logger.info("AutoScale Manager created a blocking queue to check autoscale vm groups. 
The queue size is : " + workersCount); completionService = new ExecutorCompletionService<>(groupExecutor, queue); @@ -585,7 +583,7 @@ public AutoScaleVmProfile createAutoScaleVmProfile(CreateAutoScaleVmProfileCmd c } profileVO = checkValidityAndPersist(profileVO, true); - s_logger.info("Successfully create AutoScale Vm Profile with Id: " + profileVO.getId()); + logger.info("Successfully create AutoScale Vm Profile with Id: " + profileVO.getId()); return profileVO; } @@ -666,7 +664,7 @@ public AutoScaleVmProfile updateAutoScaleVmProfile(UpdateAutoScaleVmProfileCmd c } vmProfile = checkValidityAndPersist(vmProfile, false); - s_logger.info("Updated Auto Scale Vm Profile id:" + vmProfile.getId()); + logger.info("Updated Auto Scale Vm Profile id:" + vmProfile.getId()); return vmProfile; } @@ -682,7 +680,7 @@ public boolean deleteAutoScaleVmProfile(long id) { boolean success = autoScaleVmProfileDao.remove(id); if (success) { - s_logger.info("Successfully deleted AutoScale Vm Profile with Id: " + id); + logger.info("Successfully deleted AutoScale Vm Profile with Id: " + id); } return success; } @@ -816,7 +814,7 @@ public AutoScalePolicy createAutoScalePolicy(CreateAutoScalePolicyCmd cmd) { AutoScalePolicyVO policyVO = new AutoScalePolicyVO(cmd.getName(), cmd.getDomainId(), cmd.getAccountId(), duration, quietTime, null, scaleAction); policyVO = checkValidityAndPersist(policyVO, cmd.getConditionIds()); - s_logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId()); + logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId()); return policyVO; } @@ -837,15 +835,15 @@ public Boolean doInTransaction(TransactionStatus status) { boolean success = true; success = autoScalePolicyDao.remove(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Policy db object"); + logger.warn("Failed to remove AutoScale Policy db object"); return false; } success = autoScalePolicyConditionMapDao.removeByAutoScalePolicyId(id); if (!success) { 
- s_logger.warn("Failed to remove AutoScale Policy Condition mappings"); + logger.warn("Failed to remove AutoScale Policy Condition mappings"); return false; } - s_logger.info("Successfully deleted autoscale policy id : " + id); + logger.info("Successfully deleted autoscale policy id : " + id); return success; } @@ -987,7 +985,7 @@ public AutoScalePolicy updateAutoScalePolicy(UpdateAutoScalePolicyCmd cmd) { for (AutoScaleVmGroupPolicyMapVO vmGroupPolicy : vmGroupPolicyList) { AutoScaleVmGroupVO vmGroupVO = autoScaleVmGroupDao.findById(vmGroupPolicy.getVmGroupId()); if (vmGroupVO == null) { - s_logger.warn("Stale database entry! There is an entry in VmGroupPolicyMap but the vmGroup is missing:" + vmGroupPolicy.getVmGroupId()); + logger.warn("Stale database entry! There is an entry in VmGroupPolicyMap but the vmGroup is missing:" + vmGroupPolicy.getVmGroupId()); continue; @@ -1001,7 +999,7 @@ public AutoScalePolicy updateAutoScalePolicy(UpdateAutoScalePolicyCmd cmd) { } policy = checkValidityAndPersist(policy, conditionIds); - s_logger.info("Successfully updated Auto Scale Policy id:" + policyId); + logger.info("Successfully updated Auto Scale Policy id:" + policyId); if (CollectionUtils.isNotEmpty(conditionIds)) { markStatisticsAsInactive(null, policyId); @@ -1044,7 +1042,7 @@ public AutoScaleVmGroup createAutoScaleVmGroup(CreateAutoScaleVmGroupCmd cmd) { } vmGroupVO = checkValidityAndPersist(vmGroupVO, cmd.getScaleUpPolicyIds(), cmd.getScaleDownPolicyIds()); - s_logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId()); + logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId()); createInactiveDummyRecord(vmGroupVO.getId()); scheduleMonitorTask(vmGroupVO.getId()); @@ -1071,7 +1069,7 @@ protected boolean configureAutoScaleVmGroup(long vmGroupid, AutoScaleVmGroup.Sta } catch (ResourceUnavailableException re) { throw re; } catch (Exception e) { - s_logger.warn("Exception during configureLbAutoScaleVmGroup in lb 
rules manager", e); + logger.warn("Exception during configureLbAutoScaleVmGroup in lb rules manager", e); return false; } } @@ -1120,7 +1118,7 @@ public boolean deleteAutoScaleVmGroup(final long id, final Boolean cleanup) { autoScaleVmGroupDao.persist(autoScaleVmGroupVO); } finally { if (!success) { - s_logger.warn("Could not delete AutoScale Vm Group id : " + id); + logger.warn("Could not delete AutoScale Vm Group id : " + id); return false; } } @@ -1134,7 +1132,7 @@ public Boolean doInTransaction(TransactionStatus status) { boolean success = autoScaleVmGroupDao.remove(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Group db object"); + logger.warn("Failed to remove AutoScale Group db object"); return false; } @@ -1142,23 +1140,23 @@ public Boolean doInTransaction(TransactionStatus status) { success = autoScaleVmGroupPolicyMapDao.removeByGroupId(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Group Policy mappings"); + logger.warn("Failed to remove AutoScale Group Policy mappings"); return false; } success = autoScaleVmGroupVmMapDao.removeByGroup(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Group VM mappings"); + logger.warn("Failed to remove AutoScale Group VM mappings"); return false; } success = asGroupStatisticsDao.removeByGroupId(id); if (!success) { - s_logger.warn("Failed to remove AutoScale Group statistics"); + logger.warn("Failed to remove AutoScale Group statistics"); return false; } - s_logger.info("Successfully deleted autoscale vm group id : " + id); + logger.info("Successfully deleted autoscale vm group id : " + id); return success; // Successfull } }); @@ -1358,7 +1356,7 @@ public AutoScaleVmGroup updateAutoScaleVmGroup(UpdateAutoScaleVmGroupCmd cmd) { vmGroupVO = checkValidityAndPersist(vmGroupVO, scaleUpPolicyIds, scaleDownPolicyIds); if (vmGroupVO != null) { - s_logger.debug("Updated Auto Scale VmGroup id:" + vmGroupId); + logger.debug("Updated Auto Scale VmGroup id:" + vmGroupId); if 
((interval != null && interval != currentInterval) || CollectionUtils.isNotEmpty(scaleUpPolicyIds) || CollectionUtils.isNotEmpty(scaleDownPolicyIds)) { markStatisticsAsInactive(vmGroupId, null); @@ -1394,10 +1392,10 @@ public AutoScaleVmGroup enableAutoScaleVmGroup(Long id) { autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - s_logger.warn("Failed to enable AutoScale Vm Group id : " + id); + logger.warn("Failed to enable AutoScale Vm Group id : " + id); return null; } - s_logger.info("Successfully enabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully enabled AutoScale Vm Group with Id:" + id); createInactiveDummyRecord(vmGroup.getId()); } return vmGroup; @@ -1429,10 +1427,10 @@ public AutoScaleVmGroup disableAutoScaleVmGroup(Long id) { autoScaleVmGroupDao.persist(vmGroup); } finally { if (!success) { - s_logger.warn("Failed to disable AutoScale Vm Group id : " + id); + logger.warn("Failed to disable AutoScale Vm Group id : " + id); return null; } - s_logger.info("Successfully disabled AutoScale Vm Group with Id:" + id); + logger.info("Successfully disabled AutoScale Vm Group with Id:" + id); } return vmGroup; } @@ -1459,7 +1457,7 @@ public Counter createCounter(CreateCounterCmd cmd) { CounterVO counter = null; - s_logger.debug("Adding Counter " + name); + logger.debug("Adding Counter " + name); counter = counterDao.persist(new CounterVO(src, name, cmd.getValue(), provider)); CallContext.current().setEventDetails(" Id: " + counter.getId() + " Name: " + name); @@ -1495,7 +1493,7 @@ public Condition createCondition(CreateConditionCmd cmd) { ConditionVO condition = null; condition = conditionDao.persist(new ConditionVO(cid, threshold, owner.getAccountId(), owner.getDomainId(), op)); - s_logger.info("Successfully created condition with Id: " + condition.getId()); + logger.info("Successfully created condition with Id: " + condition.getId()); CallContext.current().setEventDetails(" Id: " + condition.getId()); return condition; @@ 
-1571,13 +1569,13 @@ public boolean deleteCounter(long counterId) throws ResourceInUseException { ConditionVO condition = conditionDao.findByCounterId(counterId); if (condition != null) { - s_logger.info("Cannot delete counter " + counter.getName() + " as it is being used in a condition."); + logger.info("Cannot delete counter " + counter.getName() + " as it is being used in a condition."); throw new ResourceInUseException("Counter is in use."); } boolean success = counterDao.remove(counterId); if (success) { - s_logger.info("Successfully deleted counter with Id: " + counterId); + logger.info("Successfully deleted counter with Id: " + counterId); } return success; @@ -1594,12 +1592,12 @@ public boolean deleteCondition(long conditionId) throws ResourceInUseException { // Verify if condition is used in any autoscale policy if (autoScalePolicyConditionMapDao.isConditionInUse(conditionId)) { - s_logger.info("Cannot delete condition " + conditionId + " as it is being used in a condition."); + logger.info("Cannot delete condition " + conditionId + " as it is being used in a condition."); throw new ResourceInUseException("Cannot delete Condition when it is in use by one or more AutoScale Policies."); } boolean success = conditionDao.remove(conditionId); if (success) { - s_logger.info("Successfully deleted condition " + condition.getId()); + logger.info("Successfully deleted condition " + condition.getId()); } return success; } @@ -1647,7 +1645,7 @@ public Condition updateCondition(UpdateConditionCmd cmd) throws ResourceInUseExc List groups = autoScaleVmGroupDao.search(sc2, null); if (CollectionUtils.isNotEmpty(groups)) { String msg = String.format("Cannot update condition %d as it is being used in %d vm groups NOT in Disabled state.", conditionId, groups.size()); - s_logger.info(msg); + logger.info(msg); throw new ResourceInUseException(msg); } } @@ -1656,7 +1654,7 @@ public Condition updateCondition(UpdateConditionCmd cmd) throws ResourceInUseExc 
condition.setThreshold(threshold); boolean success = conditionDao.update(conditionId, condition); if (success) { - s_logger.info("Successfully updated condition " + condition.getId()); + logger.info("Successfully updated condition " + condition.getId()); for (Long policyId : policyIds) { markStatisticsAsInactive(null, policyId); @@ -1670,12 +1668,12 @@ public boolean deleteAutoScaleVmGroupsByAccount(Long accountId) { boolean success = true; List groups = autoScaleVmGroupDao.listByAccount(accountId); for (AutoScaleVmGroupVO group : groups) { - s_logger.debug("Deleting AutoScale Vm Group " + group + " for account Id: " + accountId); + logger.debug("Deleting AutoScale Vm Group " + group + " for account Id: " + accountId); try { deleteAutoScaleVmGroup(group.getId(), true); - s_logger.debug("AutoScale Vm Group " + group + " has been successfully deleted for account Id: " + accountId); + logger.debug("AutoScale Vm Group " + group + " has been successfully deleted for account Id: " + accountId); } catch (Exception e) { - s_logger.warn("Failed to delete AutoScale Vm Group " + group + " for account Id: " + accountId + " due to: ", e); + logger.warn("Failed to delete AutoScale Vm Group " + group + " for account Id: " + accountId + " due to: ", e); success = false; } } @@ -1688,15 +1686,15 @@ public void cleanUpAutoScaleResources(Long accountId) { int count = 0; count = autoScaleVmProfileDao.removeByAccountId(accountId); if (count > 0) { - s_logger.debug("Deleted " + count + " AutoScale Vm Profile for account Id: " + accountId); + logger.debug("Deleted " + count + " AutoScale Vm Profile for account Id: " + accountId); } count = autoScalePolicyDao.removeByAccountId(accountId); if (count > 0) { - s_logger.debug("Deleted " + count + " AutoScale Policies for account Id: " + accountId); + logger.debug("Deleted " + count + " AutoScale Policies for account Id: " + accountId); } count = conditionDao.removeByAccountId(accountId); if (count > 0) { - s_logger.debug("Deleted " + count + 
" Conditions for account Id: " + accountId); + logger.debug("Deleted " + count + " Conditions for account Id: " + accountId); } } @@ -1705,7 +1703,7 @@ private boolean checkConditionUp(AutoScaleVmGroupVO asGroup, Integer numVm) { Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(asGroup.getId()); Integer maxVm = asGroup.getMaxMembers(); if (currentVM + numVm > maxVm) { - s_logger.warn("number of VM will greater than the maximum in this group if scaling up, so do nothing more"); + logger.warn("number of VM will greater than the maximum in this group if scaling up, so do nothing more"); return false; } return true; @@ -1715,7 +1713,7 @@ private boolean checkConditionDown(AutoScaleVmGroupVO asGroup) { Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(asGroup.getId()); Integer minVm = asGroup.getMinMembers(); if (currentVM - 1 < minVm) { - s_logger.warn("number of VM will less than the minimum in this group if scaling down, so do nothing more"); + logger.warn("number of VM will less than the minimum in this group if scaling down, so do nothing more"); return false; } return true; @@ -1823,17 +1821,17 @@ protected long createNewVM(AutoScaleVmGroupVO asGroup) { return -1; } } catch (InsufficientCapacityException ex) { - s_logger.info(ex); - s_logger.trace(ex.getMessage(), ex); + logger.info(ex); + logger.trace(ex.getMessage(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new 
ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } } @@ -1861,7 +1859,7 @@ protected Long getVmOverrideDiskOfferingId(Map deployParams) { if (overrideDiskOfferingInParam != null) { overrideDiskOfferingId = overrideDiskOfferingInParam.getId(); } else { - s_logger.warn("Cannot find disk offering by overridediskofferingid from otherdeployparams in AutoScale Vm profile"); + logger.warn("Cannot find disk offering by overridediskofferingid from otherdeployparams in AutoScale Vm profile"); } } return overrideDiskOfferingId; @@ -1875,7 +1873,7 @@ protected final Long getVmDiskOfferingId(Map deployParams) { if (diskOfferingInParam != null) { diskOfferingId = diskOfferingInParam.getId(); } else { - s_logger.warn("Cannot find disk offering by diskofferingid from otherdeployparams in AutoScale Vm profile"); + logger.warn("Cannot find disk offering by diskofferingid from otherdeployparams in AutoScale Vm profile"); } } return diskOfferingId; @@ -1888,7 +1886,7 @@ protected Long getVmDataDiskSize(Map deployParams) { try { dataDiskSize = Long.parseLong(dataDiskSizeInParam); } catch (NumberFormatException ex) { - s_logger.warn("Cannot parse size from otherdeployparams in AutoScale Vm profile"); + logger.warn("Cannot parse size from otherdeployparams in AutoScale Vm profile"); } } return dataDiskSize; @@ -1903,7 +1901,7 @@ protected List getVmSshKeyPairs(Map deployParams, Accoun if (s != null) { sshKeyPairs.add(s.getName()); } else { - s_logger.warn("Cannot find ssh keypair by name in sshkeypairs from otherdeployparams in AutoScale Vm profile"); + logger.warn("Cannot find ssh keypair by name in sshkeypairs from otherdeployparams in AutoScale Vm profile"); } } } @@ -1919,7 +1917,7 @@ protected List getVmAffinityGroupId(Map deployParams) { if (affintyGroup != null) { affinityGroupIdList.add(affintyGroup.getId()); } else { - s_logger.warn("Cannot find affinity group by affinitygroupids from otherdeployparams in AutoScale Vm profile"); + logger.warn("Cannot 
find affinity group by affinitygroupids from otherdeployparams in AutoScale Vm profile"); } } } @@ -1933,7 +1931,7 @@ public void updateVmDetails(Map deployParams, Map(), null); } catch (final ResourceUnavailableException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); } catch (ResourceAllocationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage()); } catch (ConcurrentOperationException ex) { - s_logger.warn("Exception: ", ex); + logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { StringBuilder message = new StringBuilder(ex.getMessage()); @@ -1983,8 +1981,8 @@ private boolean startNewVM(long vmId) { message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them"); } } - s_logger.info(ex); - s_logger.info(message.toString(), ex); + logger.info(ex); + logger.info(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } return true; @@ -1999,7 +1997,7 @@ private boolean assignLBruleToNewVm(long vmId, AutoScaleVmGroupVO asGroup) { for (LoadBalancerVMMapVO LbVmMapVo : lbVmMapVos) { long instanceId = LbVmMapVo.getInstanceId(); if (instanceId == vmId) { - s_logger.warn("the new VM is already mapped to LB rule. What's wrong?"); + logger.warn("the new VM is already mapped to LB rule. 
What's wrong?"); return true; } } @@ -2008,7 +2006,7 @@ private boolean assignLBruleToNewVm(long vmId, AutoScaleVmGroupVO asGroup) { try { return loadBalancingRulesService.assignToLoadBalancer(lbId, lstVmId, new HashMap<>(), true); } catch (CloudRuntimeException ex) { - s_logger.warn("Caught exception: ", ex); + logger.warn("Caught exception: ", ex); return false; } } @@ -2036,7 +2034,7 @@ private long removeLBrule(AutoScaleVmGroupVO asGroup) { public void doScaleUp(long groupId, Integer numVm) { AutoScaleVmGroupVO asGroup = autoScaleVmGroupDao.findById(groupId); if (asGroup == null) { - s_logger.error("Can not find the groupid " + groupId + " for scaling up"); + logger.error("Can not find the groupid " + groupId + " for scaling up"); return; } if (!checkConditionUp(asGroup, numVm)) { @@ -2045,7 +2043,7 @@ public void doScaleUp(long groupId, Integer numVm) { AutoScaleVmGroup.State oldState = asGroup.getState(); AutoScaleVmGroup.State newState = AutoScaleVmGroup.State.SCALING; if (!autoScaleVmGroupDao.updateState(groupId, oldState, newState)) { - s_logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); + logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); return; } try { @@ -2055,7 +2053,7 @@ public void doScaleUp(long groupId, Integer numVm) { true, 0); long vmId = createNewVM(asGroup); if (vmId == -1) { - s_logger.error("Can not deploy new VM for scaling up in the group " + logger.error("Can not deploy new VM for scaling up in the group " + asGroup.getId() + ". 
Waiting for next round"); break; } @@ -2085,13 +2083,13 @@ public void doScaleUp(long groupId, Integer numVm) { ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, String.format("Started and assigned LB rule for VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } else { - s_logger.error("Can not assign LB rule for this new VM"); + logger.error("Can not assign LB rule for this new VM"); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, String.format("Failed to assign LB rule for VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); break; } } catch (ServerApiException e) { - s_logger.error("Can not deploy new VM for scaling up in the group " + logger.error("Can not deploy new VM for scaling up in the group " + asGroup.getId() + ". 
Waiting for next round"); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEUP, String.format("Failed to start VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); @@ -2101,7 +2099,7 @@ public void doScaleUp(long groupId, Integer numVm) { } } finally { if (!autoScaleVmGroupDao.updateState(groupId, newState, oldState)) { - s_logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); + logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); } } } @@ -2110,7 +2108,7 @@ public void doScaleUp(long groupId, Integer numVm) { public void doScaleDown(final long groupId) { AutoScaleVmGroupVO asGroup = autoScaleVmGroupDao.findById(groupId); if (asGroup == null) { - s_logger.error("Can not find the groupid " + groupId + " for scaling down"); + logger.error("Can not find the groupid " + groupId + " for scaling down"); return; } if (!checkConditionDown(asGroup)) { @@ -2119,7 +2117,7 @@ public void doScaleDown(final long groupId) { AutoScaleVmGroup.State oldState = asGroup.getState(); AutoScaleVmGroup.State newState = AutoScaleVmGroup.State.SCALING; if (!autoScaleVmGroupDao.updateState(groupId, oldState, newState)) { - s_logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); + logger.error(String.format("Can not update vmgroup state from %s to %s, groupId: %s", oldState, newState, groupId)); return; } ActionEventUtils.onStartedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, @@ -2130,7 +2128,7 @@ public void doScaleDown(final long groupId) { try { vmId = removeLBrule(asGroup); } catch (Exception ex) { - s_logger.info("Got exception when remove LB rule for a VM in AutoScale VM group %d: " + 
groupId, ex); + logger.info("Got exception when remove LB rule for a VM in AutoScale VM group %d: " + groupId, ex); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, String.format("Failed to remove LB rule for a VM in AutoScale VM group %d", groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); throw ex; @@ -2173,13 +2171,13 @@ public void doScaleDown(final long groupId) { String.format("Failed to destroy VM %d in AutoScale VM group %d", vmId, groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } } else { - s_logger.error("Can not remove LB rule for the VM being destroyed. Do nothing more."); + logger.error("Can not remove LB rule for the VM being destroyed. Do nothing more."); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, asGroup.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_AUTOSCALEVMGROUP_SCALEDOWN, String.format("Failed to remove LB rule for a VM in AutoScale VM group %d", groupId), groupId, ApiCommandResourceType.AutoScaleVmGroup.toString(), 0); } } finally { if (!autoScaleVmGroupDao.updateState(groupId, newState, oldState)) { - s_logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); + logger.error(String.format("Can not update vmgroup state from %s back to %s, groupId: %s", newState, oldState, groupId)); } } } @@ -2209,11 +2207,11 @@ public void checkAllAutoScaleVmGroups() { try { Future> future = completionService.take(); Pair result = future.get(); - s_logger.debug("Checked AutoScale vm group " + result.first() + " with result: " + result.second()); + logger.debug("Checked AutoScale vm group " + result.first() + " with result: " + result.second()); } catch (ExecutionException ex) { - s_logger.warn("Failed to get result of checking AutoScale vm group due to Exception: " , ex); + logger.warn("Failed to get result of checking AutoScale 
vm group due to Exception: " , ex); } catch (InterruptedException ex) { - s_logger.warn("Failed to get result of checking AutoScale vm group due to Exception: " , ex); + logger.warn("Failed to get result of checking AutoScale vm group due to Exception: " , ex); Thread.currentThread().interrupt(); } } @@ -2229,10 +2227,10 @@ public CheckAutoScaleVmGroupAsync(AutoScaleVmGroupVO asGroup) { @Override public Pair call() { try { - s_logger.debug("Checking AutoScale vm group " + asGroup); + logger.debug("Checking AutoScale vm group " + asGroup); checkAutoScaleVmGroup(asGroup); } catch (Exception ex) { - s_logger.warn("Failed to check AutoScale vm group " + asGroup + " due to Exception: " , ex); + logger.warn("Failed to check AutoScale vm group " + asGroup + " due to Exception: " , ex); return new Pair<>(asGroup.getId(), false); } return new Pair<>(asGroup.getId(), true); @@ -2317,7 +2315,7 @@ protected Map> getPolicyCounters(AutoScaleVmGroupTO groupT } protected AutoScalePolicy.Action getAutoscaleAction(Map countersMap, Map countersNumberMap, AutoScaleVmGroupTO groupTO) { - s_logger.debug("[AutoScale] Getting autoscale action for group : " + groupTO.getId()); + logger.debug("[AutoScale] Getting autoscale action for group : " + groupTO.getId()); Network.Provider provider = getLoadBalancerServiceProvider(groupTO.getLoadBalancerId()); @@ -2356,10 +2354,10 @@ protected AutoScalePolicy.Action checkConditionsForPolicy(Map co } Double sum = countersMap.get(key); Integer number = countersNumberMap.get(key); - s_logger.debug(String.format("Checking policyId = %d, conditionId = %d, counter = \"%s\", sum = %f, number = %s", policyTO.getId(), conditionTO.getId(), counter.getName(), sum, number)); + logger.debug(String.format("Checking policyId = %d, conditionId = %d, counter = \"%s\", sum = %f, number = %s", policyTO.getId(), conditionTO.getId(), counter.getName(), sum, number)); if (number == null || number == 0) { bValid = false; - s_logger.debug(String.format("Skipping policyId = 
%d, conditionId = %d, counter = \"%s\" because the number is %s", policyTO.getId(), conditionTO.getId(), counter.getName(), number)); + logger.debug(String.format("Skipping policyId = %d, conditionId = %d, counter = \"%s\" because the number is %s", policyTO.getId(), conditionTO.getId(), counter.getName(), number)); break; } Double avg = sum / number; @@ -2370,7 +2368,7 @@ protected AutoScalePolicy.Action checkConditionsForPolicy(Map co || ((op == com.cloud.network.as.Condition.Operator.LE) && (avg.doubleValue() <= thresholdPercent.doubleValue())) || ((op == com.cloud.network.as.Condition.Operator.LT) && (avg.doubleValue() < thresholdPercent.doubleValue())); - s_logger.debug(String.format("Check result on policyId = %d, conditionId = %d, counter = %s is : %s" + + logger.debug(String.format("Check result on policyId = %d, conditionId = %d, counter = %s is : %s" + " (actual result = %f, operator = %s, threshold = %f)", policyTO.getId(), conditionTO.getId(), counter.getSource(), bConditionCheck, avg, op, thresholdPercent)); @@ -2380,7 +2378,7 @@ protected AutoScalePolicy.Action checkConditionsForPolicy(Map co } } AutoScalePolicy.Action action = bValid ? 
policyTO.getAction() : null; - s_logger.debug(String.format("Check result on policyId = %d is %s", policyTO.getId(), action)); + logger.debug(String.format("Check result on policyId = %d is %s", policyTO.getId(), action)); return action; } @@ -2441,7 +2439,7 @@ protected boolean checkAsGroupMaxAndMinMembers(AutoScaleVmGroupVO asGroup) { // check minimum vm of group Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(asGroup.getId()); if (currentVM < asGroup.getMinMembers()) { - s_logger.debug(String.format("There are currently %s available VMs which is less than the minimum member of " + + logger.debug(String.format("There are currently %s available VMs which is less than the minimum member of " + "the AS group (%s), scaling up %d VMs", currentVM, asGroup.getMinMembers(), asGroup.getMinMembers() - currentVM)); doScaleUp(asGroup.getId(), asGroup.getMinMembers() - currentVM); return false; @@ -2449,7 +2447,7 @@ protected boolean checkAsGroupMaxAndMinMembers(AutoScaleVmGroupVO asGroup) { // check maximum vm of group if (currentVM > asGroup.getMaxMembers()) { - s_logger.debug(String.format("There are currently %s available VMs which is more than the maximum member of " + + logger.debug(String.format("There are currently %s available VMs which is more than the maximum member of " + "the AS group (%s), scaling down %d VMs", currentVM, asGroup.getMaxMembers(), currentVM - asGroup.getMaxMembers())); for (int i = 0; i < currentVM - asGroup.getMaxMembers(); i++) { doScaleDown(asGroup.getId()); @@ -2480,8 +2478,8 @@ protected void checkNetScalerAsGroup(AutoScaleVmGroupVO asGroup) { asGroup.setLastInterval(new Date()); autoScaleVmGroupDao.persist(asGroup); - if (s_logger.isDebugEnabled()) { - s_logger.debug("[Netscaler AutoScale] Collecting RRDs data..."); + if (logger.isDebugEnabled()) { + logger.debug("[Netscaler AutoScale] Collecting RRDs data..."); } Map params = new HashMap<>(); List asGroupVmVOs = autoScaleVmGroupVmMapDao.listByGroup(asGroup.getId()); 
@@ -2506,10 +2504,10 @@ protected void checkNetScalerAsGroup(AutoScaleVmGroupVO asGroup) { try { PerformanceMonitorAnswer answer = (PerformanceMonitorAnswer) agentMgr.send(receiveHost, perfMon); if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to send data to node !"); + logger.debug("Failed to send data to node !"); } else { String result = answer.getDetails(); - s_logger.debug("[AutoScale] RRDs collection answer: " + result); + logger.debug("[AutoScale] RRDs collection answer: " + result); HashMap countersMap = new HashMap<>(); HashMap countersNumberMap = new HashMap<>(); @@ -2517,7 +2515,7 @@ protected void checkNetScalerAsGroup(AutoScaleVmGroupVO asGroup) { AutoScalePolicy.Action scaleAction = getAutoscaleAction(countersMap, countersNumberMap, groupTO); if (scaleAction != null) { - s_logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); if (AutoScalePolicy.Action.SCALEUP.equals(scaleAction)) { doScaleUp(asGroup.getId(), 1); } else { @@ -2527,7 +2525,7 @@ protected void checkNetScalerAsGroup(AutoScaleVmGroupVO asGroup) { } } catch (Exception e) { - s_logger.error("Cannot sent PerformanceMonitorCommand to host " + receiveHost + " or process the answer due to Exception: ", e); + logger.error("Cannot sent PerformanceMonitorCommand to host " + receiveHost + " or process the answer due to Exception: ", e); } } @@ -2572,7 +2570,7 @@ protected void processPerformanceMonitorAnswer(Map countersMap, updateCountersMapWithInstantData(countersMap, countersNumberMap, groupTO, counterId, conditionId, policyId, coVal, AutoScaleValueType.INSTANT_VM); } catch (Exception e) { - s_logger.error("Cannot process PerformanceMonitorAnswer due to Exception: ", e); + logger.error("Cannot process PerformanceMonitorAnswer due to Exception: ", e); } } } @@ -2603,7 +2601,7 @@ protected void updateCountersMapWithInstantData(Map 
countersMap, if (AutoScaleValueType.INSTANT_VM_GROUP.equals(valueType)) { Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(groupTO.getId()); if (currentVM == 0) { - s_logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupTO.getId(), policyId, counterId)); + logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupTO.getId(), policyId, counterId)); return; } coVal = coVal / currentVM; @@ -2637,17 +2635,17 @@ protected void monitorVirtualRouterAsGroup(AutoScaleVmGroupVO asGroup) { asGroup.setLastInterval(new Date()); autoScaleVmGroupDao.persist(asGroup); - s_logger.debug("[AutoScale] Collecting performance data ..."); + logger.debug("[AutoScale] Collecting performance data ..."); AutoScaleVmGroupTO groupTO = lbRulesMgr.toAutoScaleVmGroupTO(asGroup); if (isNative(groupTO)) { - s_logger.debug("[AutoScale] Collecting performance data from hosts ..."); + logger.debug("[AutoScale] Collecting performance data from hosts ..."); getVmStatsFromHosts(groupTO); } if (hasSourceVirtualRouter(groupTO)) { - s_logger.debug("[AutoScale] Collecting performance data from virtual router ..."); + logger.debug("[AutoScale] Collecting performance data from virtual router ..."); getNetworkStatsFromVirtualRouter(groupTO); } } @@ -2671,7 +2669,7 @@ protected void checkVirtualRouterAsGroup(AutoScaleVmGroupVO asGroup) { // get scale action AutoScalePolicy.Action scaleAction = getAutoscaleAction(countersMap, countersNumberMap, groupTO); if (scaleAction != null) { - s_logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); + logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId()); if (AutoScalePolicy.Action.SCALEUP.equals(scaleAction)) { doScaleUp(asGroup.getId(), 1); } else { @@ -2702,16 +2700,16 @@ protected void getVmStatsFromHosts(AutoScaleVmGroupTO groupTO) { 
Map vmStatsById = new HashMap<>(); HostVO host = hostDao.findById(hostId); if (host == null) { - s_logger.debug("Failed to get VM stats from non-existing host : " + hostId); + logger.debug("Failed to get VM stats from non-existing host : " + hostId); return vmStatsById; } try { vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host.getId(), host.getName(), vmIds); if (MapUtils.isEmpty(vmStatsById)) { - s_logger.warn("Got empty result for virtual machine statistics from host: " + host); + logger.warn("Got empty result for virtual machine statistics from host: " + host); } } catch (Exception e) { - s_logger.debug("Failed to get VM stats from host : " + host.getName()); + logger.debug("Failed to get VM stats from host : " + host.getName()); } return vmStatsById; } @@ -2737,7 +2735,7 @@ protected void processVmStatsByIdFromHost(AutoScaleVmGroupTO groupTO, List } else { // In some scenarios, the free memory is greater than VM memory // see https://github.com/apache/cloudstack/issues/4566 - s_logger.warn(String.format("Getting virtual machine statistics return invalid free memory KBs for VM %d: %f", vmId, vmStats.getIntFreeMemoryKBs())); + logger.warn(String.format("Getting virtual machine statistics return invalid free memory KBs for VM %d: %f", vmId, vmStats.getIntFreeMemoryKBs())); } } } @@ -2768,7 +2766,7 @@ protected void getNetworkStatsFromVirtualRouter(AutoScaleVmGroupTO groupTO) { command.setWait(30); GetAutoScaleMetricsAnswer answer = (GetAutoScaleMetricsAnswer) agentMgr.easySend(router.getHostId(), command); if (answer == null || !answer.getResult()) { - s_logger.error("Failed to get autoscale metrics from virtual router " + router.getName()); + logger.error("Failed to get autoscale metrics from virtual router " + router.getName()); processGetAutoScaleMetricsAnswer(groupTO, new ArrayList<>(), router.getId()); } else { processGetAutoScaleMetricsAnswer(groupTO, answer.getValues(), router.getId()); @@ -2827,24 +2825,24 @@ protected void 
processGetAutoScaleMetricsAnswer(AutoScaleVmGroupTO groupTO, List } protected boolean updateCountersMap(AutoScaleVmGroupTO groupTO, Map countersMap, Map countersNumberMap) { - s_logger.debug("Updating countersMap for as group: " + groupTO.getId()); + logger.debug("Updating countersMap for as group: " + groupTO.getId()); for (AutoScalePolicyTO policyTO : groupTO.getPolicies()) { Date afterDate = new Date(System.currentTimeMillis() - ((long)policyTO.getDuration() << 10)); List dummyStats = asGroupStatisticsDao.listDummyRecordsByVmGroup(groupTO.getId(), afterDate); if (CollectionUtils.isNotEmpty(dummyStats)) { - s_logger.error(String.format("Failed to update counters map as there are %d dummy statistics in as group %d", dummyStats.size(), groupTO.getId())); + logger.error(String.format("Failed to update counters map as there are %d dummy statistics in as group %d", dummyStats.size(), groupTO.getId())); return false; } List inactiveStats = asGroupStatisticsDao.listInactiveByVmGroupAndPolicy(groupTO.getId(), policyTO.getId(), afterDate); if (CollectionUtils.isNotEmpty(inactiveStats)) { - s_logger.error(String.format("Failed to update counters map as there are %d Inactive statistics in as group %d and policy %s", inactiveStats.size(), groupTO.getId(), policyTO.getId())); + logger.error(String.format("Failed to update counters map as there are %d Inactive statistics in as group %d and policy %s", inactiveStats.size(), groupTO.getId(), policyTO.getId())); continue; } for (ConditionTO conditionTO : policyTO.getConditions()) { updateCountersMapPerCondition(groupTO, policyTO, conditionTO, afterDate, countersMap, countersNumberMap); } } - s_logger.debug("DONE Updating countersMap for as group: " + groupTO.getId()); + logger.debug("DONE Updating countersMap for as group: " + groupTO.getId()); return true; } @@ -2854,10 +2852,10 @@ private void updateCountersMapPerCondition(AutoScaleVmGroupTO groupTO, AutoScale CounterTO counter = conditionTO.getCounter(); List stats = 
asGroupStatisticsDao.listByVmGroupAndPolicyAndCounter(groupTO.getId(), policyTO.getId(), counter.getId(), afterDate); if (CollectionUtils.isEmpty(stats)) { - s_logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no stats", groupTO.getId(), policyTO.getId(), counter.getId())); + logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no stats", groupTO.getId(), policyTO.getId(), counter.getId())); return; } - s_logger.debug(String.format("Updating countersMap with %d stats for group %s and policy %s and counter %s", stats.size(), groupTO.getId(), policyTO.getId(), counter.getId())); + logger.debug(String.format("Updating countersMap with %d stats for group %s and policy %s and counter %s", stats.size(), groupTO.getId(), policyTO.getId(), counter.getId())); Map> aggregatedRecords = new HashMap<>(); List incorrectRecords = new ArrayList<>(); for (AutoScaleVmGroupStatisticsVO stat : stats) { @@ -2880,7 +2878,7 @@ private void updateCountersMapPerCondition(AutoScaleVmGroupTO groupTO, AutoScale if (stat.getRawValue() >= lastRecord.getRawValue()) { aggregatedRecordList.add(stat); } else { - s_logger.info("The new raw value is less than the previous raw value, which means the data is incorrect. The key is " + key); + logger.info("The new raw value is less than the previous raw value, which means the data is incorrect. 
The key is " + key); aggregatedRecords.remove(key); incorrectRecords.add(key); } @@ -2895,13 +2893,13 @@ public void updateCountersMapByAggregatedRecords(Map countersMap Map> aggregatedRecords, Long conditionId, Long policyId, Long groupId) { if (MapUtils.isNotEmpty(aggregatedRecords)) { - s_logger.debug("Processing aggregated data"); + logger.debug("Processing aggregated data"); for (Map.Entry> aggregatedRecord : aggregatedRecords.entrySet()) { String recordKey = aggregatedRecord.getKey(); Long counterId = Long.valueOf(recordKey.split("-")[0]); List records = aggregatedRecord.getValue(); if (records.size() <= 1) { - s_logger.info(String.format("Ignoring aggregated records, conditionId = %s, counterId = %s", conditionId, counterId)); + logger.info(String.format("Ignoring aggregated records, conditionId = %s, counterId = %s", conditionId, counterId)); continue; } AutoScaleVmGroupStatisticsVO firstRecord = records.get(0); @@ -2910,7 +2908,7 @@ public void updateCountersMapByAggregatedRecords(Map countersMap if (AutoScaleValueType.AGGREGATED_VM_GROUP.equals(firstRecord.getValueType())) { Integer currentVM = autoScaleVmGroupVmMapDao.countAvailableVmsByGroup(groupId); if (currentVM == 0) { - s_logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupId, policyId, counterId)); + logger.debug(String.format("Skipping updating countersMap for group %s and policy %s and counter %s due to no VMs", groupId, policyId, counterId)); return; } coVal = coVal / currentVM; @@ -2932,14 +2930,14 @@ protected void cleanupAsVmGroupStatistics(AutoScaleVmGroupTO groupTO) { Integer duration = policyTO.getDuration(); Integer delaySecs = cleanupDelay >= duration ? 
cleanupDelay : duration; Date beforeDate = new Date(System.currentTimeMillis() - ((long)delaySecs * 1000)); - s_logger.debug(String.format("Removing stats for policy %d in as group %d, before %s", policyTO.getId(), groupTO.getId(), beforeDate)); + logger.debug(String.format("Removing stats for policy %d in as group %d, before %s", policyTO.getId(), groupTO.getId(), beforeDate)); asGroupStatisticsDao.removeByGroupAndPolicy(groupTO.getId(), policyTO.getId(), beforeDate); if (delaySecs > maxDelaySecs) { maxDelaySecs = delaySecs; } } Date beforeDate = new Date(System.currentTimeMillis() - ((long)maxDelaySecs * 1000)); - s_logger.debug(String.format("Removing stats for other policies in as group %d, before %s", groupTO.getId(), beforeDate)); + logger.debug(String.format("Removing stats for other policies in as group %d, before %s", groupTO.getId(), beforeDate)); asGroupStatisticsDao.removeByGroupId(groupTO.getId(), beforeDate); } @@ -2956,7 +2954,7 @@ protected void scheduleMonitorTask(Long groupId) { ScheduledExecutorService vmGroupExecutor = vmGroupMonitorMaps.get(groupId); if (vmGroupExecutor == null) { AutoScaleVmGroupVO vmGroup = autoScaleVmGroupDao.findById(groupId); - s_logger.debug("Scheduling monitor task for autoscale vm group " + vmGroup); + logger.debug("Scheduling monitor task for autoscale vm group " + vmGroup); vmGroupExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("VmGroup-Monitor-" + groupId)); vmGroupExecutor.scheduleWithFixedDelay(new MonitorTask(groupId), vmGroup.getInterval(), vmGroup.getInterval(), TimeUnit.SECONDS); vmGroupMonitorMaps.put(groupId, vmGroupExecutor); @@ -2966,7 +2964,7 @@ protected void scheduleMonitorTask(Long groupId) { protected void cancelMonitorTask(Long groupId) { ScheduledExecutorService vmGroupExecutor = vmGroupMonitorMaps.get(groupId); if (vmGroupExecutor != null) { - s_logger.debug("Cancelling monitor task for autoscale vm group " + groupId); + logger.debug("Cancelling monitor task for autoscale vm 
group " + groupId); vmGroupExecutor.shutdown(); vmGroupMonitorMaps.remove(groupId); } @@ -2985,21 +2983,21 @@ protected synchronized void runInContext() { try { AutoScaleVmGroupVO asGroup = autoScaleVmGroupDao.findById(groupId); if (asGroup == null) { - s_logger.error("Can not find the groupid " + groupId + " for monitoring"); + logger.error("Can not find the groupid " + groupId + " for monitoring"); return; } - s_logger.debug("Start monitoring on AutoScale VmGroup " + asGroup); + logger.debug("Start monitoring on AutoScale VmGroup " + asGroup); // check group state if (asGroup.getState().equals(AutoScaleVmGroup.State.ENABLED)) { Network.Provider provider = getLoadBalancerServiceProvider(asGroup.getLoadBalancerId()); if (Network.Provider.Netscaler.equals(provider)) { - s_logger.debug("Skipping the monitoring on AutoScale VmGroup with Netscaler provider: " + asGroup); + logger.debug("Skipping the monitoring on AutoScale VmGroup with Netscaler provider: " + asGroup); } else if (Network.Provider.VirtualRouter.equals(provider) || Network.Provider.VPCVirtualRouter.equals(provider)) { monitorVirtualRouterAsGroup(asGroup); } } } catch (final Exception e) { - s_logger.warn("Caught the following exception on monitoring AutoScale Vm Group", e); + logger.warn("Caught the following exception on monitoring AutoScale Vm Group", e); } } } @@ -3031,7 +3029,7 @@ protected boolean destroyVm(Long vmId) { } return true; } catch (Exception ex) { - s_logger.error("Cannot destroy vm with id: " + vmId + "due to Exception: ", ex); + logger.error("Cannot destroy vm with id: " + vmId + "due to Exception: ", ex); return false; } } diff --git a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java index 83900ff2d438..a9fa3e95275e 100644 --- a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java +++ 
b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.HandleConfigDriveIsoAnswer; @@ -93,7 +92,6 @@ public class ConfigDriveNetworkElement extends AdapterBase implements NetworkElement, UserDataServiceProvider, StateListener, NetworkMigrationResponder { - private static final Logger LOG = Logger.getLogger(ConfigDriveNetworkElement.class); private static final Map> capabilities = setCapabilities(); @@ -171,7 +169,7 @@ public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm try { return deleteConfigDriveIso(vm.getVirtualMachine()); } catch (ResourceUnavailableException e) { - LOG.error("Failed to delete config drive due to: ", e); + logger.error("Failed to delete config drive due to: ", e); return false; } } @@ -268,7 +266,7 @@ public boolean saveHypervisorHostname(NicProfile nic, Network network, VirtualMa try { recreateConfigDriveIso(nic, network, vm, dest); } catch (ResourceUnavailableException e) { - LOG.error("Failed to add config disk drive due to: ", e); + logger.error("Failed to add config disk drive due to: ", e); return false; } } @@ -325,7 +323,7 @@ public boolean postStateTransitionEvent(StateMachine2.Transition customUserdataParamMap = getVMCustomUserdataParamMap(profile.getId()); @@ -567,16 +565,16 @@ private boolean deleteConfigDriveIsoOnHostCache(final VirtualMachine vm, final L ConfigDriveNetworkElement.class, 0L); } - LOG.debug("Deleting config drive ISO for vm: " + vm.getInstanceName() + " on host: " + hostId); + logger.debug("Deleting config drive ISO for vm: " + vm.getInstanceName() + " on host: " + hostId); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); final HandleConfigDriveIsoCommand 
configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, null, false, true, false); HostVO hostVO = _hostDao.findById(hostId); if (hostVO == null) { - LOG.warn(String.format("Host %s appears to be unavailable, skipping deletion of config-drive ISO on host cache", hostId)); + logger.warn(String.format("Host %s appears to be unavailable, skipping deletion of config-drive ISO on host cache", hostId)); return false; } if (!Arrays.asList(Status.Up, Status.Connecting).contains(hostVO.getStatus())) { - LOG.warn(String.format("Host status %s is not Up or Connecting, skipping deletion of config-drive ISO on host cache", hostId)); + logger.warn(String.format("Host status %s is not Up or Connecting, skipping deletion of config-drive ISO on host cache", hostId)); return false; } @@ -586,7 +584,7 @@ private boolean deleteConfigDriveIsoOnHostCache(final VirtualMachine vm, final L } if (!answer.getResult()) { - LOG.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + logger.error("Failed to remove config drive for instance: " + vm.getInstanceName()); return false; } return true; @@ -601,7 +599,7 @@ private boolean createConfigDriveIso(VirtualMachineProfile profile, DeployDestin ConfigDriveNetworkElement.class, 0L); } - LOG.debug("Creating config drive ISO for vm: " + profile.getInstanceName()); + logger.debug("Creating config drive ISO for vm: " + profile.getInstanceName()); Map customUserdataParamMap = getVMCustomUserdataParamMap(profile.getId()); @@ -666,7 +664,7 @@ private boolean deleteConfigDriveIso(final VirtualMachine vm) throws ResourceUna Long hostId = (vm.getHostId() != null) ? 
vm.getHostId() : vm.getLastHostId(); Location location = getConfigDriveLocation(vm.getId()); if (hostId == null) { - LOG.info(String.format("The VM was never booted; no config-drive ISO created for VM %s", vm.getName())); + logger.info(String.format("The VM was never booted; no config-drive ISO created for VM %s", vm.getName())); return true; } if (location == Location.HOST) { @@ -694,14 +692,14 @@ private boolean deleteConfigDriveIso(final VirtualMachine vm) throws ResourceUna ConfigDriveNetworkElement.class, 0L); } - LOG.debug("Deleting config drive ISO for vm: " + vm.getInstanceName()); + logger.debug("Deleting config drive ISO for vm: " + vm.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false, false, false); final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { - LOG.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + logger.error("Failed to remove config drive for instance: " + vm.getInstanceName()); return false; } return true; @@ -731,7 +729,7 @@ private void addConfigDriveDisk(final VirtualMachineProfile profile, final DataS profile.addDisk(new DiskTO(dataTO, CONFIGDRIVEDISKSEQ.longValue(), isoPath, Volume.Type.ISO)); } else { - LOG.warn("Config drive iso already is in VM profile."); + logger.warn("Config drive iso already is in VM profile."); } } diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index 839ab9ae0af2..4b2e06d52d4d 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import 
org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -121,7 +120,6 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider, UserDataServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, LoadBalancingServiceProvider, PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServiceProvider{ - private static final Logger s_logger = Logger.getLogger(VirtualRouterElement.class); protected static final Map> capabilities = setCapabilities(); @Inject @@ -198,12 +196,12 @@ protected boolean canHandle(final Network network, final Service service) { if (service == null) { if (!_networkMdl.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkMdl.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -294,7 +292,7 @@ public boolean applyFWRules(final Network network, final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply 
firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -341,7 +339,7 @@ public boolean applyLBRules(final Network network, final List final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -365,7 +363,7 @@ public String[] applyVpnUsers(final RemoteAccessVpn vpn, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply vpn users on the backend; virtual router" + " doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply vpn users on the backend; virtual router" + " doesn't exist in the network " + network.getId()); return null; } @@ -374,7 +372,7 @@ public String[] applyVpnUsers(final RemoteAccessVpn vpn, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't" + " exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need stop vpn on the backend; virtual router doesn't" + " exist in the network " + network.getId()); return true; } return _routerMgr.startRemoteAccessVpn(network, vpn, routers); } else { - s_logger.debug("Element " + getName() + " doesn't handle createVpn command"); + logger.debug("Element " + getName() + " doesn't handle createVpn command"); return false; } } @@ -409,13 +407,13 @@ public boolean stopVpn(final RemoteAccessVpn 
vpn) throws ResourceUnavailableExce if (canHandle(network, Service.Vpn)) { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug(String.format("There is no virtual router in network [uuid: %s, name: %s], it is not necessary to stop the VPN on backend.", + logger.debug(String.format("There is no virtual router in network [uuid: %s, name: %s], it is not necessary to stop the VPN on backend.", network.getUuid(), network.getName())); return true; } return _routerMgr.deleteRemoteAccessVpn(network, vpn, routers); } else { - s_logger.debug(String.format("Element %s doesn't handle removeVpn command", getName())); + logger.debug(String.format("Element %s doesn't handle removeVpn command", getName())); return false; } } @@ -433,7 +431,7 @@ public boolean applyIps(final Network network, final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -603,7 +601,7 @@ public boolean applyStaticNats(final Network network, final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -668,12 +666,12 @@ public boolean shutdown(final Network network, final ReservationContext context, for (final DomainRouterVO router : routers) { stopResult = stopResult && 
_routerMgr.stop(router, false, context.getCaller(), context.getAccount()) != null; if (!stopResult) { - s_logger.warn("Failed to stop virtual router element " + router + ", but would try to process clean up anyway."); + logger.warn("Failed to stop virtual router element " + router + ", but would try to process clean up anyway."); } if (cleanup) { destroyResult = destroyResult && _routerMgr.destroyRouter(router.getId(), context.getAccount(), context.getCaller().getId()) != null; if (!destroyResult) { - s_logger.warn("Failed to clean up virtual router element " + router); + logger.warn("Failed to clean up virtual router element " + router); } } } @@ -705,7 +703,7 @@ public boolean savePassword(final Network network, final NicProfile nic, final V } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network " + network.getId()); return true; } @@ -722,7 +720,7 @@ public boolean savePassword(final Network network, final NicProfile nic, final V if (router.getState() == State.Running) { final boolean result = networkTopology.savePasswordToRouter(network, nic, uservm, router); if (!result) { - s_logger.error("Unable to save password for VM " + vm.getInstanceName() + + logger.error("Unable to save password for VM " + vm.getInstanceName() + " on router " + router.getInstanceName()); return false; } @@ -763,7 +761,7 @@ public boolean saveSSHKey(final Network network, final NicProfile nic, final Vir } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network " + network.getId()); return true; } @@ -813,7 +811,7 @@ public boolean 
saveUserData(final Network network, final NicProfile nic, final V } final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Can't find virtual router element in network " + network.getId()); + logger.debug("Can't find virtual router element in network " + network.getId()); return true; } @@ -842,7 +840,7 @@ public List> getCommands() { public VirtualRouterProvider configure(final ConfigureVirtualRouterElementCmd cmd) { final VirtualRouterProviderVO element = _vrProviderDao.findById(cmd.getId()); if (element == null || !(element.getType() == Type.VirtualRouter || element.getType() == Type.VPCVirtualRouter)) { - s_logger.debug("Can't find Virtual Router element with network service provider id " + cmd.getId()); + logger.debug("Can't find Virtual Router element with network service provider id " + cmd.getId()); return null; } @@ -856,7 +854,7 @@ public VirtualRouterProvider configure(final ConfigureVirtualRouterElementCmd cm public OvsProvider configure(final ConfigureOvsElementCmd cmd) { final OvsProviderVO element = _ovsProviderDao.findById(cmd.getId()); if (element == null) { - s_logger.debug("Can't find Ovs element with network service provider id " + cmd.getId()); + logger.debug("Can't find Ovs element with network service provider id " + cmd.getId()); return null; } @@ -873,7 +871,7 @@ public VirtualRouterProvider addElement(final Long nspId, final Type providerTyp } VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(nspId, providerType); if (element != null) { - s_logger.debug("There is already a virtual router element with service provider id " + nspId); + logger.debug("There is already a virtual router element with service provider id " + nspId); return null; } element = new VirtualRouterProviderVO(nspId, providerType); @@ -887,7 +885,7 @@ public boolean applyPFRules(final Network network, final List routers = 
_routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -1050,7 +1048,7 @@ protected boolean removeDhcpSupportForSubnet(Network network, Network.Service se try { return _routerMgr.removeDhcpSupportForSubnet(network, routers); } catch (final ResourceUnavailableException e) { - s_logger.info("Router resource unavailable ", e); + logger.info("Router resource unavailable ", e); } } return false; @@ -1238,7 +1236,7 @@ private boolean canHandleLbRules(final List rules) { if (schemeCaps != null) { for (final LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); + logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); return false; } } @@ -1251,12 +1249,12 @@ private void updateUserVmData(final NicProfile nic, final Network network, final if (_networkModel.areServicesSupportedByNetworkOffering(network.getNetworkOfferingId(), Service.UserData)) { boolean result = saveUserData(network, nic, vm); if (!result) { - s_logger.warn("Failed to update userdata for vm " + vm + " and nic " + nic); + logger.warn("Failed to update userdata for vm " + vm + " and nic " + nic); } else { - s_logger.debug("Successfully saved user data to router"); + logger.debug("Successfully saved user data to router"); } } else { - s_logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vm.getId() + " because it is not supported in network id=" + network.getId()); + logger.debug("Not applying 
userdata for nic id=" + nic.getId() + " in vm id=" + vm.getId() + " because it is not supported in network id=" + network.getId()); } } @@ -1275,7 +1273,7 @@ public boolean prepareMigration(final NicProfile nic, final Network network, fin try { networkTopology.setupDhcpForPvlan(false, router, router.getHostId(), nic); } catch (final ResourceUnavailableException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; @@ -1299,7 +1297,7 @@ public void rollbackMigration(final NicProfile nic, final Network network, final try { networkTopology.setupDhcpForPvlan(true, router, router.getHostId(), nic); } catch (final ResourceUnavailableException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; @@ -1322,7 +1320,7 @@ public void commitMigration(final NicProfile nic, final Network network, final V try { networkTopology.setupDhcpForPvlan(true, router, router.getHostId(), nic); } catch (final ResourceUnavailableException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; diff --git a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java index d740f80bd25b..3d1920bcbc37 100644 --- a/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VpcVirtualRouterElement.java @@ -76,13 +76,11 @@ import org.apache.cloudstack.network.router.deployment.RouterDeploymentDefinitionBuilder; import org.apache.cloudstack.network.topology.NetworkTopology; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import 
org.springframework.beans.factory.annotation.Qualifier; public class VpcVirtualRouterElement extends VirtualRouterElement implements VpcProvider, Site2SiteVpnServiceProvider, NetworkACLServiceProvider { - private static final Logger s_logger = Logger.getLogger(VpcVirtualRouterElement.class); private static final Map> capabilities = setCapabilities(); @@ -134,12 +132,12 @@ protected boolean canHandle(final Network network, final Service service) { if (service == null) { if (!_networkMdl.isProviderForNetwork(getProvider(), network.getId())) { - s_logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); + logger.trace("Element " + getProvider().getName() + " is not a provider for the network " + network); return false; } } else { if (!_networkMdl.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); + logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } } @@ -186,13 +184,13 @@ public boolean implement(final Network network, final NetworkOffering offering, final Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.trace("Network " + network + " is not associated with any VPC"); + logger.trace("Network " + network + " is not associated with any VPC"); return false; } final Vpc vpc = _vpcMgr.getActiveVpc(vpcId); if (vpc == null) { - s_logger.warn("Unable to find Enabled VPC by id " + vpcId); + logger.warn("Unable to find Enabled VPC by id " + vpcId); return false; } @@ -226,7 +224,7 @@ public boolean implement(final Network network, final NetworkOffering offering, protected void configureGuestNetwork(final Network network, final List routers ) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { - 
s_logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); for (final DomainRouterVO router : routers) { if (!_networkMdl.isVmPartOfNetwork(router.getId(), network.getId())) { @@ -235,9 +233,9 @@ protected void configureGuestNetwork(final Network network, final List getRouters(final Network network, final DeployDes //For the 2nd time it returns the VPC routers. final Long vpcId = network.getVpcId(); if (vpcId == null) { - s_logger.error("Network " + network + " is not associated with any VPC"); + logger.error("Network " + network + " is not associated with any VPC"); return routers; } final Vpc vpc = _vpcMgr.getActiveVpc(vpcId); if (vpc == null) { - s_logger.warn("Unable to find Enabled VPC by id " + vpcId); + logger.warn("Unable to find Enabled VPC by id " + vpcId); return routers; } @@ -376,11 +374,11 @@ protected List getRouters(final Network network, final DeployDes try { routers = routerDeploymentDefinition.deployVirtualRouter(); } catch (final ConcurrentOperationException e) { - s_logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); + logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); } catch (final InsufficientCapacityException e) { - s_logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); + logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); } catch (final ResourceUnavailableException e) { - s_logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); + logger.error("Error occurred when loading routers from routerDeploymentDefinition.deployVirtualRouter()!", e); } return routers; @@ -420,17 +418,17 @@ public Map> getCapabilities() { @Override public 
boolean createPrivateGateway(final PrivateGateway gateway) throws ConcurrentOperationException, ResourceUnavailableException { if (gateway.getType() != VpcGateway.Type.Private) { - s_logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); + logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); return true; } final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - s_logger.debug(getName() + " element doesn't need to create Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug(getName() + " element doesn't need to create Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); return true; } - s_logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); final DataCenterVO dcVO = _dcDao.findById(gateway.getZoneId()); final NetworkTopology networkTopology = networkTopologyContext.retrieveNetworkTopology(dcVO); @@ -445,7 +443,7 @@ public boolean createPrivateGateway(final PrivateGateway gateway) throws Concurr final List rules = _networkACLItemDao.listByACL(gateway.getNetworkACLId()); result = result && networkTopology.applyNetworkACLs(network, rules, domainRouterVO, isPrivateGateway); } catch (final Exception ex) { - s_logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); + logger.debug("Failed to apply network acl id " + gateway.getNetworkACLId() + " on gateway "); return false; } } @@ -457,17 +455,17 @@ public boolean createPrivateGateway(final PrivateGateway gateway) throws Concurr @Override public boolean deletePrivateGateway(final PrivateGateway gateway) throws ConcurrentOperationException, ResourceUnavailableException { if (gateway.getType() != VpcGateway.Type.Private) { - s_logger.warn("Type of 
vpc gateway is not " + VpcGateway.Type.Private); + logger.warn("Type of vpc gateway is not " + VpcGateway.Type.Private); return false; } final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - s_logger.debug(getName() + " element doesn't need to delete Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); + logger.debug(getName() + " element doesn't need to delete Private gateway on the backend; VPC virtual " + "router doesn't exist in the vpc id=" + gateway.getVpcId()); return true; } - s_logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); + logger.info("Adding VPC routers to Guest Network: " + routers.size() + " to be added!"); int result = 0; for (final DomainRouterVO domainRouterVO : routers) { @@ -492,7 +490,7 @@ public boolean applyIps(final Network network, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug(getName() + " element doesn't need to associate ip addresses on the backend; VPC virtual " + "router doesn't exist in the network " + logger.debug(getName() + " element doesn't need to associate ip addresses on the backend; VPC virtual " + "router doesn't exist in the network " + network.getId()); return false; } @@ -513,7 +511,7 @@ public boolean applyNetworkACLs(final Network network, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -524,7 +522,7 @@ public boolean 
applyNetworkACLs(final Network network, final List routes) throws ResourceUnavailableException { final List routers = _routerDao.listByVpcId(vpc.getId()); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router elemnt doesn't need to static routes on the backend; virtual " + "router doesn't exist in the vpc " + vpc); + logger.debug("Virtual router elemnt doesn't need to static routes on the backend; virtual " + "router doesn't exist in the vpc " + vpc); return true; } @@ -550,7 +548,7 @@ public boolean applyStaticRoutes(final Vpc vpc, final List r if (!networkTopology.applyStaticRoutes(routes, routers)) { throw new CloudRuntimeException("Failed to apply static routes in vpc " + vpc); } else { - s_logger.debug("Applied static routes on vpc " + vpc); + logger.debug("Applied static routes on vpc " + vpc); return true; } } @@ -562,7 +560,7 @@ public boolean applyACLItemsToPrivateGw(final PrivateGateway gateway, final List final List routers = _vpcRouterMgr.getVpcRouters(gateway.getVpcId()); if (routers == null || routers.isEmpty()) { - s_logger.debug("Virtual router element doesn't need to apply network acl rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); + logger.debug("Virtual router element doesn't need to apply network acl rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; } @@ -577,7 +575,7 @@ public boolean applyACLItemsToPrivateGw(final PrivateGateway gateway, final List if (nicProfile != null) { result = result && networkTopology.applyNetworkACLs(network, rules, domainRouterVO, isPrivateGateway); } else { - s_logger.warn("Nic Profile for router '" + domainRouterVO + "' has already been removed. Router is redundant = " + domainRouterVO.getIsRedundantRouter()); + logger.warn("Nic Profile for router '" + domainRouterVO + "' has already been removed. 
Router is redundant = " + domainRouterVO.getIsRedundantRouter()); } } return result; @@ -590,7 +588,7 @@ public boolean startSite2SiteVpn(final Site2SiteVpnConnection conn) throws Resou final Map vpnCapabilities = capabilities.get(Service.Vpn); if (!vpnCapabilities.get(Capability.VpnTypes).contains("s2svpn")) { - s_logger.error("try to start site 2 site vpn on unsupported network element?"); + logger.error("try to start site 2 site vpn on unsupported network element?"); return false; } @@ -621,7 +619,7 @@ public boolean stopSite2SiteVpn(final Site2SiteVpnConnection conn) throws Resour final Map vpnCapabilities = capabilities.get(Service.Vpn); if (!vpnCapabilities.get(Capability.VpnTypes).contains("s2svpn")) { - s_logger.error("try to stop site 2 site vpn on unsupported network element?"); + logger.error("try to stop site 2 site vpn on unsupported network element?"); return false; } @@ -655,7 +653,7 @@ public String[] applyVpnUsers(final RemoteAccessVpn vpn, final List routers = _vpcRouterMgr.getVpcRouters(vpcId); if (routers == null) { - s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpcId); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpcId); return null; } @@ -684,7 +682,7 @@ public boolean startVpn(final RemoteAccessVpn vpn) throws ResourceUnavailableExc final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); return false; } @@ -703,7 +701,7 @@ public boolean stopVpn(final RemoteAccessVpn vpn) throws ResourceUnavailableExce final List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); if (routers == null) { - s_logger.debug("Cannot apply vpn users on the backend; virtual 
router doesn't exist in the network " + vpn.getVpcId()); + logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); return false; } diff --git a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java index b08df5a3d1b0..5b0d9eb190d5 100644 --- a/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/main/java/com/cloud/network/firewall/FirewallManagerImpl.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -101,7 +100,6 @@ @Component public class FirewallManagerImpl extends ManagerBase implements FirewallService, FirewallManager, NetworkRuleApplier { - private static final Logger s_logger = Logger.getLogger(FirewallManagerImpl.class); @Inject FirewallRulesDao _firewallDao; @@ -163,7 +161,7 @@ public boolean configure(String name, Map params) throws Configu @Override public boolean start() { - s_logger.info("Firewall provider list is " + _firewallElements.iterator().next()); + logger.info("Firewall provider list is " + _firewallElements.iterator().next()); return super.start(); } @@ -472,8 +470,8 @@ public void detectRulesConflict(FirewallRule newRule) throws NetworkRuleConflict } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No network rule conflicts detected for " + newRule + " against " + (rules.size() - 1) + " existing rules"); + if (logger.isDebugEnabled()) { + logger.debug("No network rule conflicts detected for " + newRule + " against " + (rules.size() - 1) + " existing rules"); } } @@ -560,7 +558,7 @@ public void validateFirewallRule(Account caller, IPAddressVO 
ipAddress, Integer public boolean applyRules(List rules, boolean continueOnError, boolean updateRulesInDB) throws ResourceUnavailableException { boolean success = true; if (rules == null || rules.size() == 0) { - s_logger.debug("There are no rules to forward to the network elements"); + logger.debug("There are no rules to forward to the network elements"); return true; } Purpose purpose = rules.get(0).getPurpose(); @@ -572,7 +570,7 @@ public boolean applyRules(List rules, boolean continueOn applied = _ipAddrMgr.applyRules(rules, purpose, this, continueOnError); } if (!applied) { - s_logger.warn("Rules are not completely applied"); + logger.warn("Rules are not completely applied"); return false; } else { if (updateRulesInDB) { @@ -580,7 +578,7 @@ public boolean applyRules(List rules, boolean continueOn if (rule.getState() == FirewallRule.State.Revoke) { FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(rule.getId()); if (relatedRule != null) { - s_logger.warn("Can't remove the firewall rule id=" + rule.getId() + " as it has related firewall rule id=" + relatedRule.getId() + + logger.warn("Can't remove the firewall rule id=" + rule.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); success = false; } else { @@ -648,7 +646,7 @@ public boolean applyRules(Network network, Purpose purpose, List rules, boolean continueOnError, Account caller) { if (rules.size() == 0) { - s_logger.debug("There are no firewall rules to apply"); + logger.debug("There are no firewall rules to apply"); return true; } @@ -703,7 +701,7 @@ public boolean applyFirewallRules(List rules, boolean continueOn return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply firewall rules due to : "+ ex.getMessage()); + logger.warn("Failed to apply firewall rules due to : "+ ex.getMessage()); return false; } @@ -713,7 +711,7 @@ public boolean applyFirewallRules(List rules, boolean continueOn @Override public 
boolean applyDefaultEgressFirewallRule(Long networkId, boolean defaultPolicy, boolean add) throws ResourceUnavailableException { - s_logger.debug("applying default firewall egress rules "); + logger.debug("applying default firewall egress rules "); NetworkVO network = _networkDao.findById(networkId); List sourceCidr = new ArrayList(); @@ -736,7 +734,7 @@ public boolean applyDefaultEgressFirewallRule(Long networkId, boolean defaultPol return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply default egress rules for guest network due to ", ex); + logger.warn("Failed to apply default egress rules for guest network due to ", ex); return false; } return true; @@ -855,8 +853,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { boolean generateUsageEvent = false; if (rule.getState() == State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a rule that is still in stage state so just removing it: " + rule); + if (logger.isDebugEnabled()) { + logger.debug("Found a rule that is still in stage state so just removing it: " + rule); } removeRule(rule); generateUsageEvent = true; @@ -885,8 +883,8 @@ public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) List rules = new ArrayList(); List fwRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + fwRules.size() + " firewall rules for ip id=" + ipId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + fwRules.size() + " firewall rules for ip id=" + ipId); } for (FirewallRuleVO rule : fwRules) { @@ -906,8 +904,8 @@ public boolean revokeFirewallRulesForIp(long ipId, long userId, Account caller) // Now we check again in case more rules have been inserted. 
rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.Firewall)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully released firewall rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully released firewall rules for ip id=" + ipId + " and # of rules now = " + rules.size()); } return rules.size() == 0; @@ -936,8 +934,8 @@ public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Acc List rules = new ArrayList(); List fwRules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + fwRules.size() + " firewall rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + fwRules.size() + " firewall rules for network id=" + networkId); } for (FirewallRuleVO rule : fwRules) { @@ -953,8 +951,8 @@ public boolean revokeAllFirewallRulesForNetwork(long networkId, long userId, Acc // Now we check again in case more rules have been inserted. 
rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.Firewall)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully released firewall rules for network id=" + networkId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully released firewall rules for network id=" + networkId + " and # of rules now = " + rules.size()); } return success && rules.size() == 0; @@ -965,11 +963,11 @@ public boolean revokeRelatedFirewallRule(long ruleId, boolean apply) { FirewallRule fwRule = _firewallDao.findByRelatedId(ruleId); if (fwRule == null) { - s_logger.trace("No related firewall rule exists for rule id=" + ruleId + " so returning true here"); + logger.trace("No related firewall rule exists for rule id=" + ruleId + " so returning true here"); return true; } - s_logger.debug("Revoking Firewall rule id=" + fwRule.getId() + " as a part of rule delete id=" + ruleId + " with apply=" + apply); + logger.debug("Revoking Firewall rule id=" + fwRule.getId() + " as a part of rule delete id=" + ruleId + " with apply=" + apply); return revokeIngressFirewallRule(fwRule.getId(), apply); } @@ -1005,10 +1003,10 @@ public boolean revokeFirewallRulesForVm(long vmId) { Set ipsToReprogram = new HashSet(); if (firewallRules.isEmpty()) { - s_logger.debug("No firewall rules are found for vm id=" + vmId); + logger.debug("No firewall rules are found for vm id=" + vmId); return true; } else { - s_logger.debug("Found " + firewallRules.size() + " to cleanup for vm id=" + vmId); + logger.debug("Found " + firewallRules.size() + " to cleanup for vm id=" + vmId); } for (FirewallRuleVO rule : firewallRules) { @@ -1019,11 +1017,11 @@ public boolean revokeFirewallRulesForVm(long vmId) { // apply rules for all ip addresses for (Long ipId : ipsToReprogram) { - s_logger.debug("Applying firewall rules for ip address id=" + ipId + " as a part of vm expunge"); + logger.debug("Applying firewall rules for ip address id=" + ipId 
+ " as a part of vm expunge"); try { success = success && applyIngressFirewallRules(ipId, _accountMgr.getSystemAccount()); } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply firewall rules for ip id=" + ipId); + logger.warn("Failed to apply firewall rules for ip id=" + ipId); success = false; } } @@ -1043,7 +1041,7 @@ public boolean addSystemFirewallRules(IPAddressVO ip, Account acct) { createFirewallRule(ip.getId(), acct, rule.getXid(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), rule.getSourceCidrList(),null, rule.getIcmpCode(), rule.getIcmpType(), rule.getRelated(), FirewallRuleType.System, rule.getNetworkId(), rule.getTrafficType(), true); } catch (Exception e) { - s_logger.debug("Failed to add system wide firewall rule, due to:" + e.toString()); + logger.debug("Failed to add system wide firewall rule, due to:" + e.toString()); } } return true; diff --git a/server/src/main/java/com/cloud/network/guru/ControlNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/ControlNetworkGuru.java index ce62c7b4e3fe..0ed71cf8b3f8 100644 --- a/server/src/main/java/com/cloud/network/guru/ControlNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/ControlNetworkGuru.java @@ -22,7 +22,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.dc.DataCenter; @@ -53,7 +52,6 @@ import com.cloud.vm.VirtualMachineProfile; public class ControlNetworkGuru extends PodBasedNetworkGuru implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(ControlNetworkGuru.class); @Inject DataCenterDao _dcDao; @Inject @@ -84,7 +82,7 @@ protected boolean canHandle(NetworkOffering offering) { if (offering.isSystemOnly() && isMyTrafficType(offering.getTrafficType())) { return true; } else { - s_logger.trace("We only care about System only Control network"); + 
logger.trace("We only care about System only Control network"); return false; } } @@ -152,7 +150,7 @@ public void reserve(NicProfile nic, Network config, VirtualMachineProfile vm, De String netmask = NetUtils.cidr2Netmask(_cidr); - s_logger.debug(String.format("Reserved NIC for %s [ipv4:%s netmask:%s gateway:%s]", vm.getInstanceName(), ip, netmask, _gateway)); + logger.debug(String.format("Reserved NIC for %s [ipv4:%s netmask:%s gateway:%s]", vm.getInstanceName(), ip, netmask, _gateway)); nic.setIPv4Address(ip); nic.setMacAddress(NetUtils.long2Mac(NetUtils.ip2Long(ip) | (14l << 40))); @@ -170,14 +168,14 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat DataCenterVO dcVo = _dcDao.findById(dcId); if (dcVo.getNetworkType() != NetworkType.Basic) { super.release(nic, vm, reservationId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; } else { nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; } @@ -186,8 +184,8 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat _dcDao.releaseLinkLocalIpAddress(nic.getId(), reservationId); nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; @@ -225,7 +223,7 @@ public boolean configure(String name, Map params) throws Configu _gateway = NetUtils.getLinkLocalGateway(); } - s_logger.info("Control network setup: cidr=" + _cidr + "; gateway = " + _gateway); + logger.info("Control network setup: cidr=" + _cidr + "; gateway = " + _gateway); return true; } diff --git a/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java index 
7763d5b1adba..c8c323475908 100644 --- a/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/DirectNetworkGuru.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -80,7 +79,6 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(DirectNetworkGuru.class); @Inject DataCenterDao _dcDao; @@ -128,7 +126,7 @@ protected boolean isMyIsolationMethod(PhysicalNetwork physicalNetwork) { List isolationMethods = physicalNetwork.getIsolationMethods(); if (CollectionUtils.isNotEmpty(isolationMethods)) { for (String method : isolationMethods) { - s_logger.debug(method + ": " + m.toString()); + logger.debug(method + ": " + m.toString()); if (method.equalsIgnoreCase(m.toString())) { return true; } @@ -156,7 +154,7 @@ && isMyIsolationMethod(physnet) && physnet.getIsolationMethods().contains("GRE")) { return true; } else { - s_logger.trace("We only take care of Shared Guest networks without Ovs or NiciraNvp provider"); + logger.trace("We only take care of Shared Guest networks without Ovs or NiciraNvp provider"); return false; } } @@ -167,7 +165,7 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use PhysicalNetworkVO physnet = _physicalNetworkDao.findById(plan.getPhysicalNetworkId()); if (!canHandle(offering, dc, physnet)) { - s_logger.info("Refusing to design this network"); + logger.info("Refusing to design this network"); return null; } @@ -326,7 +324,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff if (vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = 
_networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " and ipv6 address " + nic.getIPv6Address() + + logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " and ipv6 address " + nic.getIPv6Address() + " for the network " + network); _networkMgr.savePlaceholderNic(network, nic.getIPv4Address(), nic.getIPv6Address(), VirtualMachine.Type.DomainRouter); } @@ -355,8 +353,8 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin @Override @DB public void deallocate(final Network network, final NicProfile nic, VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } if (nic.getIPv4Address() != null) { @@ -368,14 +366,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // if the ip address a part of placeholder, don't release it Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null && placeholderNic.getIPv4Address().equalsIgnoreCase(ip.getAddress().addr())) { - s_logger.debug("Not releasing direct ip " + ip.getId() + " yet as its ip is saved in the placeholder"); + logger.debug("Not releasing direct ip " + ip.getId() + " yet as its ip is saved in the placeholder"); } else { _ipAddrMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); } //unassign nic secondary ip address - s_logger.debug("remove nic " + nic.getId() + " secondary ip "); + logger.debug("remove nic " + nic.getId() + " secondary ip "); List nicSecIps = null; nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); for (String secIp : nicSecIps) { @@ -415,12 +413,12 @@ 
public boolean trash(Network network, NetworkOffering offering) { public void doInTransactionWithoutResult(TransactionStatus status) { for (Nic nic : nics) { if (nic.getIPv4Address() != null) { - s_logger.debug("Releasing ip " + nic.getIPv4Address() + " of placeholder nic " + nic); + logger.debug("Releasing ip " + nic.getIPv4Address() + " of placeholder nic " + nic); IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); if (ip != null) { _ipAddrMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); - s_logger.debug("Removing placeholder nic " + nic); + logger.debug("Removing placeholder nic " + nic); _nicDao.remove(nic.getId()); } } @@ -430,7 +428,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } return true; }catch (Exception e) { - s_logger.error("trash. Exception:" + e.getMessage()); + logger.error("trash. Exception:" + e.getMessage()); throw new CloudRuntimeException("trash. Exception:" + e.getMessage(),e); } } diff --git a/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index 7186812151c7..2800e3284c1b 100644 --- a/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -21,7 +21,6 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.configuration.ZoneConfig; import com.cloud.dc.DataCenter; @@ -66,7 +65,6 @@ import com.googlecode.ipv6.IPv6Address; public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { - private static final Logger s_logger = Logger.getLogger(DirectPodBasedNetworkGuru.class); @Inject DataCenterDao _dcDao; @@ -89,7 +87,7 @@ protected boolean canHandle(NetworkOffering offering, DataCenter dc, PhysicalNet if 
(dc.getNetworkType() == NetworkType.Basic && isMyTrafficType(offering.getTrafficType())) { return true; } else { - s_logger.trace("We only take care of Guest Direct Pod based networks"); + logger.trace("We only take care of Guest Direct Pod based networks"); return false; } } @@ -185,7 +183,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff if (placeholderNic != null) { IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIPv4Address()); ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - s_logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network + + logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network + " and gateway " + podRangeGateway); } } @@ -210,7 +208,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff if (vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " for the network " + network); + logger.debug("Saving placeholder nic with ip4 address " + nic.getIPv4Address() + " for the network " + network); _networkMgr.savePlaceholderNic(network, nic.getIPv4Address(), null, VirtualMachine.Type.DomainRouter); } } @@ -228,16 +226,16 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Insuff */ if (vlan.getIp6Cidr() != null) { if (nic.getIPv6Address() == null) { - s_logger.debug("Found IPv6 CIDR " + vlan.getIp6Cidr() + " for VLAN " + vlan.getId()); + logger.debug("Found IPv6 CIDR " + vlan.getIp6Cidr() + " for VLAN " + vlan.getId()); nic.setIPv6Cidr(vlan.getIp6Cidr()); nic.setIPv6Gateway(vlan.getIp6Gateway()); IPv6Address ipv6addr = 
NetUtils.EUI64Address(vlan.getIp6Cidr(), nic.getMacAddress()); - s_logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); + logger.info("Calculated IPv6 address " + ipv6addr + " using EUI-64 for NIC " + nic.getUuid()); nic.setIPv6Address(ipv6addr.toString()); } } else { - s_logger.debug("No IPv6 CIDR configured for VLAN " + vlan.getId()); + logger.debug("No IPv6 CIDR configured for VLAN " + vlan.getId()); } } }); diff --git a/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java index 471f4d11b256..d11483eba6a1 100644 --- a/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/ExternalGuestNetworkGuru.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -72,7 +71,6 @@ import com.cloud.vm.VirtualMachineProfile; public class ExternalGuestNetworkGuru extends GuestNetworkGuru { - private static final Logger s_logger = Logger.getLogger(ExternalGuestNetworkGuru.class); @Inject NetworkOrchestrationService _networkMgr; @Inject @@ -104,7 +102,7 @@ protected boolean canHandle(NetworkOffering offering, final NetworkType networkT && isMyIsolationMethod(physicalNetwork) && !offering.isSystemOnly()) { return true; } else { - s_logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -273,7 +271,7 @@ public NicProfile allocate(Network config, NicProfile nic, VirtualMachineProfile if 
(!isPublicNetwork) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(config, null); if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + profile.getIPv4Address() + + logger.debug("Saving placeholder nic with ip4 address " + profile.getIPv4Address() + " and ipv6 address " + profile.getIPv6Address() + " for the network " + config); _networkMgr.savePlaceholderNic(config, profile.getIPv4Address(), profile.getIPv6Address(), VirtualMachine.Type.DomainRouter); } diff --git a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java index 137d1e7268bf..46a0a0ac67d3 100644 --- a/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/GuestNetworkGuru.java @@ -29,7 +29,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.dc.DataCenter; @@ -91,7 +90,6 @@ import com.cloud.vm.dao.NicDao; public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGuru, Configurable { - private static final Logger s_logger = Logger.getLogger(GuestNetworkGuru.class); @Inject protected VpcDao _vpcDao; @@ -198,7 +196,7 @@ public boolean isMyIsolationMethod(final PhysicalNetwork physicalNetwork) { } if (methods.isEmpty()) { // The empty isolation method is assumed to be VLAN - s_logger.debug("Empty physical isolation type for physical network " + physicalNetwork.getUuid()); + logger.debug("Empty physical isolation type for physical network " + physicalNetwork.getUuid()); methods = new ArrayList(1); methods.add("VLAN".toLowerCase()); } @@ -291,8 +289,8 @@ public Network design(final NetworkOffering offering, final DeploymentPlan plan, @DB public void deallocate(final Network network, 
final NicProfile nic, final VirtualMachineProfile vm) { if (network.getSpecifyIpRanges()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -431,7 +429,7 @@ public NicProfile allocate(final Network network, NicProfile nic, final VirtualM if (network.getGuestType() != GuestType.L2 && vm.getType() == VirtualMachine.Type.DomainRouter) { Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); if (placeholderNic != null) { - s_logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); + logger.debug("Nic got an ip address " + placeholderNic.getIPv4Address() + " stored in placeholder nic for the network " + network); guestIp = placeholderNic.getIPv4Address(); } } @@ -508,7 +506,7 @@ public void shutdown(final NetworkProfile profile, final NetworkOffering offerin } if ((profile.getBroadcastDomainType() == BroadcastDomainType.Vlan || profile.getBroadcastDomainType() == BroadcastDomainType.Vxlan) && !offering.isSpecifyVlan()) { - s_logger.debug("Releasing vnet for the network id=" + profile.getId()); + logger.debug("Releasing vnet for the network id=" + profile.getId()); _dcDao.releaseVnet(BroadcastDomainType.getValue(profile.getBroadcastUri()), profile.getDataCenterId(), profile.getPhysicalNetworkId(), profile.getAccountId(), profile.getReservationId()); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), profile.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_ZONE_VLAN_RELEASE, diff --git a/server/src/main/java/com/cloud/network/guru/PodBasedNetworkGuru.java 
b/server/src/main/java/com/cloud/network/guru/PodBasedNetworkGuru.java index 9f9771e7cfb1..3a0ad7e90939 100644 --- a/server/src/main/java/com/cloud/network/guru/PodBasedNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/PodBasedNetworkGuru.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import com.cloud.network.NetworkModel; -import org.apache.log4j.Logger; import com.cloud.dc.Pod; import com.cloud.dc.dao.DataCenterDao; @@ -50,7 +49,6 @@ import com.cloud.vm.VirtualMachineProfile; public class PodBasedNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(PodBasedNetworkGuru.class); @Inject DataCenterDao _dcDao; @Inject @@ -141,7 +139,7 @@ public void reserve(NicProfile nic, Network config, VirtualMachineProfile vm, De } nic.setIsolationUri(null); - s_logger.debug("Allocated a nic " + nic + " for " + vm); + logger.debug("Allocated a nic " + nic + " for " + vm); } @Override @@ -158,8 +156,8 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Released nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Released nic: " + nic); } return true; diff --git a/server/src/main/java/com/cloud/network/guru/PrivateNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/PrivateNetworkGuru.java index a5eac9aac1b8..9f6551fd78d3 100644 --- a/server/src/main/java/com/cloud/network/guru/PrivateNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/PrivateNetworkGuru.java @@ -18,7 +18,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.DataCenter; @@ -55,7 +54,6 @@ import com.cloud.vm.VirtualMachineProfile; public class PrivateNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(PrivateNetworkGuru.class); @Inject protected 
ConfigurationManager _configMgr; @Inject @@ -92,7 +90,7 @@ protected boolean canHandle(NetworkOffering offering, DataCenter dc) { offering.isSystemOnly()) { return true; } else { - s_logger.trace("We only take care of system Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + logger.trace("We only take care of system Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); return false; } } @@ -139,8 +137,8 @@ public Network design(NetworkOffering offering, DeploymentPlan plan, Network use @Override public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } PrivateIpVO ip = _privateIpDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); diff --git a/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java index e8374b39f53d..0061064c8960 100644 --- a/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/PublicNetworkGuru.java @@ -19,7 +19,6 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.Vlan.VlanType; @@ -61,7 +60,6 @@ import com.cloud.vm.VirtualMachineProfile; public class PublicNetworkGuru extends AdapterBase implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(PublicNetworkGuru.class); @Inject DataCenterDao _dcDao; @@ -213,8 +211,8 @@ public Network implement(Network network, NetworkOffering offering, DeployDestin @Override @DB public void 
deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); + if (logger.isDebugEnabled()) { + logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIPv4Address()); } final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIPv4Address()); @@ -229,8 +227,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } nic.deallocate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deallocated nic: " + nic); + if (logger.isDebugEnabled()) { + logger.debug("Deallocated nic: " + nic); } } diff --git a/server/src/main/java/com/cloud/network/guru/StorageNetworkGuru.java b/server/src/main/java/com/cloud/network/guru/StorageNetworkGuru.java index a26705e6d2c6..0f199a751264 100644 --- a/server/src/main/java/com/cloud/network/guru/StorageNetworkGuru.java +++ b/server/src/main/java/com/cloud/network/guru/StorageNetworkGuru.java @@ -19,7 +19,6 @@ import javax.inject.Inject; import com.cloud.network.NetworkModel; -import org.apache.log4j.Logger; import com.cloud.dc.Pod; import com.cloud.dc.StorageNetworkIpAddressVO; @@ -45,7 +44,6 @@ import com.cloud.vm.VirtualMachineProfile; public class StorageNetworkGuru extends PodBasedNetworkGuru implements NetworkGuru { - private static final Logger s_logger = Logger.getLogger(StorageNetworkGuru.class); @Inject StorageNetworkManager _sNwMgr; @Inject @@ -76,7 +74,7 @@ protected boolean canHandle(NetworkOffering offering) { if (isMyTrafficType(offering.getTrafficType()) && offering.isSystemOnly()) { return true; } else { - s_logger.trace("It's not storage network offering, skip it."); + logger.trace("It's not storage network offering, skip it."); return false; } } @@ -143,7 +141,7 @@ public void reserve(NicProfile nic, Network network, VirtualMachineProfile vm, D 
nic.setBroadcastUri(null); } nic.setIsolationUri(null); - s_logger.debug("Allocated a storage nic " + nic + " for " + vm); + logger.debug("Allocated a storage nic " + nic + " for " + vm); } @Override @@ -154,7 +152,7 @@ public boolean release(NicProfile nic, VirtualMachineProfile vm, String reservat } _sNwMgr.releaseIpAddress(nic.getIPv4Address()); - s_logger.debug("Release an storage ip " + nic.getIPv4Address()); + logger.debug("Release an storage ip " + nic.getIPv4Address()); nic.deallocate(); return true; } diff --git a/server/src/main/java/com/cloud/network/lb/LBHealthCheckManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LBHealthCheckManagerImpl.java index d8ad4d428339..b9d687e66b41 100644 --- a/server/src/main/java/com/cloud/network/lb/LBHealthCheckManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LBHealthCheckManagerImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -42,7 +41,6 @@ @Component public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthCheckManager, Manager { - private static final Logger s_logger = Logger.getLogger(LBHealthCheckManagerImpl.class); @Inject ConfigurationDao _configDao; @@ -58,8 +56,8 @@ public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthChe @Override public boolean configure(String name, Map params) throws ConfigurationException { _configs = _configDao.getConfiguration("management-server", params); - if (s_logger.isInfoEnabled()) { - s_logger.info(format("Configuring LBHealthCheck Manager %1$s", name)); + if (logger.isInfoEnabled()) { + logger.info(format("Configuring LBHealthCheck Manager %1$s", name)); } this.name = name; _interval = NumbersUtil.parseLong(_configs.get(Config.LBHealthCheck.key()), 600); @@ -69,14 +67,14 @@ public boolean configure(String name, 
Map params) throws Configu @Override public boolean start() { - s_logger.debug("LB HealthCheckmanager is getting Started"); + logger.debug("LB HealthCheckmanager is getting Started"); _executor.scheduleAtFixedRate(new UpdateLBHealthCheck(), 10, _interval, TimeUnit.SECONDS); return true; } @Override public boolean stop() { - s_logger.debug("HealthCheckmanager is getting Stopped"); + logger.debug("HealthCheckmanager is getting Stopped"); _executor.shutdown(); return true; } @@ -93,7 +91,7 @@ protected void runInContext() { updateLBHealthCheck(Scheme.Public); updateLBHealthCheck(Scheme.Internal); } catch (Exception e) { - s_logger.error("Exception in LB HealthCheck Update Checker", e); + logger.error("Exception in LB HealthCheck Update Checker", e); } } } @@ -103,9 +101,9 @@ public void updateLBHealthCheck(Scheme scheme) { try { _lbService.updateLBHealthChecks(scheme); } catch (ResourceUnavailableException e) { - s_logger.debug("Error while updating the LB HealtCheck ", e); + logger.debug("Error while updating the LB HealtCheck ", e); } - s_logger.debug("LB HealthCheck Manager is running and getting the updates from LB providers and updating service status"); + logger.debug("LB HealthCheck Manager is running and getting the updates from LB providers and updating service status"); } } diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 8cb8972e2957..844c3c1b9974 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -52,7 +52,6 @@ import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.LoadBalancerTO; import 
com.cloud.configuration.ConfigurationManager; @@ -178,7 +177,6 @@ import com.google.gson.reflect.TypeToken; public class LoadBalancingRulesManagerImpl extends ManagerBase implements LoadBalancingRulesManager, LoadBalancingRulesService { - private static final Logger s_logger = Logger.getLogger(LoadBalancingRulesManagerImpl.class); @Inject NetworkOrchestrationService _networkMgr; @@ -328,7 +326,7 @@ private LbAutoScaleVmGroup getLbAutoScaleVmGroup(AutoScaleVmGroupVO vmGroup, Aut DataCenter zone = _entityMgr.findById(DataCenter.class, vmGroup.getZoneId()); if (zone == null) { // This should never happen, but still a cautious check - s_logger.warn("Unable to find zone while packaging AutoScale Vm Group, zoneid: " + vmGroup.getZoneId()); + logger.warn("Unable to find zone while packaging AutoScale Vm Group, zoneid: " + vmGroup.getZoneId()); throw new InvalidParameterValueException("Unable to find zone"); } else { if (zone.getNetworkType() == NetworkType.Advanced) { @@ -443,7 +441,7 @@ private boolean applyAutoScaleConfig(LoadBalancerVO lb, AutoScaleVmGroupVO vmGro List rules = Arrays.asList(rule); if (!applyLbRules(new ArrayList<>(rules), false)) { - s_logger.debug("LB rules' autoscale config are not completely applied"); + logger.debug("LB rules' autoscale config are not completely applied"); return false; } @@ -480,16 +478,16 @@ public boolean configureLbAutoScaleVmGroup(final long vmGroupid, AutoScaleVmGrou try { success = applyAutoScaleConfig(loadBalancer, vmGroup, currentState); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavailable:", e); + logger.warn("Unable to configure AutoScaleVmGroup to the lb rule: " + loadBalancer.getId() + " because resource is unavailable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + 
loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating AutoscaleVmGroup"); } throw e; } finally { if (!success) { - s_logger.warn("Failed to configure LB Auto Scale Vm Group with Id:" + vmGroupid); + logger.warn("Failed to configure LB Auto Scale Vm Group with Id:" + vmGroupid); } } @@ -499,15 +497,15 @@ public boolean configureLbAutoScaleVmGroup(final long vmGroupid, AutoScaleVmGrou @Override public void doInTransactionWithoutResult(TransactionStatus status) { loadBalancer.setState(FirewallRule.State.Active); - s_logger.debug("LB rule " + loadBalancer.getId() + " state is set to Active"); + logger.debug("LB rule " + loadBalancer.getId() + " state is set to Active"); _lbDao.persist(loadBalancer); vmGroup.setState(AutoScaleVmGroup.State.ENABLED); _autoScaleVmGroupDao.persist(vmGroup); - s_logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); + logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); } }); } - s_logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); + logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); } return success; } @@ -714,7 +712,7 @@ public boolean validateLbRule(LoadBalancingRule lbRule) { Network network = _networkDao.findById(lbRule.getNetworkId()); Purpose purpose = lbRule.getPurpose(); if (purpose != Purpose.LoadBalancing) { - s_logger.debug("Unable to validate network rules for purpose: " + purpose.toString()); + logger.debug("Unable to validate network rules for purpose: " + purpose.toString()); return false; } for (LoadBalancingServiceProvider ne : _lbProviders) { @@ -755,12 +753,12 @@ public boolean applyLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) { try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply 
Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); + logger.warn("Unable to apply Stickiness policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); deleteLBStickinessPolicy(cmd.getEntityId(), false); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); } else { deleteLBStickinessPolicy(cmd.getEntityId(), false); if (oldStickinessPolicyId != 0) { @@ -771,7 +769,7 @@ public boolean applyLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) { if (backupState.equals(FirewallRule.State.Active)) applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e1) { - s_logger.info("[ignored] applying load balancer config.", e1); + logger.info("[ignored] applying load balancer config.", e1); } finally { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -801,11 +799,11 @@ public boolean applyLBHealthCheckPolicy(CreateLBHealthCheckPolicyCmd cmd) { try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); + logger.warn("Unable to apply healthcheck policy to the lb rule: " + cmd.getLbRuleId() + " because resource is unavailable:", e); if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating healthcheck policy"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating healthcheck policy"); } 
deleteLBHealthCheckPolicy(cmd.getEntityId(), false); success = false; @@ -841,11 +839,11 @@ public boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) boolean backupStickyState = stickinessPolicy.isRevoke(); stickinessPolicy.setRevoke(true); _lb2stickinesspoliciesDao.persist(stickinessPolicy); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", stickinesspolicyID " + stickinessPolicyId); try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); + logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for stickinesspolicyID " + stickinessPolicyId); } } catch (ResourceUnavailableException e) { @@ -854,9 +852,9 @@ public boolean deleteLBStickinessPolicy(long stickinessPolicyId, boolean apply) _lb2stickinesspoliciesDao.persist(stickinessPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting sticky policy: " + stickinessPolicyId); } - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; } } else { @@ -894,7 +892,7 @@ public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply boolean backupStickyState = healthCheckPolicy.isRevoke(); healthCheckPolicy.setRevoke(true); _lb2healthcheckDao.persist(healthCheckPolicy); - 
s_logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + ", healthCheckpolicyID " + healthCheckPolicyId); + logger.debug("Set health check policy to revoke for loadbalancing rule id : " + loadBalancerId + ", healthCheckpolicyID " + healthCheckPolicyId); // removing the state of services set by the monitor. final List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); @@ -902,7 +900,7 @@ public boolean deleteLBHealthCheckPolicy(long healthCheckPolicyId, boolean apply Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - s_logger.debug("Resetting health state policy for services in loadbalancing rule id : " + loadBalancerId); + logger.debug("Resetting health state policy for services in loadbalancing rule id : " + loadBalancerId); for (LoadBalancerVMMapVO map : maps) { map.setState(null); _lb2VmMapDao.persist(map); @@ -913,7 +911,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); + logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); throw new CloudRuntimeException("Failed to remove load balancer rule id " + loadBalancerId + " for healthCheckpolicyID " + healthCheckPolicyId); } } catch (ResourceUnavailableException e) { @@ -922,9 +920,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _lb2healthcheckDao.persist(healthCheckPolicy); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + healthCheckPolicyId); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while deleting healthcheck policy: " + 
healthCheckPolicyId); } - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; } } else { @@ -948,7 +946,7 @@ public void updateLBHealthChecks(Scheme scheme) throws ResourceUnavailableExcept if (capability != null && capability.equalsIgnoreCase("true")) { /* - * s_logger.debug( + * logger.debug( * "HealthCheck Manager :: LB Provider in the Network has the Healthcheck policy capability :: " * + provider.get(0).getName()); */ @@ -981,7 +979,7 @@ public void updateLBHealthChecks(Scheme scheme) throws ResourceUnavailableExcept if (dstIp.equalsIgnoreCase(lbto.getDestinations()[i].getDestIp())) { lbVmMap.setState(des.getMonitorState()); _lb2VmMapDao.persist(lbVmMap); - s_logger.debug("Updating the LB VM Map table with the service state"); + logger.debug("Updating the LB VM Map table with the service state"); } } } @@ -995,7 +993,7 @@ public void updateLBHealthChecks(Scheme scheme) throws ResourceUnavailableExcept } } } else { - // s_logger.debug("HealthCheck Manager :: LB Provider in the Network DNOT the Healthcheck policy capability "); + // logger.debug("HealthCheck Manager :: LB Provider in the Network DNOT the Healthcheck policy capability "); } } } @@ -1160,8 +1158,8 @@ public boolean assignToLoadBalancer(long loadBalancerId, List instanceIds, vmIdIpMap.put(instanceId, vmIpsList); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding " + vm + " to the load balancer pool"); + if (logger.isDebugEnabled()) { + logger.debug("Adding " + vm + " to the load balancer pool"); } vmsToAdd.add(vm); } @@ -1200,7 +1198,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { applyLoadBalancerConfig(loadBalancerId); success = true; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the 
load balancer config because resource is unavailable.", e); success = false; } finally { if (!success) { @@ -1215,7 +1213,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " + vmInstanceIds); } loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); @@ -1234,7 +1232,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @Override public boolean assignSSLCertToLoadBalancerRule(Long lbId, String certName, String publicCert, String privateKey) { - s_logger.error("Calling the manager for LB"); + logger.error("Calling the manager for LB"); LoadBalancerVO loadBalancer = _lbDao.findById(lbId); return false; //TODO @@ -1255,7 +1253,7 @@ public LbSslCert getLbSslCert(long lbRuleId) { SslCertVO certVO = _entityMgr.findById(SslCertVO.class, lbCertMap.getCertId()); if (certVO == null) { - s_logger.warn("Cert rule with cert ID " + lbCertMap.getCertId() + " but Cert is not found"); + logger.warn("Cert rule with cert ID " + lbCertMap.getCertId() + " but Cert is not found"); return null; } @@ -1317,9 +1315,9 @@ public boolean assignCertToLoadBalancer(long lbRuleId, Long certId) { _lbDao.persist(loadBalancer); LoadBalancerCertMapVO certMap = _lbCertMapDao.findByLbRuleId(lbRuleId); _lbCertMapDao.remove(certMap.getId()); - s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert"); + logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while adding cert"); } - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } return success; } @@ -1353,7 +1351,7 @@ public boolean 
removeCertFromLoadBalancer(long lbRuleId) { _lbCertMapDao.persist(lbCertMap); if (!applyLoadBalancerConfig(lbRuleId)) { - s_logger.warn("Failed to remove cert from load balancer rule id " + lbRuleId); + logger.warn("Failed to remove cert from load balancer rule id " + lbRuleId); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate load balancer rule id " + lbRuleId); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; @@ -1365,9 +1363,9 @@ public boolean removeCertFromLoadBalancer(long lbRuleId) { _lbCertMapDao.persist(lbCertMap); loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("Rolled back certificate removal lb id " + lbRuleId); + logger.debug("Rolled back certificate removal lb id " + lbRuleId); } - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); if (!success) { CloudRuntimeException ex = new CloudRuntimeException("Failed to remove certificate from load balancer rule id " + lbRuleId); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); @@ -1438,7 +1436,7 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i lbvm.setRevoke(true); _lb2VmMapDao.persist(lbvm); } - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId); } else { for (String vmIp: lbVmIps) { @@ -1449,14 +1447,14 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i } map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + instanceId + ", vmip " + vmIp); } } } if 
(!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); + logger.warn("Failed to remove load balancer rule id " + loadBalancerId + " for vms " + instanceIds); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + instanceIds); ex.addProxyObject(loadBalancer.getUuid(), "loadBalancerId"); throw ex; @@ -1481,13 +1479,13 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmId(loadBalancerId, instanceId); map.setRevoke(false); _lb2VmMapDao.persist(map); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId); + logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId); }else { for (String vmIp: lbVmIps) { LoadBalancerVMMapVO map = _lb2VmMapDao.findByLoadBalancerIdAndVmIdVmIp (loadBalancerId, instanceId, vmIp); map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + + logger.debug("LB Rollback rule id: " + loadBalancerId + ",while removing vmId " + instanceId + ", vmip " + vmIp); } } @@ -1495,9 +1493,9 @@ private boolean removeFromLoadBalancerInternal(long loadBalancerId, List i loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances"); + logger.debug("LB Rollback rule id: " + loadBalancerId + " while removing vm instances"); } - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } if (!success) { CloudRuntimeException ex = new CloudRuntimeException("Failed to remove specified load balancer rule id for vms " + vmIds); @@ -1529,7 +1527,7 @@ 
public boolean removeVmFromLoadBalancers(long instanceId) { map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); + logger.debug("Set load balancer rule for revoke: rule id " + map.getLoadBalancerId() + ", vmId " + instanceId); } // Reapply all lbs that had the vm assigned @@ -1588,8 +1586,8 @@ public List doInTransaction(TransactionStatus status) { boolean generateUsageEvent = false; if (lb.getState() == FirewallRule.State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a rule that is still in stage state so just removing it: " + lb); + if (logger.isDebugEnabled()) { + logger.debug("Found a rule that is still in stage state so just removing it: " + lb); } generateUsageEvent = true; } else if (lb.getState() == FirewallRule.State.Add || lb.getState() == FirewallRule.State.Active) { @@ -1603,7 +1601,7 @@ public List doInTransaction(TransactionStatus status) { for (LoadBalancerVMMapVO map : maps) { map.setRevoke(true); _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + map.getInstanceId()); } } @@ -1635,7 +1633,7 @@ public List doInTransaction(TransactionStatus status) { if (apply) { try { if (!applyLoadBalancerConfig(loadBalancerId)) { - s_logger.warn("Unable to apply the load balancer config"); + logger.warn("Unable to apply the load balancer config"); return false; } } catch (ResourceUnavailableException e) { @@ -1643,14 +1641,14 @@ public List doInTransaction(TransactionStatus status) { if (backupMaps != null) { for (LoadBalancerVMMapVO map : backupMaps) { _lb2VmMapDao.persist(map); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + ", vmId " + map.getInstanceId()); + logger.debug("LB Rollback rule id: " + loadBalancerId + ", vmId " + 
map.getInstanceId()); } } lb.setState(backupState); _lbDao.persist(lb); - s_logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule."); + logger.debug("LB Rollback rule id: " + loadBalancerId + " while deleting LB rule."); } else { - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); } return false; } @@ -1658,7 +1656,7 @@ public List doInTransaction(TransactionStatus status) { FirewallRuleVO relatedRule = _firewallDao.findByRelatedId(lb.getId()); if (relatedRule != null) { - s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + + logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); return false; } else { @@ -1670,7 +1668,7 @@ public List doInTransaction(TransactionStatus status) { // Bug CS-15411 opened to document this // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); - s_logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); + logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); return true; } @@ -1743,7 +1741,7 @@ public LoadBalancer createPublicLoadBalancerRule(String xId, String name, String // set networkId just for verification purposes _networkModel.checkIpForService(ipVO, Service.Lb, networkId); - s_logger.debug("The ip is not associated with the VPC network id=" + networkId + " so assigning"); + logger.debug("The ip is not associated with the VPC network id=" + networkId + " so assigning"); ipVO = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; } @@ -1758,7 +1756,7 @@ public LoadBalancer createPublicLoadBalancerRule(String xId, String name, String result = createPublicLoadBalancer(xId, name, 
description, srcPortStart, defPortStart, ipVO.getId(), protocol, algorithm, openFirewall, CallContext.current(), lbProtocol, forDisplay, cidrString); } catch (Exception ex) { - s_logger.warn("Failed to create load balancer due to ", ex); + logger.warn("Failed to create load balancer due to ", ex); if (ex instanceof NetworkRuleConflictException) { throw (NetworkRuleConflictException)ex; } @@ -1769,7 +1767,7 @@ public LoadBalancer createPublicLoadBalancerRule(String xId, String name, String } finally { if (result == null && systemIp != null) { - s_logger.debug("Releasing system IP address " + systemIp + " as corresponding lb rule failed to create"); + logger.debug("Releasing system IP address " + systemIp + " as corresponding lb rule failed to create"); _ipAddrMgr.handleSystemIpRelease(systemIp); } // release ip address if ipassoc was perfored @@ -1791,7 +1789,7 @@ public LoadBalancer createPublicLoadBalancerRule(String xId, String name, String */ protected String generateCidrString(List cidrList) { if (cidrList == null) { - s_logger.trace("The given CIDR list is null, therefore we will return null."); + logger.trace("The given CIDR list is null, therefore we will return null."); return null; } String cidrString; @@ -1801,7 +1799,7 @@ protected String generateCidrString(List cidrList) { sb.append(cidr).append(' '); } cidrString = sb.toString(); - s_logger.trace(String.format("From the cidrList [%s] we generated the following CIDR String [%s].", cidrList, cidrString)); + logger.trace(String.format("From the cidrList [%s] we generated the following CIDR String [%s].", cidrList, cidrString)); return StringUtils.trim(cidrString); } @@ -1907,7 +1905,7 @@ public LoadBalancerVO doInTransaction(TransactionStatus status) throws NetworkRu if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + 
srcPort + ", private port " + destPort + + logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + srcPort + ", private port " + destPort + " is added successfully."); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), ipAddr.getDataCenterId(), newRule.getId(), @@ -1965,8 +1963,8 @@ public boolean applyLoadBalancerConfig(long lbRuleId) throws ResourceUnavailable @Override public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Revoking " + lbs.size() + " " + scheme + " load balancing rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Revoking " + lbs.size() + " " + scheme + " load balancing rules for network id=" + networkId); } if (lbs != null) { for (LoadBalancerVO lb : lbs) { // called during restart, not persisting state in db @@ -1974,7 +1972,7 @@ public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) thro } return applyLoadBalancerRules(lbs, false); // called during restart, not persisting state in db } else { - s_logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); + logger.info("Network id=" + networkId + " doesn't have load balancer rules, nothing to revoke"); return true; } } @@ -1983,10 +1981,10 @@ public boolean revokeLoadBalancersForNetwork(long networkId, Scheme scheme) thro public boolean applyLoadBalancersForNetwork(long networkId, Scheme scheme) throws ResourceUnavailableException { List lbs = _lbDao.listByNetworkIdAndScheme(networkId, scheme); if (lbs != null) { - s_logger.debug("Applying load balancer rules of scheme " + scheme + " in network id=" + networkId); + logger.debug("Applying load 
balancer rules of scheme " + scheme + " in network id=" + networkId); return applyLoadBalancerRules(lbs, true); } else { - s_logger.info("Network id=" + networkId + " doesn't have load balancer rules of scheme " + scheme + ", nothing to apply"); + logger.info("Network id=" + networkId + " doesn't have load balancer rules of scheme " + scheme + ", nothing to apply"); return true; } } @@ -2036,7 +2034,7 @@ protected boolean applyLoadBalancerRules(List lbs, boolean updat } if (!applyLbRules(rules, false)) { - s_logger.debug("LB rules are not completely applied"); + logger.debug("LB rules are not completely applied"); return false; } @@ -2049,11 +2047,11 @@ public Boolean doInTransaction(TransactionStatus status) { if (lb.getState() == FirewallRule.State.Revoke) { removeLBRule(lb); - s_logger.debug("LB " + lb.getId() + " is successfully removed"); + logger.debug("LB " + lb.getId() + " is successfully removed"); checkForReleaseElasticIp = true; } else if (lb.getState() == FirewallRule.State.Add) { lb.setState(FirewallRule.State.Active); - s_logger.debug("LB rule " + lb.getId() + " state is set to Active"); + logger.debug("LB rule " + lb.getId() + " state is set to Active"); _lbDao.persist(lb); } @@ -2064,7 +2062,7 @@ public Boolean doInTransaction(TransactionStatus status) { for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { instanceIds.add(lbVmMap.getInstanceId()); _lb2VmMapDao.remove(lb.getId(), lbVmMap.getInstanceId(), lbVmMap.getInstanceIp(), null); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed for vm " + + logger.debug("Load balancer rule id " + lb.getId() + " is removed for vm " + lbVmMap.getInstanceId() + " instance ip " + lbVmMap.getInstanceIp()); } @@ -2072,14 +2070,14 @@ public Boolean doInTransaction(TransactionStatus status) { if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { lb.setState(FirewallRule.State.Add); _lbDao.persist(lb); - s_logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active 
LB-VM mappings"); + logger.debug("LB rule " + lb.getId() + " state is set to Add as there are no more active LB-VM mappings"); } // remove LB-Stickiness policy mapping that were state to revoke List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lb.getId(), true); if (!stickinesspolicies.isEmpty()) { _lb2stickinesspoliciesDao.remove(lb.getId(), true); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); + logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); } // remove LB-HealthCheck policy mapping that were state to @@ -2087,13 +2085,13 @@ public Boolean doInTransaction(TransactionStatus status) { List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), true); if (!healthCheckpolicies.isEmpty()) { _lb2healthcheckDao.remove(lb.getId(), true); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); + logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); } LoadBalancerCertMapVO lbCertMap = _lbCertMapDao.findByLbRuleId(lb.getId()); if (lbCertMap != null && lbCertMap.isRevoke()) { _lbCertMapDao.remove(lbCertMap.getId()); - s_logger.debug("Load balancer rule id " + lb.getId() + " removed certificate mapping"); + logger.debug("Load balancer rule id " + lb.getId() + " removed certificate mapping"); } return checkForReleaseElasticIp; @@ -2107,11 +2105,11 @@ public Boolean doInTransaction(TransactionStatus status) { try { success = handleSystemLBIpRelease(lb); } catch (Exception ex) { - s_logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion due to exception ", ex); + logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion due to exception ", ex); success = false; } finally { if (!success) { - s_logger.warn("Failed to release system ip as a part of lb rule " + lb + " deletion"); + logger.warn("Failed to release 
system ip as a part of lb rule " + lb + " deletion"); } } } @@ -2132,12 +2130,12 @@ protected boolean handleSystemLBIpRelease(LoadBalancerVO lb) { IpAddress ip = _ipAddressDao.findById(lb.getSourceIpAddressId()); boolean success = true; if (ip.getSystem()) { - s_logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.debug("Releasing system ip address " + lb.getSourceIpAddressId() + " as a part of delete lb rule"); if (!_ipAddrMgr.disassociatePublicIpAddress(lb.getSourceIpAddressId(), CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { - s_logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.warn("Unable to release system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); success = false; } else { - s_logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); + logger.warn("Successfully released system ip address id=" + lb.getSourceIpAddressId() + " as a part of delete lb rule"); } } return success; @@ -2150,11 +2148,11 @@ public boolean removeAllLoadBalanacersForIp(long ipId, Account caller, long call List rules = _firewallDao.listByIpAndPurpose(ipId, Purpose.LoadBalancing); if (rules != null) { - s_logger.debug("Found " + rules.size() + " lb rules to cleanup"); + logger.debug("Found " + rules.size() + " lb rules to cleanup"); for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - s_logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule " + rule.getId()); return false; } } @@ -2166,11 +2164,11 @@ public boolean removeAllLoadBalanacersForIp(long ipId, Account caller, long call public boolean removeAllLoadBalanacersForNetwork(long networkId, Account caller, 
long callerUserId) { List rules = _firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.LoadBalancing); if (rules != null) { - s_logger.debug("Found " + rules.size() + " lb rules to cleanup"); + logger.debug("Found " + rules.size() + " lb rules to cleanup"); for (FirewallRule rule : rules) { boolean result = deleteLoadBalancerRule(rule.getId(), true, caller, callerUserId, false); if (result == false) { - s_logger.warn("Unable to remove load balancer rule " + rule.getId()); + logger.warn("Unable to remove load balancer rule " + rule.getId()); return false; } } @@ -2302,9 +2300,9 @@ public LoadBalancer updateLoadBalancerRule(UpdateLoadBalancerRuleCmd cmd) { _lbDao.update(lb.getId(), lb); _lbDao.persist(lb); - s_logger.debug("LB Rollback rule id: " + lbRuleId + " while updating LB rule."); + logger.debug("LB Rollback rule id: " + lbRuleId + " while updating LB rule."); } - s_logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); + logger.warn("Unable to apply the load balancer config because resource is unavailable.", e); success = false; } } @@ -2323,14 +2321,14 @@ public Pair, List> listLoadBalancerInstances(List Boolean applied = cmd.isApplied(); if (applied == null) { - s_logger.info(String.format("The [%s] parameter was not passed. Using the default value [%s].", ApiConstants.APPLIED, Boolean.TRUE)); + logger.info(String.format("The [%s] parameter was not passed. 
Using the default value [%s].", ApiConstants.APPLIED, Boolean.TRUE)); applied = Boolean.TRUE; } LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); if (loadBalancer == null) { String msg = String.format("Unable to find the load balancer with ID [%s].", cmd.getId()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -2344,7 +2342,7 @@ public Pair, List> listLoadBalancerInstances(List if (vmLoadBalancerMappings == null) { String msg = String.format("Unable to find map of VMs related to load balancer [%s].", loadBalancerAsString); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -2373,17 +2371,17 @@ public Pair, List> listLoadBalancerInstances(List boolean isApplied = appliedInstanceIdList.contains(userVm.getId()); String isAppliedMsg = isApplied ? "is applied" : "is not applied"; - s_logger.debug(String.format("The user VM [%s] %s to a rule of the load balancer [%s].", userVmAsString, isAppliedMsg, loadBalancerAsString)); + logger.debug(String.format("The user VM [%s] %s to a rule of the load balancer [%s].", userVmAsString, isAppliedMsg, loadBalancerAsString)); if (isApplied != applied) { - s_logger.debug(String.format("Skipping adding service state from the user VM [%s] to the service state list. This happens because the VM %s to the load " + logger.debug(String.format("Skipping adding service state from the user VM [%s] to the service state list. 
This happens because the VM %s to the load " + "balancer rule and the [%s] parameter was passed as [%s].", userVmAsString, isAppliedMsg, ApiConstants.APPLIED, applied)); continue; } loadBalancerInstances.add(userVm); String serviceState = vmServiceState.get(userVm.getId()); - s_logger.debug(String.format("Adding the service state [%s] from the user VM [%s] to the service state list.", serviceState, userVmAsString)); + logger.debug(String.format("Adding the service state [%s] from the user VM [%s] to the service state list.", serviceState, userVmAsString)); serviceStates.add(serviceState); } @@ -2597,7 +2595,7 @@ public void removeLBRule(LoadBalancer rule) { public boolean applyLbRules(List rules, boolean continueOnError) throws ResourceUnavailableException { if (rules == null || rules.size() == 0) { - s_logger.debug("There are no Load Balancing Rules to forward to the network elements"); + logger.debug("There are no Load Balancing Rules to forward to the network elements"); return true; } @@ -2626,7 +2624,7 @@ public boolean applyLbRules(List rules, boolean continueOnErr if (!continueOnError) { throw e; } - s_logger.warn("Problems with applying load balancing rules but pushing on", e); + logger.warn("Problems with applying load balancing rules but pushing on", e); success = false; } diff --git a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java index a7ed6478efa2..220617341623 100644 --- a/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/main/java/com/cloud/network/router/CommandSetupHelper.java @@ -30,7 +30,8 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import 
org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -150,7 +151,7 @@ public class CommandSetupHelper { - private static final Logger s_logger = Logger.getLogger(CommandSetupHelper.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private EntityManager _entityMgr; @@ -226,7 +227,7 @@ public void createVmDataCommand(final VirtualRouter router, final UserVm vm, fin Domain domain = domainDao.findById(vm.getDomainId()); if (domain != null && VirtualMachineManager.AllowExposeDomainInMetadata.valueIn(domain.getId())) { - s_logger.debug("Adding domain info to cloud metadata"); + logger.debug("Adding domain info to cloud metadata"); vmDataCommand.addVmData(NetworkModel.METATDATA_DIR, NetworkModel.CLOUD_DOMAIN_FILE, domain.getName()); vmDataCommand.addVmData(NetworkModel.METATDATA_DIR, NetworkModel.CLOUD_DOMAIN_ID_FILE, domain.getUuid()); } @@ -317,8 +318,8 @@ public void configDnsMasq(final VirtualRouter router, final Network network, fin ipList.add(new DhcpTO(router_guest_nic.getIPv4Address(), router_guest_nic.getIPv4Gateway(), router_guest_nic.getIPv4Netmask(), startIpOfSubnet)); for (final NicIpAliasVO ipAliasVO : ipAliasVOList) { final DhcpTO DhcpTO = new DhcpTO(ipAliasVO.getIp4Address(), ipAliasVO.getGateway(), ipAliasVO.getNetmask(), ipAliasVO.getStartIpOfSubnet()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("configDnsMasq : adding ip {" + DhcpTO.getGateway() + ", " + DhcpTO.getNetmask() + ", " + DhcpTO.getRouterIp() + ", " + DhcpTO.getStartIpOfSubnet() + if (logger.isTraceEnabled()) { + logger.trace("configDnsMasq : adding ip {" + DhcpTO.getGateway() + ", " + DhcpTO.getNetmask() + ", " + DhcpTO.getRouterIp() + ", " + DhcpTO.getStartIpOfSubnet() + "}"); } ipList.add(DhcpTO); @@ -734,7 +735,7 @@ public void createVmDataCommandForVMs(final DomainRouterVO router, final Command if (createVmData) { final NicVO nic = _nicDao.findByNtwkIdAndInstanceId(guestNetworkId, 
vm.getId()); if (nic != null) { - s_logger.debug("Creating user data entry for vm " + vm + " on domR " + router); + logger.debug("Creating user data entry for vm " + vm + " on domR " + router); _userVmDao.loadDetails(vm); createVmDataCommand(router, vm, nic, vm.getDetail("SSH.PublicKey"), cmds); @@ -755,7 +756,7 @@ public void createDhcpEntryCommandsForVMs(final DomainRouterVO router, final Com final NicVO nic = _nicDao.findByNtwkIdAndInstanceId(guestNetworkId, vm.getId()); if (nic != null) { - s_logger.debug("Creating dhcp entry for vm " + vm + " on domR " + router + "."); + logger.debug("Creating dhcp entry for vm " + vm + " on domR " + router + "."); createDhcpEntryCommand(router, vm, nic, false, cmds); } } @@ -1331,7 +1332,7 @@ protected String getGuestDhcpRange(final NicProfile guestNic, final Network gues private void setIpAddressNetworkParams(IpAddressTO ipAddress, final Network network, final VirtualRouter router) { if (_networkModel.isPrivateGateway(network.getId())) { - s_logger.debug("network " + network.getId() + " (name: " + network.getName() + " ) is a vpc private gateway, set traffic type to Public"); + logger.debug("network " + network.getId() + " (name: " + network.getName() + " ) is a vpc private gateway, set traffic type to Public"); ipAddress.setTrafficType(TrafficType.Public); ipAddress.setPrivateGateway(true); } else { @@ -1358,7 +1359,7 @@ private void setIpAddressNetworkParams(IpAddressTO ipAddress, final Network netw private Map getNicDetails(Network network) { if (network == null) { - s_logger.debug("Unable to get NIC details as the network is null"); + logger.debug("Unable to get NIC details as the network is null"); return null; } Map details = networkOfferingDetailsDao.getNtwkOffDetails(network.getNetworkOfferingId()); diff --git a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java index 38286b5d4d94..9c093e37ead4 100644 --- 
a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java +++ b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java @@ -36,7 +36,8 @@ import org.apache.cloudstack.network.router.deployment.RouterDeploymentDefinition; import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -115,7 +116,7 @@ public class NetworkHelperImpl implements NetworkHelper { - private static final Logger s_logger = Logger.getLogger(NetworkHelperImpl.class); + protected Logger logger = LogManager.getLogger(NetworkHelperImpl.class); protected static Account s_systemAccount; protected static String s_vmInstanceName; @@ -188,7 +189,7 @@ protected void setupHypervisorsMap() { @Override public boolean sendCommandsToRouter(final VirtualRouter router, final Commands cmds) throws AgentUnavailableException, ResourceUnavailableException { if (!checkRouterVersion(router)) { - s_logger.debug("Router requires upgrade. Unable to send command to router:" + router.getId() + ", router template version : " + router.getTemplateVersion() + logger.debug("Router requires upgrade. Unable to send command to router:" + router.getId() + ", router template version : " + router.getTemplateVersion() + ", minimal required version : " + NetworkOrchestrationService.MinVRVersion.valueIn(router.getDataCenterId())); throw new ResourceUnavailableException("Unable to send command. 
Router requires upgrade", VirtualRouter.class, router.getId()); } @@ -196,7 +197,7 @@ public boolean sendCommandsToRouter(final VirtualRouter router, final Commands c try { answers = _agentMgr.send(router.getHostId(), cmds); } catch (final OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); throw new AgentUnavailableException("Unable to send commands to virtual router ", router.getHostId(), e); } @@ -237,8 +238,8 @@ public void handleSingleWorkingRedundantRouter(final List params, final DeploymentPlan planToDeploy) throws StorageUnavailableException, InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Starting router " + router); + logger.debug("Starting router " + router); try { _itMgr.advanceStart(router.getUuid(), params, planToDeploy, null); } catch (final OperationTimedoutException e) { throw new ResourceUnavailableException("Starting router " + router + " failed! " + e.toString(), DataCenter.class, router.getDataCenterId()); } if (router.isStopPending()) { - s_logger.info("Clear the stop pending flag of router " + router.getHostName() + " after start router successfully!"); + logger.info("Clear the stop pending flag of router " + router.getHostName() + " after start router successfully!"); router.setStopPending(false); router = _routerDao.persist(router); } @@ -339,8 +340,8 @@ protected DomainRouterVO start(DomainRouterVO router, final User user, final Acc protected DomainRouterVO waitRouter(final DomainRouterVO router) { DomainRouterVO vm = _routerDao.findById(router.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Router " + router.getInstanceName() + " is not fully up yet, we will wait"); + if (logger.isDebugEnabled()) { + logger.debug("Router " + router.getInstanceName() + " is not fully up yet, we will wait"); } while (vm.getState() == State.Starting) { try { @@ -353,14 +354,14 @@ protected DomainRouterVO waitRouter(final DomainRouterVO 
router) { } if (vm.getState() == State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Router " + router.getInstanceName() + " is now fully up"); + if (logger.isDebugEnabled()) { + logger.debug("Router " + router.getInstanceName() + " is now fully up"); } return router; } - s_logger.warn("Router " + router.getInstanceName() + " failed to start. current state: " + vm.getState()); + logger.warn("Router " + router.getInstanceName() + " failed to start. current state: " + vm.getState()); return null; } @@ -400,7 +401,7 @@ public DomainRouterVO startVirtualRouter(final DomainRouterVO router, final User } if (router.getState() == State.Running) { - s_logger.debug("Redundant router " + router.getInstanceName() + " is already running!"); + logger.debug("Redundant router " + router.getInstanceName() + " is already running!"); return router; } @@ -459,8 +460,8 @@ public DomainRouterVO startVirtualRouter(final DomainRouterVO router, final User avoids[4] = new ExcludeList(); for (int i = 0; i < retryIndex; i++) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Try to deploy redundant virtual router:" + router.getHostName() + ", for " + i + " time"); + if (logger.isTraceEnabled()) { + logger.trace("Try to deploy redundant virtual router:" + router.getHostName() + ", for " + i + " time"); } plan.setAvoids(avoids[i]); try { @@ -514,8 +515,8 @@ public DomainRouterVO deployRouter(final RouterDeploymentDefinition routerDeploy checkIfZoneHasCapacity(routerDeploymentDefinition.getDest().getDataCenter(), hType, routerOffering); final long id = _routerDao.getNextInSequence(Long.class, "id"); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Allocating the VR with id=%s in datacenter %s with the hypervisor type %s", id, routerDeploymentDefinition.getDest() + if (logger.isDebugEnabled()) { + logger.debug(String.format("Allocating the VR with id=%s in datacenter %s with the hypervisor type %s", id, routerDeploymentDefinition.getDest() .getDataCenter(), 
hType)); } @@ -523,7 +524,7 @@ public DomainRouterVO deployRouter(final RouterDeploymentDefinition routerDeploy final VMTemplateVO template = _templateDao.findRoutingTemplate(hType, templateName); if (template == null) { - s_logger.debug(hType + " won't support system vm, skip it"); + logger.debug(hType + " won't support system vm, skip it"); continue; } @@ -554,7 +555,7 @@ public DomainRouterVO deployRouter(final RouterDeploymentDefinition routerDeploy router = _routerDao.findById(router.getId()); } catch (final InsufficientCapacityException ex) { if (iter.hasNext()) { - s_logger.debug("Failed to allocate the VR with hypervisor type " + hType + ", retrying one more time"); + logger.debug("Failed to allocate the VR with hypervisor type " + hType + ", retrying one more time"); continue; } else { throw ex; @@ -567,7 +568,7 @@ public DomainRouterVO deployRouter(final RouterDeploymentDefinition routerDeploy break; } catch (final InsufficientCapacityException ex) { if (iter.hasNext()) { - s_logger.debug("Failed to start the VR " + router + " with hypervisor type " + hType + ", " + "destroying it and recreating one more time"); + logger.debug("Failed to start the VR " + router + " with hypervisor type " + hType + ", " + "destroying it and recreating one more time"); // destroy the router destroyRouter(router.getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM); continue; @@ -588,18 +589,18 @@ private void checkIfZoneHasCapacity(final DataCenter zone, final HypervisorType List hosts = _hostDao.listByDataCenterIdAndHypervisorType(zone.getId(), hypervisorType); if (CollectionUtils.isEmpty(hosts)) { String msg = String.format("Zone %s has no %s host available which is enabled and in Up state", zone.getName(), hypervisorType); - s_logger.debug(msg); + logger.debug(msg); throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } for (HostVO host : hosts) { Pair cpuCapabilityAndCapacity = 
capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(host, routerOffering, false); if (cpuCapabilityAndCapacity.first() && cpuCapabilityAndCapacity.second()) { - s_logger.debug("Host " + host + " has enough capacity for the router"); + logger.debug("Host " + host + " has enough capacity for the router"); return; } } String msg = String.format("Zone %s has no %s host which has enough capacity", zone.getName(), hypervisorType); - s_logger.debug(msg); + logger.debug(msg); throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId()); } @@ -660,7 +661,7 @@ protected HypervisorType getClusterToStartDomainRouterForOvm(final long podId) { for (final HostVO h : hosts) { if (h.getState() == Status.Up) { - s_logger.debug("Pick up host that has hypervisor type " + h.getHypervisorType() + " in cluster " + cv.getId() + " to start domain router for OVM"); + logger.debug("Pick up host that has hypervisor type " + h.getHypervisorType() + " in cluster " + cv.getId() + " to start domain router for OVM"); return h.getHypervisorType(); } } @@ -676,7 +677,7 @@ protected HypervisorType getClusterToStartDomainRouterForOvm(final long podId) { protected LinkedHashMap> configureControlNic(final RouterDeploymentDefinition routerDeploymentDefinition) { final LinkedHashMap> controlConfig = new LinkedHashMap>(3); - s_logger.debug("Adding nic for Virtual Router in Control network "); + logger.debug("Adding nic for Virtual Router in Control network "); final List offerings = _networkModel.getSystemAccountNetworkOfferings(NetworkOffering.SystemControlNetwork); final NetworkOffering controlOffering = offerings.get(0); final Network controlNic = _networkMgr.setupNetwork(s_systemAccount, controlOffering, routerDeploymentDefinition.getPlan(), null, null, false).get(0); @@ -690,7 +691,7 @@ protected LinkedHashMap> configurePublicNic( final LinkedHashMap> publicConfig = new LinkedHashMap>(3); if (routerDeploymentDefinition.isPublicNetwork()) { - s_logger.debug("Adding nic for Virtual 
Router in Public network "); + logger.debug("Adding nic for Virtual Router in Public network "); // if source nat service is supported by the network, get the source // nat ip address final NicProfile defaultNic = new NicProfile(); @@ -724,7 +725,7 @@ protected LinkedHashMap> configurePublicNic( // interface if possible final NicVO peerNic = _nicDao.findByIp4AddressAndNetworkId(publicIp, publicNetworks.get(0).getId()); if (peerNic != null) { - s_logger.info("Use same MAC as previous RvR, the MAC is " + peerNic.getMacAddress()); + logger.info("Use same MAC as previous RvR, the MAC is " + peerNic.getMacAddress()); defaultNic.setMacAddress(peerNic.getMacAddress()); } if (routerDeploymentDefinition.getGuestNetwork() != null) { @@ -766,13 +767,13 @@ public LinkedHashMap> configureGuestNic(fina final Network guestNetwork = routerDeploymentDefinition.getGuestNetwork(); if (guestNetwork != null) { - s_logger.debug("Adding nic for Virtual Router in Guest network " + guestNetwork); + logger.debug("Adding nic for Virtual Router in Guest network " + guestNetwork); String defaultNetworkStartIp = null, defaultNetworkStartIpv6 = null; final Nic placeholder = _networkModel.getPlaceholderNicForRouter(guestNetwork, routerDeploymentDefinition.getPodId()); if (!routerDeploymentDefinition.isPublicNetwork()) { if (guestNetwork.getCidr() != null) { if (placeholder != null && placeholder.getIPv4Address() != null) { - s_logger.debug("Requesting ipv4 address " + placeholder.getIPv4Address() + " stored in placeholder nic for the network " + logger.debug("Requesting ipv4 address " + placeholder.getIPv4Address() + " stored in placeholder nic for the network " + guestNetwork); defaultNetworkStartIp = placeholder.getIPv4Address(); } else { @@ -785,8 +786,8 @@ public LinkedHashMap> configureGuestNic(fina if (startIp != null && _ipAddressDao.findByIpAndSourceNetworkId(guestNetwork.getId(), startIp).getAllocatedTime() == null) { defaultNetworkStartIp = startIp; - } else if 
(s_logger.isDebugEnabled()) { - s_logger.debug("First ipv4 " + startIp + " in network id=" + guestNetwork.getId() + } else if (logger.isDebugEnabled()) { + logger.debug("First ipv4 " + startIp + " in network id=" + guestNetwork.getId() + " is already allocated, can't use it for domain router; will get random ip address from the range"); } } @@ -795,7 +796,7 @@ public LinkedHashMap> configureGuestNic(fina if (guestNetwork.getIp6Cidr() != null) { if (placeholder != null && placeholder.getIPv6Address() != null) { - s_logger.debug("Requesting ipv6 address " + placeholder.getIPv6Address() + " stored in placeholder nic for the network " + logger.debug("Requesting ipv6 address " + placeholder.getIPv6Address() + " stored in placeholder nic for the network " + guestNetwork); defaultNetworkStartIpv6 = placeholder.getIPv6Address(); } else { @@ -807,8 +808,8 @@ public LinkedHashMap> configureGuestNic(fina final String startIpv6 = _networkModel.getStartIpv6Address(guestNetwork.getId()); if (startIpv6 != null && _ipv6Dao.findByNetworkIdAndIp(guestNetwork.getId(), startIpv6) == null) { defaultNetworkStartIpv6 = startIpv6; - } else if (s_logger.isDebugEnabled()) { - s_logger.debug("First ipv6 " + startIpv6 + " in network id=" + guestNetwork.getId() + } else if (logger.isDebugEnabled()) { + logger.debug("First ipv6 " + startIpv6 + " in network id=" + guestNetwork.getId() + " is already allocated, can't use it for domain router; will get random ipv6 address from the range"); } } @@ -863,15 +864,15 @@ public boolean validateHAProxyLBRule(final LoadBalancingRule rule) { final String timeEndChar = "dhms"; int haproxy_stats_port = Integer.parseInt(_configDao.getValue(Config.NetworkLBHaproxyStatsPort.key())); if (rule.getSourcePortStart() == haproxy_stats_port) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Can't create LB on port "+ haproxy_stats_port +", haproxy is listening for LB stats on this port"); + if (logger.isDebugEnabled()) { + logger.debug("Can't create LB on port "+ 
haproxy_stats_port +", haproxy is listening for LB stats on this port"); } return false; } String lbProtocol = rule.getLbProtocol(); if (lbProtocol != null && lbProtocol.toLowerCase().equals(NetUtils.UDP_PROTO)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Can't create LB rule as haproxy does not support udp"); + if (logger.isDebugEnabled()) { + logger.debug("Can't create LB rule as haproxy does not support udp"); } return false; } diff --git a/server/src/main/java/com/cloud/network/router/RouterControlHelper.java b/server/src/main/java/com/cloud/network/router/RouterControlHelper.java index 06cef99bc41e..d99228172934 100644 --- a/server/src/main/java/com/cloud/network/router/RouterControlHelper.java +++ b/server/src/main/java/com/cloud/network/router/RouterControlHelper.java @@ -20,7 +20,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.network.Networks.TrafficType; import com.cloud.network.dao.NetworkDao; @@ -32,7 +33,7 @@ public class RouterControlHelper { - private static final Logger logger = Logger.getLogger(RouterControlHelper.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private DomainRouterDao routerDao; diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index d49322bde60c..b1c7ceef2d1b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -74,7 +74,6 @@ import org.apache.cloudstack.utils.usage.UsageUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import 
org.springframework.beans.factory.annotation.Qualifier; @@ -285,7 +284,6 @@ */ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements VirtualNetworkApplianceManager, VirtualNetworkApplianceService, VirtualMachineGuru, Listener, Configurable, StateListener { - private static final Logger s_logger = Logger.getLogger(VirtualNetworkApplianceManagerImpl.class); private static final String CONNECTIVITY_TEST = "connectivity.test"; private static final String FILESYSTEM_WRITABLE_TEST = "filesystem.writable.test"; private static final String READONLY_FILESYSTEM_ERROR = "Read-only file system"; @@ -293,7 +291,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V /** * Used regex to ensure that the value that will be passed to the VR is an acceptable value */ - public static final String LOGROTATE_REGEX = "((?i)(hourly)|(daily)|(monthly))|(\\*|\\d{2})\\:(\\*|\\d{2})\\:(\\*|\\d{2})"; + public static final String LOGROTATE_REGEX = "((?i)(hourly)|(daily)|(monthly))|(\\*|\\d{2})\\:(\\*|\\d{2})\\:(\\*|\\d{2})"; @Inject private EntityManager _entityMgr; @Inject private DataCenterDao _dcDao; @@ -420,7 +418,7 @@ public VirtualRouter upgradeRouter(final UpgradeRouterCmd cmd) { _accountMgr.checkAccess(caller, null, true, router); if (router.getServiceOfferingId() == serviceOfferingId) { - s_logger.debug("Router: " + routerId + "already has service offering: " + serviceOfferingId); + logger.debug("Router: " + routerId + "already has service offering: " + serviceOfferingId); return _routerDao.findById(routerId); } @@ -441,7 +439,7 @@ public VirtualRouter upgradeRouter(final UpgradeRouterCmd cmd) { // Check that the router is stopped if (!router.getState().equals(VirtualMachine.State.Stopped)) { - s_logger.warn("Unable to upgrade router " + router.toString() + " in state " + router.getState()); + logger.warn("Unable to upgrade router " + router.toString() + " in state " + router.getState()); throw new
InvalidParameterValueException("Unable to upgrade router " + router.toString() + " in state " + router.getState() + "; make sure the router is stopped and not in an error state before upgrading."); } @@ -488,7 +486,7 @@ public VirtualRouter stopRouter(final long routerId, final boolean forced) throw // Clear stop pending flag after stopped successfully if (router.isStopPending()) { - s_logger.info("Clear the stop pending flag of router " + router.getHostName() + " after stop router successfully"); + logger.info("Clear the stop pending flag of router " + router.getHostName() + " after stop router successfully"); router.setStopPending(false); _routerDao.persist(router); virtualRouter.setStopPending(false); @@ -516,9 +514,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { userStats.setCurrentBytesSent(0); userStats.setNetBytesSent(userStats.getNetBytesSent() + currentBytesSent); _userStatsDao.update(userStats.getId(), userStats); - s_logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); + logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); } else { - s_logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); + logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); } } } @@ -541,12 +539,12 @@ public VirtualRouter rebootRouter(final long routerId, final boolean reprogramNe // Can reboot domain router only in Running state if (router == null || router.getState() != VirtualMachine.State.Running) { - s_logger.warn("Unable to reboot, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to reboot, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to reboot domR, it is not in right state " + router.getState(), DataCenter.class, 
router.getDataCenterId()); } final UserVO user = _userDao.findById(CallContext.current().getCallingUserId()); - s_logger.debug("Stopping and starting router " + router + " as a part of router reboot"); + logger.debug("Stopping and starting router " + router + " as a part of router reboot"); if (stop(router, forced, user, caller) != null) { return startRouter(routerId, reprogramNetwork); @@ -612,7 +610,7 @@ public boolean configure(final String name, final Map params) th _dnsBasicZoneUpdates = String.valueOf(_configDao.getValue(Config.DnsBasicZoneUpdates.key())); - s_logger.info("Router configurations: " + "ramsize=" + _routerRamSize); + logger.info("Router configurations: " + "ramsize=" + _routerRamSize); _agentMgr.registerForHostEvents(new SshKeysDistriMonitor(_agentMgr, _hostDao, _configDao), true, false, false); @@ -622,7 +620,7 @@ public boolean configure(final String name, final Map params) th // this can sometimes happen, if DB is manually or programmatically manipulated if (offerings == null || offerings.size() < 2) { final String msg = "Data integrity problem : System Offering For Software router VM has been removed?"; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } @@ -637,7 +635,7 @@ public boolean configure(final String name, final Map params) th _agentMgr.registerForHostEvents(this, true, false, false); - s_logger.info("DomainRouterManager is configured."); + logger.info("DomainRouterManager is configured."); return true; } @@ -647,7 +645,7 @@ public boolean start() { if (_routerStatsInterval > 0) { _executor.scheduleAtFixedRate(new NetworkUsageTask(), _routerStatsInterval, _routerStatsInterval, TimeUnit.SECONDS); } else { - s_logger.debug("router.stats.interval - " + _routerStatsInterval + " so not scheduling the router stats thread"); + logger.debug("router.stats.interval - " + _routerStatsInterval + " so not scheduling the router stats thread"); } //Schedule Network stats update task @@ -684,7 +682,7 @@ public 
boolean start() { } if (_usageAggregationRange < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _usageAggregationRange = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } @@ -692,7 +690,7 @@ public boolean start() { final long initialDelay = aggDate - System.currentTimeMillis(); if( initialDelay < 0){ - s_logger.warn("Initial delay for network usage stats update task is incorrect. Stats update task will run immediately"); + logger.warn("Initial delay for network usage stats update task is incorrect. Stats update task will run immediately"); } _networkStatsUpdateExecutor.scheduleAtFixedRate(new NetworkStatsUpdateTask(), initialDelay, _usageAggregationRange * 60 * 1000, @@ -704,28 +702,28 @@ public boolean start() { _rvrStatusUpdateExecutor.execute(new RvRStatusUpdateTask()); } } else { - s_logger.debug("router.check.interval - " + _routerCheckInterval + " so not scheduling the redundant router checking thread"); + logger.debug("router.check.interval - " + _routerCheckInterval + " so not scheduling the redundant router checking thread"); } final int routerAlertsCheckInterval = RouterAlertsCheckInterval.value(); if (routerAlertsCheckInterval > 0) { _checkExecutor.scheduleAtFixedRate(new CheckRouterAlertsTask(), routerAlertsCheckInterval, routerAlertsCheckInterval, TimeUnit.SECONDS); } else { - s_logger.debug(RouterAlertsCheckIntervalCK + "=" + routerAlertsCheckInterval + " so not scheduling the router alerts checking thread"); + logger.debug(RouterAlertsCheckIntervalCK + "=" + routerAlertsCheckInterval + " so not scheduling the router alerts checking thread"); } final int routerHealthCheckConfigRefreshInterval = RouterHealthChecksConfigRefreshInterval.value(); if (routerHealthCheckConfigRefreshInterval > 0) { 
_checkExecutor.scheduleAtFixedRate(new UpdateRouterHealthChecksConfigTask(), routerHealthCheckConfigRefreshInterval, routerHealthCheckConfigRefreshInterval, TimeUnit.MINUTES); } else { - s_logger.debug(RouterHealthChecksConfigRefreshIntervalCK + "=" + routerHealthCheckConfigRefreshInterval + " so not scheduling the router health check data thread"); + logger.debug(RouterHealthChecksConfigRefreshIntervalCK + "=" + routerHealthCheckConfigRefreshInterval + " so not scheduling the router health check data thread"); } final int routerHealthChecksFetchInterval = RouterHealthChecksResultFetchInterval.value(); if (routerHealthChecksFetchInterval > 0) { _checkExecutor.scheduleAtFixedRate(new FetchRouterHealthChecksResultTask(), routerHealthChecksFetchInterval, routerHealthChecksFetchInterval, TimeUnit.MINUTES); } else { - s_logger.debug(RouterHealthChecksResultFetchIntervalCK + "=" + routerHealthChecksFetchInterval + " so not scheduling the router checks fetching thread"); + logger.debug(RouterHealthChecksResultFetchIntervalCK + "=" + routerHealthChecksFetchInterval + " so not scheduling the router checks fetching thread"); } return true; @@ -748,13 +746,13 @@ public NetworkUsageTask() { protected void runInContext() { try { final List routers = _routerDao.listByStateAndNetworkType(VirtualMachine.State.Running, GuestType.Isolated, mgmtSrvrId); - s_logger.debug("Found " + routers.size() + " running routers. "); + logger.debug("Found " + routers.size() + " running routers. 
"); for (final DomainRouterVO router : routers) { collectNetworkStatistics(router, null); } } catch (final Exception e) { - s_logger.warn("Error while collecting network stats", e); + logger.warn("Error while collecting network stats", e); } } } @@ -773,7 +771,7 @@ protected void runInContext() { // msHost in UP state with min id should run the job final ManagementServerHostVO msHost = _msHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", false, 0L, 1L)); if (msHost == null || msHost.getMsid() != mgmtSrvrId) { - s_logger.debug("Skipping aggregate network stats update"); + logger.debug("Skipping aggregate network stats update"); scanLock.unlock(); return; } @@ -794,17 +792,17 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { .getCurrentBytesReceived(), stat.getCurrentBytesSent(), stat.getAggBytesReceived(), stat.getAggBytesSent(), updatedTime); _userStatsLogDao.persist(statsLog); } - s_logger.debug("Successfully updated aggregate network stats"); + logger.debug("Successfully updated aggregate network stats"); } }); } catch (final Exception e) { - s_logger.debug("Failed to update aggregate network stats", e); + logger.debug("Failed to update aggregate network stats", e); } finally { scanLock.unlock(); } } } catch (final Exception e) { - s_logger.debug("Exception while trying to acquire network stats lock", e); + logger.debug("Exception while trying to acquire network stats lock", e); } finally { scanLock.releaseRef(); } @@ -860,11 +858,11 @@ protected void updateSite2SiteVpnConnectionState(final List rout if (origAnswer instanceof CheckS2SVpnConnectionsAnswer) { answer = (CheckS2SVpnConnectionsAnswer) origAnswer; } else { - s_logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); + logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); continue; } if (!answer.getResult()) { - s_logger.warn("Unable to update router " + router.getHostName() + "'s 
VPN connection status"); + logger.warn("Unable to update router " + router.getHostName() + "'s VPN connection status"); continue; } for (final Site2SiteVpnConnectionVO conn : conns) { @@ -891,7 +889,7 @@ protected void updateSite2SiteVpnConnectionState(final List rout final String context = "Site-to-site Vpn Connection to " + gw.getName() + " on router " + router.getHostName() + "(id: " + router.getId() + ") " + " just switched from " + oldState + " to " + conn.getState(); - s_logger.info(context); + logger.info(context); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); } } @@ -930,14 +928,14 @@ protected void updateRoutersRedundantState(final List routers) { if (origAnswer instanceof CheckRouterAnswer) { answer = (CheckRouterAnswer) origAnswer; } else { - s_logger.warn("Unable to update router " + router.getHostName() + "'s status"); + logger.warn("Unable to update router " + router.getHostName() + "'s status"); } RedundantState state = RedundantState.UNKNOWN; if (answer != null) { if (answer.getResult()) { state = answer.getState(); } else { - s_logger.info("Agent response doesn't seem to be correct ==> " + answer.getResult()); + logger.info("Agent response doesn't seem to be correct ==> " + answer.getResult()); } } router.setRedundantState(state); @@ -952,7 +950,7 @@ protected void updateRoutersRedundantState(final List routers) { final String title = "Redundant virtual router " + router.getInstanceName() + " just switch from " + prevState + " to " + currState; final String context = "Redundant virtual router (name: " + router.getHostName() + ", id: " + router.getId() + ") " + " just switch from " + prevState + " to " + currState; - s_logger.info(context); + logger.info(context); if (currState == RedundantState.PRIMARY) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); } @@ 
-968,18 +966,18 @@ protected void recoverRedundantNetwork(final DomainRouterVO primaryRouter, final final HostVO backupHost = _hostDao.findById(backupRouter.getHostId()); if (primaryHost.getState() == Status.Up && backupHost.getState() == Status.Up) { final String title = "Reboot " + backupRouter.getInstanceName() + " to ensure redundant virtual routers work"; - if (s_logger.isDebugEnabled()) { - s_logger.debug(title); + if (logger.isDebugEnabled()) { + logger.debug(title); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, backupRouter.getDataCenterId(), backupRouter.getPodIdToDeployIn(), title, title); try { rebootRouter(backupRouter.getId(), true, false); } catch (final ConcurrentOperationException e) { - s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); + logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } catch (final ResourceUnavailableException e) { - s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); + logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } catch (final InsufficientCapacityException e) { - s_logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); + logger.warn("Fail to reboot " + backupRouter.getInstanceName(), e); } } } @@ -1061,7 +1059,7 @@ private void checkDuplicatePrimary(final List routers) { final String context = "Virtual router (name: " + router.getHostName() + ", id: " + router.getId() + " and router (name: " + dupRouter.getHostName() + ", id: " + router.getId() + ") are both in PRIMARY state! If the problem persist, restart both of routers. 
"; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); - s_logger.warn(context); + logger.warn(context); } else { networkRouterMaps.put(routerGuestNtwkId, router); } @@ -1113,19 +1111,19 @@ protected void runInContext() { } // && router.getState() == VirtualMachine.State.Stopped if (router.getHostId() == null && router.getState() == VirtualMachine.State.Running) { - s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to can't find host"); + logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to can't find host"); continue; } final HostVO host = _hostDao.findById(router.getHostId()); if (host == null || host.getManagementServerId() == null || host.getManagementServerId() != ManagementServerNode.getManagementServerId()) { - s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to not belong to this mgmt server"); + logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to not belong to this mgmt server"); continue; } updateRoutersRedundantState(routers); checkDuplicatePrimary(routers); checkSanity(routers); } catch (final Exception ex) { - s_logger.error("Fail to complete the RvRStatusUpdateTask! ", ex); + logger.error("Fail to complete the RvRStatusUpdateTask! ", ex); } } } @@ -1140,7 +1138,7 @@ public CheckRouterTask() { protected void runInContext() { try { final List routers = _routerDao.listIsolatedByHostId(null); - s_logger.debug("Found " + routers.size() + " routers to update status. "); + logger.debug("Found " + routers.size() + " routers to update status. 
"); updateSite2SiteVpnConnectionState(routers); @@ -1151,21 +1149,21 @@ protected void runInContext() { networks.add(vpcNetworks.get(0)); } } - s_logger.debug("Found " + networks.size() + " VPC's to update Redundant State. "); + logger.debug("Found " + networks.size() + " VPC's to update Redundant State. "); pushToUpdateQueue(networks); networks = _networkDao.listRedundantNetworks(); - s_logger.debug("Found " + networks.size() + " networks to update RvR status. "); + logger.debug("Found " + networks.size() + " networks to update RvR status. "); pushToUpdateQueue(networks); } catch (final Exception ex) { - s_logger.error("Fail to complete the CheckRouterTask! ", ex); + logger.error("Fail to complete the CheckRouterTask! ", ex); } } protected void pushToUpdateQueue(final List networks) throws InterruptedException { for (final NetworkVO network : networks) { if (!_vrUpdateQueue.offer(network.getId(), 500, TimeUnit.MILLISECONDS)) { - s_logger.warn("Cannot insert into virtual router update queue! Adjustment of router.check.interval and router.check.poolsize maybe needed."); + logger.warn("Cannot insert into virtual router update queue! Adjustment of router.check.interval and router.check.poolsize maybe needed."); break; } } @@ -1180,9 +1178,9 @@ public FetchRouterHealthChecksResultTask() { protected void runInContext() { try { final List routers = _routerDao.listByStateAndManagementServer(VirtualMachine.State.Running, mgmtSrvrId); - s_logger.info("Found " + routers.size() + " running routers. Fetching, analysing and updating DB for the health checks."); + logger.info("Found " + routers.size() + " running routers. 
Fetching, analysing and updating DB for the health checks."); if (!RouterHealthChecksEnabled.value()) { - s_logger.debug("Skipping fetching of router health check results as router.health.checks.enabled is disabled"); + logger.debug("Skipping fetching of router health check results as router.health.checks.enabled is disabled"); return; } @@ -1192,7 +1190,7 @@ protected void runInContext() { handleFailingChecks(router, failingChecks); } } catch (final Exception ex) { - s_logger.error("Fail to complete the FetchRouterHealthChecksResultTask! ", ex); + logger.error("Fail to complete the FetchRouterHealthChecksResultTask! ", ex); ex.printStackTrace(); } } @@ -1201,11 +1199,11 @@ protected void runInContext() { private List getFailingChecks(DomainRouterVO router, GetRouterMonitorResultsAnswer answer) { if (answer == null) { - s_logger.warn("Unable to fetch monitor results for router " + router); + logger.warn("Unable to fetch monitor results for router " + router); resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Communication failed"); return Arrays.asList(CONNECTIVITY_TEST); } else if (!answer.getResult()) { - s_logger.warn("Failed to fetch monitor results from router " + router + " with details: " + answer.getDetails()); + logger.warn("Failed to fetch monitor results from router " + router + " with details: " + answer.getDetails()); if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { resetRouterHealthChecksAndConnectivity(router.getId(), true, false, "Failed to write: " + answer.getDetails()); return Arrays.asList(FILESYSTEM_WRITABLE_TEST); @@ -1228,7 +1226,7 @@ private void handleFailingChecks(DomainRouterVO router, List failingChec String alertMessage = String.format("Health checks failed: %d failing checks on router %s / %s", failingChecks.size(), router.getName(), router.getUuid()); _alertMgr.sendAlert(AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), 
router.getPodIdToDeployIn(), alertMessage, alertMessage); - s_logger.warn(alertMessage + ". Checking failed health checks to see if router needs recreate"); + logger.warn(alertMessage + ". Checking failed health checks to see if router needs recreate"); String checkFailsToRecreateVr = RouterHealthChecksFailuresToRecreateVr.valueIn(router.getDataCenterId()); StringBuilder failingChecksEvent = new StringBuilder(); @@ -1257,7 +1255,7 @@ private void handleFailingChecks(DomainRouterVO router, List failingChec Domain.ROOT_DOMAIN, EventTypes.EVENT_ROUTER_HEALTH_CHECKS, failingChecksEvent.toString(), router.getId(), ApiCommandResourceType.DomainRouter.toString()); if (recreateRouter) { - s_logger.warn("Health Check Alert: Found failing checks in " + + logger.warn("Health Check Alert: Found failing checks in " + RouterHealthChecksFailuresToRecreateVrCK + ", attempting recreating router."); recreateRouter(router.getId()); } @@ -1275,13 +1273,13 @@ private DomainRouterJoinVO getAnyRouterJoinWithVpc(long routerId) { private boolean restartVpcInDomainRouter(DomainRouterJoinVO router, User user) { try { - s_logger.debug("Attempting restart VPC " + router.getVpcName() + " for router recreation " + router.getUuid()); + logger.debug("Attempting restart VPC " + router.getVpcName() + " for router recreation " + router.getUuid()); ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_ROUTER_HEALTH_CHECKS, "Recreating router " + router.getUuid() + " by restarting VPC " + router.getVpcUuid(), router.getId(), ApiCommandResourceType.DomainRouter.toString()); return vpcService.restartVpc(router.getVpcId(), true, false, false, user); } catch (Exception e) { - s_logger.error("Failed to restart VPC for router recreation " + + logger.error("Failed to restart VPC for router recreation " + router.getVpcName() + " ,router " + router.getUuid(), e); return false; } @@ -1299,13 +1297,13 @@ private DomainRouterJoinVO 
getAnyRouterJoinWithGuestTraffic(long routerId) { private boolean restartGuestNetworkInDomainRouter(DomainRouterJoinVO router, User user) { try { - s_logger.info("Attempting restart network " + router.getNetworkName() + " for router recreation " + router.getUuid()); + logger.info("Attempting restart network " + router.getNetworkName() + " for router recreation " + router.getUuid()); ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_ROUTER_HEALTH_CHECKS, "Recreating router " + router.getUuid() + " by restarting network " + router.getNetworkUuid(), router.getId(), ApiCommandResourceType.DomainRouter.toString()); return networkService.restartNetwork(router.getNetworkId(), true, false, false, user); } catch (Exception e) { - s_logger.error("Failed to restart network " + router.getNetworkName() + + logger.error("Failed to restart network " + router.getNetworkName() + " for router recreation " + router.getNetworkName(), e); return false; } @@ -1331,7 +1329,7 @@ private boolean recreateRouter(long routerId) { return restartGuestNetworkInDomainRouter(routerJoinToRestart, systemUser); } - s_logger.warn("Unable to find a valid guest network or VPC to restart for recreating router id " + routerId); + logger.warn("Unable to find a valid guest network or VPC to restart for recreating router id " + routerId); return false; } @@ -1406,7 +1404,7 @@ private RouterHealthCheckResultVO parseHealthCheckVOFromJson(final long routerId } else { routerHealthCheckResultDao.update(hcVo.getId(), hcVo); } - s_logger.info("Found health check " + hcVo + " which took running duration (ms) " + lastRunDuration); + logger.info("Found health check " + hcVo + " which took running duration (ms) " + lastRunDuration); return hcVo; } @@ -1435,7 +1433,7 @@ private List parseHealthCheckResults( for (String checkType : checksJson.keySet()) { if (checksJson.get(checkType).containsKey(lastRunKey)) { // Log last run of this check type run info Map 
lastRun = checksJson.get(checkType).get(lastRunKey); - s_logger.info("Found check types executed on VR " + checkType + ", start: " + lastRun.get("start") + + logger.info("Found check types executed on VR " + checkType + ", start: " + lastRun.get("start") + ", end: " + lastRun.get("end") + ", duration: " + lastRun.get("duration")); } @@ -1449,7 +1447,7 @@ private List parseHealthCheckResults( routerId, checkName, checkType, checksJson.get(checkType).get(checkName), checksInDb); healthChecks.add(hcVo); } catch (Exception ex) { - s_logger.error("Skipping health check: Exception while parsing check result data for router id " + routerId + + logger.error("Skipping health check: Exception while parsing check result data for router id " + routerId + ", check type: " + checkType + ", check name: " + checkName + ":" + ex.getLocalizedMessage(), ex); } } @@ -1459,17 +1457,17 @@ private List parseHealthCheckResults( private List updateDbHealthChecksFromRouterResponse(final long routerId, final String monitoringResult) { if (StringUtils.isBlank(monitoringResult)) { - s_logger.warn("Attempted parsing empty monitoring results string for router " + routerId); + logger.warn("Attempted parsing empty monitoring results string for router " + routerId); return Collections.emptyList(); } try { - s_logger.debug("Parsing and updating DB health check data for router: " + routerId + " with data: " + monitoringResult) ; + logger.debug("Parsing and updating DB health check data for router: " + routerId + " with data: " + monitoringResult) ; final Type t = new TypeToken>>>() {}.getType(); final Map>> checks = GsonHelper.getGson().fromJson(monitoringResult, t); return parseHealthCheckResults(checks, routerId); } catch (JsonSyntaxException ex) { - s_logger.error("Unable to parse the result of health checks due to " + ex.getLocalizedMessage(), ex); + logger.error("Unable to parse the result of health checks due to " + ex.getLocalizedMessage(), ex); } return Collections.emptyList(); } @@ -1488,17 
+1486,17 @@ private GetRouterMonitorResultsAnswer fetchAndUpdateRouterHealthChecks(DomainRou final Answer answer = _agentMgr.easySend(router.getHostId(), command); if (answer == null) { - s_logger.warn("Unable to fetch monitoring results data from router " + router.getHostName()); + logger.warn("Unable to fetch monitoring results data from router " + router.getHostName()); return null; } if (answer instanceof GetRouterMonitorResultsAnswer) { return (GetRouterMonitorResultsAnswer) answer; } else { - s_logger.warn("Unable to fetch health checks results to router " + router.getHostName() + " Received answer " + answer.getDetails()); + logger.warn("Unable to fetch health checks results to router " + router.getHostName() + " Received answer " + answer.getDetails()); return new GetRouterMonitorResultsAnswer(command, false, null, answer.getDetails()); } } catch (final Exception e) { - s_logger.warn("Error while collecting alerts from router: " + router.getInstanceName(), e); + logger.warn("Error while collecting alerts from router: " + router.getInstanceName(), e); return null; } } @@ -1520,17 +1518,17 @@ private GetRouterMonitorResultsAnswer performBasicTestsOnRouter(DomainRouterVO r final Answer answer = _agentMgr.easySend(router.getHostId(), command); if (answer == null) { - s_logger.warn("Unable to fetch basic router test results data from router " + router.getHostName()); + logger.warn("Unable to fetch basic router test results data from router " + router.getHostName()); return null; } if (answer instanceof GetRouterMonitorResultsAnswer) { return (GetRouterMonitorResultsAnswer) answer; } else { - s_logger.warn("Unable to fetch basic router test results from router " + router.getHostName() + " Received answer " + answer.getDetails()); + logger.warn("Unable to fetch basic router test results from router " + router.getHostName() + " Received answer " + answer.getDetails()); return new GetRouterMonitorResultsAnswer(command, false, null, answer.getDetails()); } } catch 
(final Exception e) { - s_logger.warn("Error while performing basic tests on router: " + router.getInstanceName(), e); + logger.warn("Error while performing basic tests on router: " + router.getInstanceName(), e); return null; } } @@ -1550,7 +1548,7 @@ public Pair performRouterHealthChecks(long routerId) { throw new CloudRuntimeException("Router health checks are not enabled for router: " + router); } - s_logger.info("Running health check results for router " + router.getUuid()); + logger.info("Running health check results for router " + router.getUuid()); GetRouterMonitorResultsAnswer answer = null; String resultDetails = ""; @@ -1559,21 +1557,21 @@ public Pair performRouterHealthChecks(long routerId) { // Step 1: Perform basic tests to check the connectivity and file system on router answer = performBasicTestsOnRouter(router); if (answer == null) { - s_logger.debug("No results received for the basic tests on router: " + router); + logger.debug("No results received for the basic tests on router: " + router); resultDetails = "Basic tests results unavailable"; success = false; } else if (!answer.getResult()) { - s_logger.debug("Basic tests failed on router: " + router); + logger.debug("Basic tests failed on router: " + router); resultDetails = "Basic tests failed - " + answer.getMonitoringResults(); success = false; } else { // Step 2: Update health check data on router and perform and retrieve health checks on router if (!updateRouterHealthChecksConfig(router)) { - s_logger.warn("Unable to update health check config for fresh run successfully for router: " + router + ", so trying to fetch last result."); + logger.warn("Unable to update health check config for fresh run successfully for router: " + router + ", so trying to fetch last result."); success = false; answer = fetchAndUpdateRouterHealthChecks(router, false); } else { - s_logger.info("Successfully updated health check config for fresh run successfully for router: " + router); + logger.info("Successfully 
updated health check config for fresh run successfully for router: " + router); answer = fetchAndUpdateRouterHealthChecks(router, true); } @@ -1601,7 +1599,7 @@ public UpdateRouterHealthChecksConfigTask() { protected void runInContext() { try { final List routers = _routerDao.listByStateAndManagementServer(VirtualMachine.State.Running, mgmtSrvrId); - s_logger.debug("Found " + routers.size() + " running routers. "); + logger.debug("Found " + routers.size() + " running routers. "); for (final DomainRouterVO router : routers) { GetRouterMonitorResultsAnswer answer = performBasicTestsOnRouter(router); @@ -1609,11 +1607,11 @@ protected void runInContext() { updateRouterHealthChecksConfig(router); } else { String resultDetails = (answer == null) ? "" : ", " + answer.getMonitoringResults(); - s_logger.debug("Couldn't update health checks config on router: " + router + " as basic tests didn't succeed" + resultDetails); + logger.debug("Couldn't update health checks config on router: " + router + " as basic tests didn't succeed" + resultDetails); } } } catch (final Exception ex) { - s_logger.error("Fail to complete the UpdateRouterHealthChecksConfigTask! ", ex); + logger.error("Fail to complete the UpdateRouterHealthChecksConfigTask! 
", ex); } } } @@ -1655,22 +1653,22 @@ private boolean updateRouterHealthChecksConfig(DomainRouterVO router) { String controlIP = _routerControlHelper.getRouterControlIp(router.getId()); if (StringUtils.isBlank(controlIP) || controlIP.equals("0.0.0.0")) { - s_logger.debug("Skipping update data on router " + router.getUuid() + " because controlIp is not correct."); + logger.debug("Skipping update data on router " + router.getUuid() + " because controlIp is not correct."); return false; } - s_logger.info("Updating data for router health checks for router " + router.getUuid()); + logger.info("Updating data for router health checks for router " + router.getUuid()); Answer origAnswer = null; try { SetMonitorServiceCommand command = createMonitorServiceCommand(router, null, true, true); origAnswer = _agentMgr.easySend(router.getHostId(), command); } catch (final Exception e) { - s_logger.error("Error while sending update data for health check to router: " + router.getInstanceName(), e); + logger.error("Error while sending update data for health check to router: " + router.getInstanceName(), e); return false; } if (origAnswer == null) { - s_logger.error("Unable to update health checks data to router " + router.getHostName()); + logger.error("Unable to update health checks data to router " + router.getHostName()); return false; } @@ -1678,12 +1676,12 @@ private boolean updateRouterHealthChecksConfig(DomainRouterVO router) { if (origAnswer instanceof GroupAnswer) { answer = (GroupAnswer) origAnswer; } else { - s_logger.error("Unable to update health checks data to router " + router.getHostName() + " Received answer " + origAnswer.getDetails()); + logger.error("Unable to update health checks data to router " + router.getHostName() + " Received answer " + origAnswer.getDetails()); return false; } if (!answer.getResult()) { - s_logger.error("Unable to update health checks data to router " + router.getHostName() + ", details : " + answer.getDetails()); + logger.error("Unable to 
update health checks data to router " + router.getHostName() + ", details : " + answer.getDetails()); } return answer.getResult(); @@ -1851,7 +1849,7 @@ protected void runInContext() { try { getRouterAlerts(); } catch (final Exception ex) { - s_logger.error("Fail to complete the CheckRouterAlertsTask! ", ex); + logger.error("Fail to complete the CheckRouterAlertsTask! ", ex); } } } @@ -1860,7 +1858,7 @@ protected void getRouterAlerts() { try { final List routers = _routerDao.listByStateAndManagementServer(VirtualMachine.State.Running, mgmtSrvrId); - s_logger.debug("Found " + routers.size() + " running routers. "); + logger.debug("Found " + routers.size() + " running routers. "); for (final DomainRouterVO router : routers) { final Boolean serviceMonitoringFlag = SetServiceMonitor.valueIn(router.getDataCenterId()); // Skip the routers in VPC network or skip the routers where @@ -1891,17 +1889,17 @@ protected void getRouterAlerts() { GetRouterAlertsAnswer answer = null; if (origAnswer == null) { - s_logger.warn("Unable to get alerts from router " + router.getHostName()); + logger.warn("Unable to get alerts from router " + router.getHostName()); continue; } if (origAnswer instanceof GetRouterAlertsAnswer) { answer = (GetRouterAlertsAnswer) origAnswer; } else { - s_logger.warn("Unable to get alerts from router " + router.getHostName()); + logger.warn("Unable to get alerts from router " + router.getHostName()); continue; } if (!answer.getResult()) { - s_logger.warn("Unable to get alerts from router " + router.getHostName() + " " + answer.getDetails()); + logger.warn("Unable to get alerts from router " + router.getHostName() + " " + answer.getDetails()); continue; } @@ -1913,7 +1911,7 @@ protected void getRouterAlerts() { try { sdfrmt.parse(lastAlertTimeStamp); } catch (final ParseException e) { - s_logger.warn("Invalid last alert timestamp received while collecting alerts from router: " + router.getInstanceName()); + logger.warn("Invalid last alert timestamp received 
while collecting alerts from router: " + router.getInstanceName()); continue; } for (final String alert : alerts) { @@ -1929,13 +1927,13 @@ protected void getRouterAlerts() { } } } catch (final Exception e) { - s_logger.warn("Error while collecting alerts from router: " + router.getInstanceName(), e); + logger.warn("Error while collecting alerts from router: " + router.getInstanceName(), e); continue; } } } } catch (final Exception e) { - s_logger.warn("Error while collecting alerts from router", e); + logger.warn("Error while collecting alerts from router", e); } } @@ -2005,12 +2003,12 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile // DOMR control command is sent over management server in VMware if (dest.getHost().getHypervisorType() == HypervisorType.VMware || dest.getHost().getHypervisorType() == HypervisorType.Hyperv) { - s_logger.info("Check if we need to add management server explicit route to DomR. pod cidr: " + dest.getPod().getCidrAddress() + "/" + logger.info("Check if we need to add management server explicit route to DomR. 
pod cidr: " + dest.getPod().getCidrAddress() + "/" + dest.getPod().getCidrSize() + ", pod gateway: " + dest.getPod().getGateway() + ", management host: " + ApiServiceConfiguration.ManagementServerAddresses.value()); - if (s_logger.isInfoEnabled()) { - s_logger.info("Add management server explicit route to DomR."); + if (logger.isInfoEnabled()) { + logger.info("Add management server explicit route to DomR."); } // always add management explicit route, for basic @@ -2033,14 +2031,14 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile } } else if (nic.getTrafficType() == TrafficType.Guest) { - s_logger.info("Guest IP : " + nic.getIPv4Address()); + logger.info("Guest IP : " + nic.getIPv4Address()); dnsProvided = _networkModel.isProviderSupportServiceInNetwork(nic.getNetworkId(), Service.Dns, Provider.VirtualRouter); dhcpProvided = _networkModel.isProviderSupportServiceInNetwork(nic.getNetworkId(), Service.Dhcp, Provider.VirtualRouter); buf.append(" privateMtu=").append(nic.getMtu()); // build bootloader parameter for the guest buf.append(createGuestBootLoadArgs(nic, defaultDns1, defaultDns2, router)); } else if (nic.getTrafficType() == TrafficType.Public) { - s_logger.info("Public IP : " + nic.getIPv4Address()); + logger.info("Public IP : " + nic.getIPv4Address()); publicNetwork = true; buf.append(" publicMtu=").append(nic.getMtu()); } @@ -2125,7 +2123,7 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile acntq.and(acntq.entity().getUsername(), SearchCriteria.Op.EQ, "baremetal-system-account"); final UserVO user = acntq.find(); if (user == null) { - s_logger.warn(String + logger.warn(String .format("global setting[baremetal.provision.done.notification] is enabled but user baremetal-system-account is not found. 
Baremetal provision done notification will not be enabled")); } else { buf.append(String.format(" baremetalnotificationsecuritykey=%s", user.getSecretKey())); @@ -2137,17 +2135,17 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile String routerLogrotateFrequency = RouterLogrotateFrequency.valueIn(router.getDataCenterId()); if (!checkLogrotateTimerPattern(routerLogrotateFrequency)) { - s_logger.debug(String.format("Setting [%s] with value [%s] do not match with the used regex [%s], or any acceptable value ('hourly', 'daily', 'monthly'); " + + logger.debug(String.format("Setting [%s] with value [%s] do not match with the used regex [%s], or any acceptable value ('hourly', 'daily', 'monthly'); " + "therefore, we will use the default value [%s] to configure the logrotate service on the virtual router.",RouterLogrotateFrequency.key(), - routerLogrotateFrequency, LOGROTATE_REGEX, RouterLogrotateFrequency.defaultValue())); + routerLogrotateFrequency, LOGROTATE_REGEX, RouterLogrotateFrequency.defaultValue())); routerLogrotateFrequency = RouterLogrotateFrequency.defaultValue(); } - s_logger.debug(String.format("The setting [%s] with value [%s] for the zone with UUID [%s], will be used to configure the logrotate service frequency" + + logger.debug(String.format("The setting [%s] with value [%s] for the zone with UUID [%s], will be used to configure the logrotate service frequency" + " on the virtual router.", RouterLogrotateFrequency.key(), routerLogrotateFrequency, dc.getUuid())); buf.append(String.format(" logrotatefrequency=%s", routerLogrotateFrequency)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); + if (logger.isDebugEnabled()) { + logger.debug("Boot Args for " + profile + ": " + buf.toString()); } return true; @@ -2159,7 +2157,7 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile * @return true if the passed value match with any acceptable 
value based on the regex ((?i)(hourly)|(daily)|(monthly))|(\*|\d{2})\:(\*|\d{2})\:(\*|\d{2}) */ protected boolean checkLogrotateTimerPattern(String routerLogrotateFrequency) { - if (Pattern.matches(LOGROTATE_REGEX, routerLogrotateFrequency)) { + if (Pattern.matches(loggerROTATE_REGEX, routerLogrotateFrequency)) { return true; } return false; @@ -2259,7 +2257,7 @@ protected StringBuilder createRedundantRouterArgs(final NicProfile nic, final Do buf.append(" router_password=").append(password); } catch (final NoSuchAlgorithmException e) { - s_logger.error("Failed to pssword! Will use the plan B instead."); + logger.error("Failed to pssword! Will use the plan B instead."); buf.append(" router_password=").append(vpc.getUuid()); } @@ -2326,7 +2324,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine final NicProfile controlNic = getControlNic(profile); if (controlNic == null) { - s_logger.error("Control network doesn't exist for the router " + router); + logger.error("Control network doesn't exist for the router " + router); return false; } @@ -2375,7 +2373,7 @@ protected void finalizeMonitorService(final Commands cmds, final VirtualMachineP final Boolean isMonitoringServicesEnabled = serviceMonitoringSet != null && serviceMonitoringSet.equalsIgnoreCase("true"); final NetworkVO network = _networkDao.findById(networkId); - s_logger.debug("Creating monitoring services on " + router + " start..."); + logger.debug("Creating monitoring services on " + router + " start..."); // get the list of sevices for this network to monitor final List services = new ArrayList(); @@ -2460,19 +2458,19 @@ protected void finalizeUserDataAndDhcpOnStart(final Commands cmds, final DomainR if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Dhcp, provider) || _networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Dns, provider)) { // Resend dhcp - s_logger.debug("Reapplying dhcp entries as a part of domR " + router + " 
start..."); + logger.debug("Reapplying dhcp entries as a part of domR " + router + " start..."); _commandSetupHelper.createDhcpEntryCommandsForVMs(router, cmds, guestNetworkId); } if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.UserData, provider)) { // Resend user data - s_logger.debug("Reapplying vm data (userData and metaData) entries as a part of domR " + router + " start..."); + logger.debug("Reapplying vm data (userData and metaData) entries as a part of domR " + router + " start..."); _commandSetupHelper.createVmDataCommandForVMs(router, cmds, guestNetworkId); } } protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainRouterVO router, final Provider provider, final Long guestNetworkId) { - s_logger.debug("Resending ipAssoc, port forwarding, load balancing rules as a part of Virtual router start"); + logger.debug("Resending ipAssoc, port forwarding, load balancing rules as a part of Virtual router start"); final ArrayList publicIps = getPublicIpsToApply(router, provider, guestNetworkId); final List firewallRulesEgress = new ArrayList(); @@ -2490,12 +2488,12 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR } // Re-apply firewall Egress rules - s_logger.debug("Found " + firewallRulesEgress.size() + " firewall Egress rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + firewallRulesEgress.size() + " firewall Egress rule(s) to apply as a part of domR " + router + " start."); if (!firewallRulesEgress.isEmpty()) { _commandSetupHelper.createFirewallRulesCommands(firewallRulesEgress, router, cmds, guestNetworkId); } - s_logger.debug(String.format("Found %d Ipv6 firewall rule(s) to apply as a part of domR %s start.", ipv6firewallRules.size(), router)); + logger.debug(String.format("Found %d Ipv6 firewall rule(s) to apply as a part of domR %s start.", ipv6firewallRules.size(), router)); if (!ipv6firewallRules.isEmpty()) { 
_commandSetupHelper.createIpv6FirewallRulesCommands(ipv6firewallRules, router, cmds, guestNetworkId); } @@ -2533,7 +2531,7 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR boolean revoke = false; if (ip.getState() == IpAddress.State.Releasing ) { // for ips got struck in releasing state we need to delete the rule not add. - s_logger.debug("Rule revoke set to true for the ip " + ip.getAddress() +" because it is in releasing state"); + logger.debug("Rule revoke set to true for the ip " + ip.getAddress() +" because it is in releasing state"); revoke = true; } final StaticNatImpl staticNat = new StaticNatImpl(ip.getAccountId(), ip.getDomainId(), guestNetworkId, ip.getId(), ip.getVmIp(), revoke); @@ -2544,25 +2542,25 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR } // Re-apply static nats - s_logger.debug("Found " + staticNats.size() + " static nat(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + staticNats.size() + " static nat(s) to apply as a part of domR " + router + " start."); if (!staticNats.isEmpty()) { _commandSetupHelper.createApplyStaticNatCommands(staticNats, router, cmds, guestNetworkId); } // Re-apply firewall Ingress rules - s_logger.debug("Found " + firewallRulesIngress.size() + " firewall Ingress rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + firewallRulesIngress.size() + " firewall Ingress rule(s) to apply as a part of domR " + router + " start."); if (!firewallRulesIngress.isEmpty()) { _commandSetupHelper.createFirewallRulesCommands(firewallRulesIngress, router, cmds, guestNetworkId); } // Re-apply port forwarding rules - s_logger.debug("Found " + pfRules.size() + " port forwarding rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + pfRules.size() + " port forwarding rule(s) to apply as a part of domR " + router + " start."); if (!pfRules.isEmpty()) { 
_commandSetupHelper.createApplyPortForwardingRulesCommands(pfRules, router, cmds, guestNetworkId); } // Re-apply static nat rules - s_logger.debug("Found " + staticNatFirewallRules.size() + " static nat rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + staticNatFirewallRules.size() + " static nat rule(s) to apply as a part of domR " + router + " start."); if (!staticNatFirewallRules.isEmpty()) { final List staticNatRules = new ArrayList(); for (final FirewallRule rule : staticNatFirewallRules) { @@ -2572,7 +2570,7 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR } // Re-apply vpn rules - s_logger.debug("Found " + vpns.size() + " vpn(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + vpns.size() + " vpn(s) to apply as a part of domR " + router + " start."); if (!vpns.isEmpty()) { for (final RemoteAccessVpn vpn : vpns) { _commandSetupHelper.createApplyVpnCommands(true, vpn, router, cmds); @@ -2594,7 +2592,7 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR } } - s_logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + lbRules.size() + " load balancing rule(s) to apply as a part of domR " + router + " start."); if (!lbRules.isEmpty()) { _commandSetupHelper.createApplyLoadBalancingRulesCommands(lbRules, router, cmds, guestNetworkId); } @@ -2607,11 +2605,11 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR final String supportsMultipleSubnets = dhcpCapabilities.get(Network.Capability.DhcpAccrossMultipleSubnets); if (supportsMultipleSubnets != null && Boolean.valueOf(supportsMultipleSubnets)) { final List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(guestNetworkId, NicIpAlias.State.revoked); - s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp 
configuration"); + logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); removeRevokedIpAliasFromDb(revokedIpAliasVOs); final List aliasVOs = _nicIpAliasDao.listByNetworkIdAndState(guestNetworkId, NicIpAlias.State.active); - s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); + logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); final List activeIpAliasTOs = new ArrayList(); for (final NicIpAliasVO aliasVO : aliasVOs) { activeIpAliasTOs.add(new IpAliasTO(aliasVO.getIp4Address(), aliasVO.getNetmask(), aliasVO.getAliasCount().toString())); @@ -2642,7 +2640,7 @@ private void createDefaultEgressFirewallRule(final List rules, fin rules.add(rule); } else { - s_logger.debug("Egress policy for the Network " + networkId + " is already defined as Deny. So, no need to default the rule to Allow. "); + logger.debug("Egress policy for the Network " + networkId + " is already defined as Deny. So, no need to default the rule to Allow. 
"); } } @@ -2673,7 +2671,7 @@ protected void finalizeIpAssocForNetwork(final Commands cmds, final VirtualRoute final ArrayList publicIps = getPublicIpsToApply(router, provider, guestNetworkId); if (publicIps != null && !publicIps.isEmpty()) { - s_logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + router + " start."); + logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + router + " start."); // Re-apply public ip addresses - should come before PF/LB/VPN if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.Firewall, provider)) { _commandSetupHelper.createAssociateIPCommands(router, publicIps, cmds, 0); @@ -2701,7 +2699,7 @@ protected ArrayList getPublicIpsToApply(final Virtual if (skipInStates != null) { for (final IpAddress.State stateToSkip : skipInStates) { if (userIp.getState() == stateToSkip) { - s_logger.debug("Skipping ip address " + userIp + " in state " + userIp.getState()); + logger.debug("Skipping ip address " + userIp + " in state " + userIp.getState()); addIp = false; break; } @@ -2740,8 +2738,8 @@ public boolean finalizeStart(final VirtualMachineProfile profile, final long hos final String errorDetails = "Details: " + answer.getDetails() + " " + answer.toString(); // add alerts for the failed commands _alertMgr.sendAlert(AlertService.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), errorMessage, errorDetails); - s_logger.error(answer.getDetails()); - s_logger.warn(errorMessage); + logger.error(answer.getDetails()); + logger.warn(errorMessage); // Stop the router if any of the commands failed return false; } @@ -2777,7 +2775,7 @@ public boolean finalizeStart(final VirtualMachineProfile profile, final long hos try { result = networkTopology.setupDhcpForPvlan(true, router, router.getHostId(), nicProfile); } catch (final ResourceUnavailableException e) { - s_logger.debug("ERROR in finalizeStart: ", e); + logger.debug("ERROR in 
finalizeStart: ", e); } } } @@ -2810,7 +2808,7 @@ public void finalizeStop(final VirtualMachineProfile profile, final Answer answe try { networkTopology.setupDhcpForPvlan(false, domR, domR.getHostId(), nicProfile); } catch (final ResourceUnavailableException e) { - s_logger.debug("ERROR in finalizeStop: ", e); + logger.debug("ERROR in finalizeStop: ", e); } } } @@ -2825,13 +2823,13 @@ public void finalizeExpunge(final VirtualMachine vm) { @Override public boolean startRemoteAccessVpn(final Network network, final RemoteAccessVpn vpn, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to start remote access VPN: no router found for account and zone"); + logger.warn("Failed to start remote access VPN: no router found for account and zone"); throw new ResourceUnavailableException("Failed to start remote access VPN: no router found for account and zone", DataCenter.class, network.getDataCenterId()); } for (final VirtualRouter router : routers) { if (router.getState() != VirtualMachine.State.Running) { - s_logger.warn("Failed to start remote access VPN: router not in right state " + router.getState()); + logger.warn("Failed to start remote access VPN: router not in right state " + router.getState()); throw new ResourceUnavailableException("Failed to start remote access VPN: router not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } @@ -2845,20 +2843,20 @@ public boolean startRemoteAccessVpn(final Network network, final RemoteAccessVpn Answer answer = cmds.getAnswer("users"); if (answer == null) { - s_logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to null answer"); throw new 
ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to null answer", DataCenter.class, router.getDataCenterId()); } if (!answer.getResult()) { - s_logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails()); throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); } answer = cmds.getAnswer("startVpn"); if (!answer.getResult()) { - s_logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails()); throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); @@ -2871,7 +2869,7 @@ public boolean startRemoteAccessVpn(final Network network, final RemoteAccessVpn @Override public boolean deleteRemoteAccessVpn(final Network network, final RemoteAccessVpn vpn, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to delete remote access VPN: no router found for account and zone"); + 
logger.warn("Failed to delete remote access VPN: no router found for account and zone"); throw new ResourceUnavailableException("Failed to delete remote access VPN", DataCenter.class, network.getDataCenterId()); } @@ -2882,10 +2880,10 @@ public boolean deleteRemoteAccessVpn(final Network network, final RemoteAccessVp _commandSetupHelper.createApplyVpnCommands(false, vpn, router, cmds); result = result && _nwHelper.sendCommandsToRouter(router, cmds); } else if (router.getState() == VirtualMachine.State.Stopped) { - s_logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); + logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); continue; } else { - s_logger.warn("Failed to delete remote access VPN: domR " + router + " is not in right state " + router.getState()); + logger.warn("Failed to delete remote access VPN: domR " + router + " is not in right state " + router.getState()); throw new ResourceUnavailableException("Failed to delete remote access VPN: domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } @@ -2897,7 +2895,7 @@ public boolean deleteRemoteAccessVpn(final Network network, final RemoteAccessVp @Override public DomainRouterVO stop(final VirtualRouter router, final boolean forced, final User user, final Account caller) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Stopping router " + router); + logger.debug("Stopping router " + router); try { _itMgr.advanceStop(router.getUuid(), forced); return _routerDao.findById(router.getId()); @@ -2909,26 +2907,26 @@ public DomainRouterVO stop(final VirtualRouter router, final boolean forced, fin @Override public boolean removeDhcpSupportForSubnet(final Network network, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to add/remove VPN users: no router 
found for account and zone"); + logger.warn("Failed to add/remove VPN users: no router found for account and zone"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), DataCenter.class, network.getDataCenterId()); } for (final DomainRouterVO router : routers) { if (router.getState() != VirtualMachine.State.Running) { - s_logger.warn("Failed to add/remove VPN users: router not in running state"); + logger.warn("Failed to add/remove VPN users: router not in running state"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } final Commands cmds = new Commands(Command.OnError.Continue); final List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.State.revoked); - s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); + logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); final List revokedIpAliasTOs = new ArrayList(); for (final NicIpAliasVO revokedAliasVO : revokedIpAliasVOs) { revokedIpAliasTOs.add(new IpAliasTO(revokedAliasVO.getIp4Address(), revokedAliasVO.getNetmask(), revokedAliasVO.getAliasCount().toString())); } final List aliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.State.active); - s_logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); + logger.debug("Found" + aliasVOs.size() + "ip Aliases to apply on the router as a part of dhcp configuration"); final List activeIpAliasTOs = new ArrayList(); for (final NicIpAliasVO aliasVO : aliasVOs) { activeIpAliasTOs.add(new IpAliasTO(aliasVO.getIp4Address(), aliasVO.getNetmask(), aliasVO.getAliasCount().toString())); @@ -2992,7 +2990,7 @@ public VirtualRouter 
startRouter(final long routerId, final boolean reprogramNet for (final NicVO nic : nics) { if (!_networkMgr.startNetwork(nic.getNetworkId(), dest, context)) { - s_logger.warn("Failed to start network id=" + nic.getNetworkId() + " as a part of domR start"); + logger.warn("Failed to start network id=" + nic.getNetworkId() + " as a part of domR start"); throw new CloudRuntimeException("Failed to start network id=" + nic.getNetworkId() + " as a part of domR start"); } } @@ -3061,16 +3059,16 @@ public void processConnect(final Host host, final StartupCommand cmd, final bool final List routers = _routerDao.listIsolatedByHostId(host.getId()); for (DomainRouterVO router : routers) { if (router.isStopPending()) { - s_logger.info("Stopping router " + router.getInstanceName() + " due to stop pending flag found!"); + logger.info("Stopping router " + router.getInstanceName() + " due to stop pending flag found!"); final VirtualMachine.State state = router.getState(); if (state != VirtualMachine.State.Stopped && state != VirtualMachine.State.Destroyed) { try { stopRouter(router.getId(), false); } catch (final ResourceUnavailableException e) { - s_logger.warn("Fail to stop router " + router.getInstanceName(), e); + logger.warn("Fail to stop router " + router.getInstanceName(), e); throw new ConnectionException(false, "Fail to stop router " + router.getInstanceName()); } catch (final ConcurrentOperationException e) { - s_logger.warn("Fail to stop router " + router.getInstanceName(), e); + logger.warn("Fail to stop router " + router.getInstanceName(), e); throw new ConnectionException(false, "Fail to stop router " + router.getInstanceName()); } } @@ -3135,7 +3133,7 @@ public void collectNetworkStatistics(final T router, f //[TODO] Avoiding the NPE now, but I have to find out what is going on with the network. - Wilder Rodrigues if (network == null) { - s_logger.error("Could not find a network with ID => " + routerNic.getNetworkId() + ". 
It might be a problem!"); + logger.error("Could not find a network with ID => " + routerNic.getNetworkId() + ". It might be a problem!"); continue; } if (forVpc && network.getTrafficType() == TrafficType.Public || !forVpc && network.getTrafficType() == TrafficType.Guest @@ -3148,19 +3146,19 @@ public void collectNetworkStatistics(final T router, f try { answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd); } catch (final Exception e) { - s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); + logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId(), e); continue; } if (answer != null) { if (!answer.getResult()) { - s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + answer.getDetails()); continue; } try { if (answer.getBytesReceived() == 0 && answer.getBytesSent() == 0) { - s_logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); + logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); continue; } @@ -3171,29 +3169,29 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), network.getId(), forVpc ? 
routerNic.getIPv4Address() : null, router.getId(), routerType); if (stats == null) { - s_logger.warn("unable to find stats for account: " + router.getAccountId()); + logger.warn("unable to find stats for account: " + router.getAccountId()); return; } if (previousStats != null && (previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived() || previousStats.getCurrentBytesSent() != stats .getCurrentBytesSent())) { - s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + "Ignoring current answer. Router: " + logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + "Ignoring current answer. Router: " + answerFinal.getRouterName() + " Rcvd: " + answerFinal.getBytesReceived() + "Sent: " + answerFinal.getBytesSent()); return; } if (stats.getCurrentBytesReceived() > answerFinal.getBytesReceived()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + answerFinal.getRouterName() + " Reported: " + toHumanReadableSize(answerFinal.getBytesReceived()) + " Stored: " + toHumanReadableSize(stats.getCurrentBytesReceived())); } stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); } stats.setCurrentBytesReceived(answerFinal.getBytesReceived()); if (stats.getCurrentBytesSent() > answerFinal.getBytesSent()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Router: " + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. 
Router: " + answerFinal.getRouterName() + " Reported: " + toHumanReadableSize(answerFinal.getBytesSent()) + " Stored: " + toHumanReadableSize(stats.getCurrentBytesSent())); } stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); @@ -3208,7 +3206,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); } catch (final Exception e) { - s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + toHumanReadableSize(answer.getBytesReceived()) + "; Tx: " + logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + toHumanReadableSize(answer.getBytesReceived()) + "; Tx: " + toHumanReadableSize(answer.getBytesSent())); } } @@ -3291,7 +3289,7 @@ private List rebootRouters(final List routers) { final List jobIds = new ArrayList(); for (final DomainRouterVO router : routers) { if (!_nwHelper.checkRouterTemplateVersion(router)) { - s_logger.debug("Upgrading template for router: " + router.getId()); + logger.debug("Upgrading template for router: " + router.getId()); final Map params = new HashMap(); params.put("ctxUserId", "1"); params.put("ctxAccountId", "" + router.getAccountId()); @@ -3306,7 +3304,7 @@ private List rebootRouters(final List routers) { final long jobId = _asyncMgr.submitAsyncJob(job); jobIds.add(jobId); } else { - s_logger.debug("Router: " + router.getId() + " is already at the latest version. No upgrade required"); + logger.debug("Router: " + router.getId() + " is already at the latest version. 
No upgrade required"); } } return jobIds; @@ -3359,7 +3357,7 @@ public boolean postStateTransitionEvent(final StateMachine2.Transition params) th public boolean addVpcRouterToGuestNetwork(final VirtualRouter router, final Network network, final Map params) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.warn("Network " + network + " is not of type " + TrafficType.Guest); + logger.warn("Network " + network + " is not of type " + TrafficType.Guest); return false; } @@ -160,7 +158,7 @@ public boolean addVpcRouterToGuestNetwork(final VirtualRouter router, final Netw if (guestNic != null) { result = setupVpcGuestNetwork(network, router, true, guestNic); } else { - s_logger.warn("Failed to add router " + router + " to guest network " + network); + logger.warn("Failed to add router " + router + " to guest network " + network); result = false; } // 3) apply networking rules @@ -169,18 +167,18 @@ public boolean addVpcRouterToGuestNetwork(final VirtualRouter router, final Netw sendNetworkRulesToRouter(router.getId(), network.getId(), reprogramNetwork); } } catch (final Exception ex) { - s_logger.warn("Failed to add router " + router + " to network " + network + " due to ", ex); + logger.warn("Failed to add router " + router + " to network " + network + " due to ", ex); result = false; } finally { if (!result) { - s_logger.debug("Removing the router " + router + " from network " + network + " as a part of cleanup"); + logger.debug("Removing the router " + router + " from network " + network + " as a part of cleanup"); if (removeVpcRouterFromGuestNetwork(router, network)) { - s_logger.debug("Removed the router " + router + " from network " + network + " as a part of cleanup"); + logger.debug("Removed the router " + router + " from network " + network + " as a part of cleanup"); } else { - s_logger.warn("Failed to remove the router " + router + " from network " + 
network + " as a part of cleanup"); + logger.warn("Failed to remove the router " + router + " from network " + network + " as a part of cleanup"); } } else { - s_logger.debug("Successfully added router " + router + " to guest network " + network); + logger.debug("Successfully added router " + router + " to guest network " + network); } } @@ -191,7 +189,7 @@ public boolean addVpcRouterToGuestNetwork(final VirtualRouter router, final Netw public boolean removeVpcRouterFromGuestNetwork(final VirtualRouter router, final Network network) throws ConcurrentOperationException, ResourceUnavailableException { if (network.getTrafficType() != TrafficType.Guest) { - s_logger.warn("Network " + network + " is not of type " + TrafficType.Guest); + logger.warn("Network " + network + " is not of type " + TrafficType.Guest); return false; } @@ -199,13 +197,13 @@ public boolean removeVpcRouterFromGuestNetwork(final VirtualRouter router, final try { // Check if router is a part of the Guest network if (!_networkModel.isVmPartOfNetwork(router.getId(), network.getId())) { - s_logger.debug("Router " + router + " is not a part of the Guest network " + network); + logger.debug("Router " + router + " is not a part of the Guest network " + network); return result; } result = setupVpcGuestNetwork(network, router, false, _networkModel.getNicProfile(router, network.getId(), null)); if (!result) { - s_logger.warn("Failed to destroy guest network config " + network + " on router " + router); + logger.warn("Failed to destroy guest network config " + network + " on router " + router); return false; } @@ -233,15 +231,15 @@ protected boolean setupVpcGuestNetwork(final Network network, final VirtualRoute final Answer setupAnswer = cmds.getAnswer("setupguestnetwork"); final String setup = add ? 
"set" : "destroy"; if (!(setupAnswer != null && setupAnswer.getResult())) { - s_logger.warn("Unable to " + setup + " guest network on router " + router); + logger.warn("Unable to " + setup + " guest network on router " + router); result = false; } return result; } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup guest network command to the backend"); + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup guest network command to the backend"); return true; } else { - s_logger.warn("Unable to setup guest network on virtual router " + router + " is not in the right state " + router.getState()); + logger.warn("Unable to setup guest network on virtual router " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to setup guest network on the backend," + " virtual router " + router + " is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -271,7 +269,7 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile defaultIp6Dns1 = nic.getIPv6Dns1(); defaultIp6Dns2 = nic.getIPv6Dns2(); } - s_logger.debug("Removing nic " + nic + " of type " + nic.getTrafficType() + " from the nics passed on vm start. " + "The nic will be plugged later"); + logger.debug("Removing nic " + nic + " of type " + nic.getTrafficType() + " from the nics passed on vm start. 
" + "The nic will be plugged later"); it.remove(); } } @@ -320,7 +318,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine // 1) FORM SSH CHECK COMMAND final NicProfile controlNic = getControlNic(profile); if (controlNic == null) { - s_logger.error("Control network doesn't exist for the router " + domainRouterVO); + logger.error("Control network doesn't exist for the router " + domainRouterVO); return false; } @@ -418,7 +416,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine if (privateGwAclId != null) { // set network acl on private gateway final List networkACLs = _networkACLItemDao.listByACL(privateGwAclId); - s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for private gateway ip = " + logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for private gateway ip = " + ipVO.getIpAddress()); _commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, ipVO.getNetworkId(), true); @@ -439,7 +437,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine cmds.addCommand(setupCmd); } } catch (final Exception ex) { - s_logger.warn("Failed to add router " + domainRouterVO + " to network due to exception ", ex); + logger.warn("Failed to add router " + domainRouterVO + " to network due to exception ", ex); return false; } @@ -456,7 +454,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine staticRouteProfiles.add(new StaticRouteProfile(route, gateway)); } - s_logger.debug("Found " + staticRouteProfiles.size() + " static routes to apply as a part of vpc route " + domainRouterVO + " start"); + logger.debug("Found " + staticRouteProfiles.size() + " static routes to apply as a part of vpc route " + domainRouterVO + " start"); if (!staticRouteProfiles.isEmpty()) { 
_commandSetupHelper.createStaticRouteCommands(staticRouteProfiles, domainRouterVO, cmds); } @@ -529,7 +527,7 @@ protected void finalizeNetworkRulesForNetwork(final Commands cmds, final DomainR if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.NetworkACL, Provider.VPCVirtualRouter)) { final List networkACLs = _networkACLMgr.listNetworkACLItems(guestNetworkId); if (networkACLs != null && !networkACLs.isEmpty()) { - s_logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for guest network id=" + guestNetworkId); + logger.debug("Found " + networkACLs.size() + " network ACLs to apply as a part of VPC VR " + domainRouterVO + " start for guest network id=" + guestNetworkId); _commandSetupHelper.createNetworkACLsCommands(networkACLs, domainRouterVO, cmds, guestNetworkId, false); } } @@ -592,20 +590,20 @@ protected boolean setupVpcPrivateNetwork(final VirtualRouter router, final boole try { if (_nwHelper.sendCommandsToRouter(router, cmds)) { - s_logger.debug("Successfully applied ip association for ip " + ip + " in vpc network " + network); + logger.debug("Successfully applied ip association for ip " + ip + " in vpc network " + network); return true; } else { - s_logger.warn("Failed to associate ip address " + ip + " in vpc network " + network); + logger.warn("Failed to associate ip address " + ip + " in vpc network " + network); return false; } } catch (final Exception ex) { - s_logger.warn("Failed to send " + (add ? "add " : "delete ") + " private network " + network + " commands to rotuer "); + logger.warn("Failed to send " + (add ? 
"add " : "delete ") + " private network " + network + " commands to rotuer "); return false; } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup private network command to the backend"); + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending setup private network command to the backend"); } else { - s_logger.warn("Unable to setup private gateway, virtual router " + router + " is not in the right state " + router.getState()); + logger.warn("Unable to setup private gateway, virtual router " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to setup Private gateway on the backend," + " virtual router " + router + " is not in the right state", DataCenter.class, router.getDataCenterId()); @@ -618,29 +616,29 @@ public boolean destroyPrivateGateway(final PrivateGateway gateway, final Virtual boolean result = true; if (!_networkModel.isVmPartOfNetwork(router.getId(), gateway.getNetworkId())) { - s_logger.debug("Router doesn't have nic for gateway " + gateway + " so no need to removed it"); + logger.debug("Router doesn't have nic for gateway " + gateway + " so no need to removed it"); return result; } final Network privateNetwork = _networkModel.getNetwork(gateway.getNetworkId()); final NicProfile nicProfile = _networkModel.getNicProfile(router, privateNetwork.getId(), null); - s_logger.debug("Releasing private ip for gateway " + gateway + " from " + router); + logger.debug("Releasing private ip for gateway " + gateway + " from " + router); result = setupVpcPrivateNetwork(router, false, nicProfile); if (!result) { - s_logger.warn("Failed to release private ip for gateway " + gateway + " on router " + router); + logger.warn("Failed to release private ip for gateway " + gateway + " on router " + router); return false; } 
// revoke network acl on the private gateway. if (!_networkACLMgr.revokeACLItemsForPrivateGw(gateway)) { - s_logger.debug("Failed to delete network acl items on " + gateway + " from router " + router); + logger.debug("Failed to delete network acl items on " + gateway + " from router " + router); return false; } - s_logger.debug("Removing router " + router + " from private network " + privateNetwork + " as a part of delete private gateway"); + logger.debug("Removing router " + router + " from private network " + privateNetwork + " as a part of delete private gateway"); result = result && _itMgr.removeVmFromNetwork(router, privateNetwork, null); - s_logger.debug("Private gateawy " + gateway + " is removed from router " + router); + logger.debug("Private gateawy " + gateway + " is removed from router " + router); return result; } @@ -657,7 +655,7 @@ protected void finalizeIpAssocForNetwork(final Commands cmds, final VirtualRoute final ArrayList publicIps = getPublicIpsToApply(domainRouterVO, provider, guestNetworkId, IpAddress.State.Releasing); if (publicIps != null && !publicIps.isEmpty()) { - s_logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + domainRouterVO + " start."); + logger.debug("Found " + publicIps.size() + " ip(s) to apply as a part of domR " + domainRouterVO + " start."); // Re-apply public ip addresses - should come before PF/LB/VPN _commandSetupHelper.createVpcAssociatePublicIPCommands(domainRouterVO, publicIps, cmds, vlanMacAddress); } @@ -667,7 +665,7 @@ protected void finalizeIpAssocForNetwork(final Commands cmds, final VirtualRoute @Override public boolean startSite2SiteVpn(final Site2SiteVpnConnection conn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != State.Running) { - s_logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply site-to-site VPN configuration, virtual router 
is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply site 2 site VPN configuration," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -689,7 +687,7 @@ public boolean startSite2SiteVpn(DomainRouterVO router) throws ResourceUnavailab @Override public boolean stopSite2SiteVpn(final Site2SiteVpnConnection conn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != State.Running) { - s_logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply site-to-site VPN configuration, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply site 2 site VPN configuration," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -723,7 +721,7 @@ protected Pair, Map> getNi final Nic nic = _nicDao.findByIp4AddressAndNetworkIdAndInstanceId(publicNtwkId, router.getId(), ip.getAddress().addr()); if (nic != null) { nicsToUnplug.put(ip.getVlanTag(), ip); - s_logger.debug("Need to unplug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); + logger.debug("Need to unplug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); } } } @@ -746,14 +744,14 @@ protected Pair, Map> getNi if (nic == null && nicsToPlug.get(ip.getVlanTag()) == null) { nicsToPlug.put(ip.getVlanTag(), ip); - s_logger.debug("Need to plug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); + logger.debug("Need to plug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); } else { final PublicIpAddress nicToUnplug = nicsToUnplug.get(ip.getVlanTag()); if (nicToUnplug != null) { final NicVO nicVO = 
_nicDao.findByIp4AddressAndNetworkIdAndInstanceId(publicNtwkId, router.getId(), nicToUnplug.getAddress().addr()); nicVO.setIPv4Address(ip.getAddress().addr()); _nicDao.update(nicVO.getId(), nicVO); - s_logger.debug("Updated the nic " + nicVO + " with the new ip address " + ip.getAddress().addr()); + logger.debug("Updated the nic " + nicVO + " with the new ip address " + ip.getAddress().addr()); nicsToUnplug.remove(ip.getVlanTag()); } } @@ -794,7 +792,7 @@ public boolean stop() { @Override public boolean startRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != State.Running) { - s_logger.warn("Unable to apply remote access VPN configuration, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply remote access VPN configuration, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply remote access VPN configuration," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } @@ -805,13 +803,13 @@ public boolean startRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRout try { _agentMgr.send(router.getHostId(), cmds); } catch (final OperationTimedoutException e) { - s_logger.debug("Failed to start remote access VPN: ", e); + logger.debug("Failed to start remote access VPN: ", e); throw new AgentUnavailableException("Unable to send commands to virtual router ", router.getHostId(), e); } Answer answer = cmds.getAnswer("users"); if (answer == null || !answer.getResult()) { String errorMessage = (answer == null) ? 
"null answer object" : answer.getDetails(); - s_logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + errorMessage); throw new ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + errorMessage, DataCenter.class, router.getDataCenterId()); @@ -819,7 +817,7 @@ public boolean startRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRout answer = cmds.getAnswer("startVpn"); if (answer == null || !answer.getResult()) { String errorMessage = (answer == null) ? "null answer object" : answer.getDetails(); - s_logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + errorMessage); throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + errorMessage, DataCenter.class, router.getDataCenterId()); @@ -837,9 +835,9 @@ public boolean stopRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRoute _commandSetupHelper.createApplyVpnCommands(false, vpn, router, cmds); result = result && _nwHelper.sendCommandsToRouter(router, cmds); } else if (router.getState() == State.Stopped) { - s_logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); + logger.debug("Router " + router + " is 
in Stopped state, not sending deleteRemoteAccessVpn command to it"); } else { - s_logger.warn("Failed to stop remote access VPN: domR " + router + " is not in right state " + router.getState()); + logger.warn("Failed to stop remote access VPN: domR " + router + " is not in right state " + router.getState()); throw new ResourceUnavailableException("Failed to stop remote access VPN: domR is not in right state " + router.getState(), DataCenter.class, router.getDataCenterId()); } diff --git a/server/src/main/java/com/cloud/network/rules/AdvancedVpnRules.java b/server/src/main/java/com/cloud/network/rules/AdvancedVpnRules.java index c513e7005a8f..631a8a4c5de8 100644 --- a/server/src/main/java/com/cloud/network/rules/AdvancedVpnRules.java +++ b/server/src/main/java/com/cloud/network/rules/AdvancedVpnRules.java @@ -20,7 +20,6 @@ import java.util.List; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.exception.ResourceUnavailableException; @@ -33,7 +32,6 @@ public class AdvancedVpnRules extends BasicVpnRules { - private static final Logger s_logger = Logger.getLogger(AdvancedVpnRules.class); private final RemoteAccessVpn _remoteAccessVpn; @@ -50,7 +48,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter Vpc vpc = vpcDao.findById(_remoteAccessVpn.getVpcId()); if (_router.getState() != State.Running) { - s_logger.warn("Failed to add/remove Remote Access VPN users: router not in running state"); + logger.warn("Failed to add/remove Remote Access VPN users: router not in running state"); throw new ResourceUnavailableException("Failed to add/remove Remote Access VPN users: router not in running state: " + router.getState(), DataCenter.class, vpc.getZoneId()); } diff --git a/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java b/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java index dd12acd89729..ccf8f1884712 100644 
--- a/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java +++ b/server/src/main/java/com/cloud/network/rules/DhcpSubNetRules.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; -import org.apache.log4j.Logger; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; @@ -59,7 +58,6 @@ public class DhcpSubNetRules extends RuleApplier { - private static final Logger s_logger = Logger.getLogger(DhcpSubNetRules.class); private final NicProfile _nic; private final VirtualMachineProfile _profile; @@ -132,8 +130,8 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter _routerAliasIp = routerPublicIP.getAddress().addr(); } } catch (final InsufficientAddressCapacityException e) { - s_logger.info(e.getMessage()); - s_logger.info("unable to configure dhcp for this VM."); + logger.info(e.getMessage()); + logger.info("unable to configure dhcp for this VM."); return false; } // this means we did not create an IP alias on the router. 
diff --git a/server/src/main/java/com/cloud/network/rules/NicPlugInOutRules.java b/server/src/main/java/com/cloud/network/rules/NicPlugInOutRules.java index b671e33df086..1b62d1a8a192 100644 --- a/server/src/main/java/com/cloud/network/rules/NicPlugInOutRules.java +++ b/server/src/main/java/com/cloud/network/rules/NicPlugInOutRules.java @@ -24,7 +24,6 @@ import java.util.Map.Entry; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; -import org.apache.log4j.Logger; import com.cloud.agent.api.Command; import com.cloud.agent.api.NetworkUsageCommand; @@ -62,7 +61,6 @@ public class NicPlugInOutRules extends RuleApplier { - private static final Logger s_logger = Logger.getLogger(NicPlugInOutRules.class); private final List _ipAddresses; @@ -102,7 +100,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter final boolean result = networkTopology.applyRules(_network, router, typeString, isPodLevelException, podId, failWhenDisconnect, new RuleApplierWrapper(ipAssociationRules)); if (!result) { - s_logger.warn("Failed to de-associate IPs before unplugging nics"); + logger.warn("Failed to de-associate IPs before unplugging nics"); return false; } } @@ -112,7 +110,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter PublicIpAddress ip = entry.getValue(); NicVO nic = nicDao.findByIp4AddressAndNetworkIdAndInstanceId(ip.getNetworkId(), _router.getId(), ip.getAddress().addr()); if (nic != null) { - s_logger.info("Collect network statistics for nic " + nic + " from router " + _router); + logger.info("Collect network statistics for nic " + nic + " from router " + _router); routerService.collectNetworkStatistics(_router, nic); } Network publicNtwk = null; @@ -121,7 +119,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter URI broadcastUri = BroadcastDomainType.Vlan.toUri(entry.getKey()); itMgr.removeVmFromNetwork(_router, publicNtwk, broadcastUri); } catch 
(ConcurrentOperationException e) { - s_logger.warn("Failed to remove router " + _router + " from vlan " + entry.getKey() + " in public network " + publicNtwk + " due to ", e); + logger.warn("Failed to remove router " + _router + " from vlan " + entry.getKey() + " in public network " + publicNtwk + " due to ", e); return false; } } @@ -152,12 +150,12 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter publicNtwk = networkModel.getNetwork(ip.getNetworkId()); publicNic = itMgr.addVmToNetwork(_router, publicNtwk, defaultNic); } catch (ConcurrentOperationException e) { - s_logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk + " due to ", e); + logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk + " due to ", e); } catch (InsufficientCapacityException e) { - s_logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk + " due to ", e); + logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk + " due to ", e); } finally { if (publicNic == null) { - s_logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk); + logger.warn("Failed to add router " + _router + " to vlan " + vlanTag + " in public network " + publicNtwk); return false; } } @@ -220,7 +218,7 @@ private Pair, Map> getNics && (allIp.isSourceNat() || rulesDao.countRulesByIpIdAndState(allIp.getId(), FirewallRule.State.Active) > 0 || (allIp.isOneToOneNat() && allIp.getRuleState() == null))) { - s_logger.debug("Updating the nic " + nic + " with new ip address " + allIp.getAddress().addr()); + logger.debug("Updating the nic " + nic + " with new ip address " + allIp.getAddress().addr()); nic.setIPv4Address(allIp.getAddress().addr()); nicDao.update(nic.getId(), nic); ipUpdated = true; @@ -229,7 +227,7 @@ private Pair, Map> getNics } if 
(!ipUpdated) { nicsToUnplug.put(ip.getVlanTag(), ip); - s_logger.debug("Need to unplug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); + logger.debug("Need to unplug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); } } } @@ -253,14 +251,14 @@ private Pair, Map> getNics if (nic == null && nicsToPlug.get(ip.getVlanTag()) == null) { nicsToPlug.put(ip.getVlanTag(), ip); - s_logger.debug("Need to plug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); + logger.debug("Need to plug the nic for ip=" + ip + "; vlan=" + ip.getVlanTag() + " in public network id =" + publicNtwkId); } else { final PublicIpAddress nicToUnplug = nicsToUnplug.get(ip.getVlanTag()); if (nicToUnplug != null) { NicVO nicVO = nicDao.findByIp4AddressAndNetworkIdAndInstanceId(publicNtwkId, _router.getId(), nicToUnplug.getAddress().addr()); nicVO.setIPv4Address(ip.getAddress().addr()); nicDao.update(nicVO.getId(), nicVO); - s_logger.debug("Updated the nic " + nicVO + " with the new ip address " + ip.getAddress().addr()); + logger.debug("Updated the nic " + nicVO + " with the new ip address " + ip.getAddress().addr()); nicsToUnplug.remove(ip.getVlanTag()); } } diff --git a/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java b/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java index e0976acf6ede..bb66839fb134 100644 --- a/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java +++ b/server/src/main/java/com/cloud/network/rules/PrivateGatewayRules.java @@ -18,7 +18,6 @@ package com.cloud.network.rules; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; -import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ResourceUnavailableException; @@ -36,7 +35,6 @@ public class PrivateGatewayRules extends RuleApplier { - private static final Logger 
s_logger = Logger.getLogger(PrivateGatewayRules.class); private final PrivateGateway _privateGateway; @@ -62,7 +60,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter final NetworkHelper networkHelper = visitor.getVirtualNetworkApplianceFactory().getNetworkHelper(); if (!networkHelper.checkRouterVersion(_router)) { - s_logger.warn("Router requires upgrade. Unable to send command to router: " + _router.getId()); + logger.warn("Router requires upgrade. Unable to send command to router: " + _router.getId()); return false; } final VirtualMachineManager itMgr = visitor.getVirtualNetworkApplianceFactory().getItMgr(); @@ -75,17 +73,17 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter result = visitor.visit(this); } } catch (final Exception ex) { - s_logger.warn("Failed to create private gateway " + _privateGateway + " on router " + _router + " due to ", ex); + logger.warn("Failed to create private gateway " + _privateGateway + " on router " + _router + " due to ", ex); } finally { if (!result) { - s_logger.debug("Failed to setup gateway " + _privateGateway + " on router " + _router + " with the source nat. Will now remove the gateway."); + logger.debug("Failed to setup gateway " + _privateGateway + " on router " + _router + " with the source nat. 
Will now remove the gateway."); _isAddOperation = false; final boolean isRemoved = destroyPrivateGateway(visitor); if (isRemoved) { - s_logger.debug("Removed the gateway " + _privateGateway + " from router " + _router + " as a part of cleanup"); + logger.debug("Removed the gateway " + _privateGateway + " from router " + _router + " as a part of cleanup"); } else { - s_logger.warn("Failed to remove the gateway " + _privateGateway + " from router " + _router + " as a part of cleanup"); + logger.warn("Failed to remove the gateway " + _privateGateway + " from router " + _router + " as a part of cleanup"); } } } @@ -119,32 +117,32 @@ protected boolean destroyPrivateGateway(final NetworkTopologyVisitor visitor) th final NetworkModel networkModel = visitor.getVirtualNetworkApplianceFactory().getNetworkModel(); if (!networkModel.isVmPartOfNetwork(_router.getId(), _privateGateway.getNetworkId())) { - s_logger.debug("Router doesn't have nic for gateway " + _privateGateway + " so no need to removed it"); + logger.debug("Router doesn't have nic for gateway " + _privateGateway + " so no need to removed it"); return true; } final Network privateNetwork = networkModel.getNetwork(_privateGateway.getNetworkId()); - s_logger.debug("Releasing private ip for gateway " + _privateGateway + " from " + _router); + logger.debug("Releasing private ip for gateway " + _privateGateway + " from " + _router); _nicProfile = networkModel.getNicProfile(_router, privateNetwork.getId(), null); boolean result = visitor.visit(this); if (!result) { - s_logger.warn("Failed to release private ip for gateway " + _privateGateway + " on router " + _router); + logger.warn("Failed to release private ip for gateway " + _privateGateway + " on router " + _router); return false; } // revoke network acl on the private gateway. 
final NetworkACLManager networkACLMgr = visitor.getVirtualNetworkApplianceFactory().getNetworkACLMgr(); if (!networkACLMgr.revokeACLItemsForPrivateGw(_privateGateway)) { - s_logger.debug("Failed to delete network acl items on " + _privateGateway + " from router " + _router); + logger.debug("Failed to delete network acl items on " + _privateGateway + " from router " + _router); return false; } - s_logger.debug("Removing router " + _router + " from private network " + privateNetwork + " as a part of delete private gateway"); + logger.debug("Removing router " + _router + " from private network " + privateNetwork + " as a part of delete private gateway"); final VirtualMachineManager itMgr = visitor.getVirtualNetworkApplianceFactory().getItMgr(); result = result && itMgr.removeVmFromNetwork(_router, privateNetwork, null); - s_logger.debug("Private gateawy " + _privateGateway + " is removed from router " + _router); + logger.debug("Private gateawy " + _privateGateway + " is removed from router " + _router); return result; } } diff --git a/server/src/main/java/com/cloud/network/rules/RuleApplier.java b/server/src/main/java/com/cloud/network/rules/RuleApplier.java index 47a87a822291..73c3855361b0 100644 --- a/server/src/main/java/com/cloud/network/rules/RuleApplier.java +++ b/server/src/main/java/com/cloud/network/rules/RuleApplier.java @@ -22,9 +22,13 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; import com.cloud.network.router.VirtualRouter; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public abstract class RuleApplier { + protected Logger logger = LogManager.getLogger(getClass()); + protected Network _network; protected VirtualRouter _router; diff --git a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java index 624fbfb9d246..15d1db48283b 100644 --- 
a/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/rules/RulesManagerImpl.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.api.command.user.firewall.ListPortForwardingRulesCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import com.cloud.configuration.ConfigurationManager; import com.cloud.domain.dao.DomainDao; @@ -105,7 +104,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class RulesManagerImpl extends ManagerBase implements RulesManager, RulesService { - private static final Logger s_logger = Logger.getLogger(RulesManagerImpl.class); @Inject IpAddressManager _ipAddrMgr; @@ -226,7 +224,7 @@ public PortForwardingRule createPortForwardingRule(final PortForwardingRule rule if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.PortForwarding, networkId); - s_logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); try { ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipAddrId, networkId, false); performedIpAssoc = true; @@ -496,16 +494,16 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is if (assignToVpcNtwk) { _networkModel.checkIpForService(ipAddress, Service.StaticNat, networkId); - s_logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); + logger.debug("The ip is not associated with the VPC network id=" + networkId + ", so assigning"); try { ipAddress = _ipAddrMgr.associateIPToGuestNetwork(ipId, networkId, false); performedIpAssoc = true; } catch (Exception ex) { - s_logger.warn("Failed to associate ip id=" + ipId + " to VPC network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate ip id=" + 
ipId + " to VPC network id=" + networkId + " as " + "a part of enable static nat"); return false; } } else if (ipAddress.isPortable()) { - s_logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + " so associate IP with the network " + + logger.info("Portable IP " + ipAddress.getUuid() + " is not associated with the network yet " + " so associate IP with the network " + networkId); try { // check if StaticNat service is enabled in the network @@ -519,7 +517,7 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is // associate portable IP with guest network ipAddress = _ipAddrMgr.associatePortableIPToGuestNetwork(ipId, networkId, false); } catch (Exception e) { - s_logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); return false; } } @@ -535,7 +533,7 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is _ipAddrMgr.transferPortableIP(ipId, ipAddress.getAssociatedWithNetworkId(), networkId); ipAddress = _ipAddressDao.findById(ipId); } catch (Exception e) { - s_logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); + logger.warn("Failed to associate portable id=" + ipId + " to network id=" + networkId + " as " + "a part of enable static nat"); return false; } } else { @@ -596,20 +594,20 @@ private boolean enableStaticNat(long ipId, long vmId, long networkId, boolean is ipAddress.setVmIp(dstIp); if (_ipAddressDao.update(ipAddress.getId(), ipAddress)) { // enable static nat on the backend - s_logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the backend"); + logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the 
backend"); if (applyStaticNatForIp(ipId, false, caller, false)) { applyUserDataIfNeeded(vmId, network, guestNic); performedIpAssoc = false; // ignor unassignIPFromVpcNetwork in finally block return true; } else { - s_logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); + logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); ipAddress.setOneToOneNat(isOneToOneNat); ipAddress.setAssociatedWithVmId(associatedWithVmId); ipAddress.setVmIp(null); _ipAddressDao.update(ipAddress.getId(), ipAddress); } } else { - s_logger.warn("Failed to update ip address " + ipAddress + " in the DB as a part of enableStaticNat"); + logger.warn("Failed to update ip address " + ipAddress + " in the DB as a part of enableStaticNat"); } } finally { @@ -627,11 +625,11 @@ protected void applyUserDataIfNeeded(long vmId, Network network, Nic guestNic) t try { element = _networkModel.getUserDataUpdateProvider(network); } catch (UnsupportedServiceException ex) { - s_logger.info(String.format("%s is not supported by network %s, skipping.", Service.UserData.getName(), network)); + logger.info(String.format("%s is not supported by network %s, skipping.", Service.UserData.getName(), network)); return; } if (element == null) { - s_logger.error("Can't find network element for " + Service.UserData.getName() + " provider needed for UserData update"); + logger.error("Can't find network element for " + Service.UserData.getName() + " provider needed for UserData update"); } else { UserVmVO vm = _vmDao.findById(vmId); try { @@ -641,10 +639,10 @@ protected void applyUserDataIfNeeded(long vmId, Network network, Nic guestNic) t _networkModel.getNetworkTag(template.getHypervisorType(), network)); VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm); if (!element.saveUserData(network, nicProfile, vmProfile)) { - s_logger.error("Failed to update userdata for vm " + vm + " and nic " + guestNic); + logger.error("Failed to update 
userdata for vm " + vm + " and nic " + guestNic); } } catch (Exception e) { - s_logger.error("Failed to update userdata for vm " + vm + " and nic " + guestNic + " due to " + e.getMessage(), e); + logger.error("Failed to update userdata for vm " + vm + " and nic " + guestNic + " due to " + e.getMessage(), e); } } } @@ -696,7 +694,7 @@ protected void isIpReadyForStaticNat(long vmId, IPAddressVO ipAddress, String vm oldIP.getUuid()); } // unassign old static nat rule - s_logger.debug("Disassociating static nat for ip " + oldIP); + logger.debug("Disassociating static nat for ip " + oldIP); if (!disableStaticNat(oldIP.getId(), caller, callerUserId, true)) { throw new CloudRuntimeException("Failed to disable old static nat rule for vm "+ vm.getInstanceName() + " with id "+vm.getUuid() +" and public ip " + oldIP); @@ -786,7 +784,7 @@ public boolean revokePortForwardingRulesForVm(long vmId) { Set ipsToReprogram = new HashSet(); if (rules == null || rules.isEmpty()) { - s_logger.debug("No port forwarding rules are found for vm id=" + vmId); + logger.debug("No port forwarding rules are found for vm id=" + vmId); return true; } @@ -798,9 +796,9 @@ public boolean revokePortForwardingRulesForVm(long vmId) { // apply rules for all ip addresses for (Long ipId : ipsToReprogram) { - s_logger.debug("Applying port forwarding rules for ip address id=" + ipId + " as a part of vm expunge"); + logger.debug("Applying port forwarding rules for ip address id=" + ipId + " as a part of vm expunge"); if (!applyPortForwardingRules(ipId, _ipAddrMgr.RulesContinueOnError.value(), _accountMgr.getSystemAccount())) { - s_logger.warn("Failed to apply port forwarding rules for ip id=" + ipId); + logger.warn("Failed to apply port forwarding rules for ip id=" + ipId); success = false; } } @@ -894,7 +892,7 @@ protected boolean applyPortForwardingRules(long ipId, boolean continueOnError, A List rules = _portForwardingDao.listForApplication(ipId); if (rules.size() == 0) { - s_logger.debug("There are no port 
forwarding rules to apply for ip id=" + ipId); + logger.debug("There are no port forwarding rules to apply for ip id=" + ipId); return true; } @@ -907,7 +905,7 @@ protected boolean applyPortForwardingRules(long ipId, boolean continueOnError, A return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply port forwarding rules for ip due to ", ex); + logger.warn("Failed to apply port forwarding rules for ip due to ", ex); return false; } @@ -919,7 +917,7 @@ protected boolean applyStaticNatRulesForIp(long sourceIpId, boolean continueOnEr List staticNatRules = new ArrayList(); if (rules.size() == 0) { - s_logger.debug("There are no static nat rules to apply for ip id=" + sourceIpId); + logger.debug("There are no static nat rules to apply for ip id=" + sourceIpId); return true; } @@ -936,7 +934,7 @@ protected boolean applyStaticNatRulesForIp(long sourceIpId, boolean continueOnEr return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply static nat rules for ip due to ", ex); + logger.warn("Failed to apply static nat rules for ip due to ", ex); return false; } @@ -947,7 +945,7 @@ protected boolean applyStaticNatRulesForIp(long sourceIpId, boolean continueOnEr public boolean applyPortForwardingRulesForNetwork(long networkId, boolean continueOnError, Account caller) { List rules = listByNetworkId(networkId); if (rules.size() == 0) { - s_logger.debug("There are no port forwarding rules to apply for network id=" + networkId); + logger.debug("There are no port forwarding rules to apply for network id=" + networkId); return true; } @@ -960,7 +958,7 @@ public boolean applyPortForwardingRulesForNetwork(long networkId, boolean contin return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply port forwarding rules for network due to ", ex); + logger.warn("Failed to apply port forwarding rules for network due to ", ex); return false; } @@ -973,7 +971,7 @@ public boolean 
applyStaticNatRulesForNetwork(long networkId, boolean continueOnE List staticNatRules = new ArrayList(); if (rules.size() == 0) { - s_logger.debug("There are no static nat rules to apply for network id=" + networkId); + logger.debug("There are no static nat rules to apply for network id=" + networkId); return true; } @@ -990,7 +988,7 @@ public boolean applyStaticNatRulesForNetwork(long networkId, boolean continueOnE return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to apply static nat rules for network due to ", ex); + logger.warn("Failed to apply static nat rules for network due to ", ex); return false; } @@ -1001,7 +999,7 @@ public boolean applyStaticNatRulesForNetwork(long networkId, boolean continueOnE public boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError, Account caller) { List ips = _ipAddressDao.listStaticNatPublicIps(networkId); if (ips.isEmpty()) { - s_logger.debug("There are no static nat to apply for network id=" + networkId); + logger.debug("There are no static nat to apply for network id=" + networkId); return true; } @@ -1022,7 +1020,7 @@ public boolean applyStaticNatsForNetwork(long networkId, boolean continueOnError return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to create static nat for network due to ", ex); + logger.warn("Failed to create static nat for network due to ", ex); return false; } @@ -1106,8 +1104,8 @@ public boolean revokeAllPFAndStaticNatRulesForIp(long ipId, long userId, Account List rules = new ArrayList(); List pfRules = _portForwardingDao.listByIpAndNotRevoked(ipId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + pfRules.size() + " port forwarding rules for ip id=" + ipId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for ip id=" + ipId); } for (PortForwardingRuleVO rule : pfRules) { @@ -1116,8 +1114,8 @@ public boolean 
revokeAllPFAndStaticNatRulesForIp(long ipId, long userId, Account } List staticNatRules = _firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.StaticNat); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + staticNatRules.size() + " static nat rules for ip id=" + ipId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + staticNatRules.size() + " static nat rules for ip id=" + ipId); } for (FirewallRuleVO rule : staticNatRules) { @@ -1154,8 +1152,8 @@ public boolean revokeAllPFAndStaticNatRulesForIp(long ipId, long userId, Account rules.addAll(_portForwardingDao.listByIpAndNotRevoked(ipId)); rules.addAll(_firewallDao.listByIpAndPurposeAndNotRevoked(ipId, Purpose.StaticNat)); - if (s_logger.isDebugEnabled() && success) { - s_logger.debug("Successfully released rules for ip id=" + ipId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled() && success) { + logger.debug("Successfully released rules for ip id=" + ipId + " and # of rules now = " + rules.size()); } return (rules.size() == 0 && success); @@ -1166,13 +1164,13 @@ public boolean revokeAllPFStaticNatRulesForNetwork(long networkId, long userId, List rules = new ArrayList(); List pfRules = _portForwardingDao.listByNetwork(networkId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + pfRules.size() + " port forwarding rules for network id=" + networkId); } List staticNatRules = _firewallDao.listByNetworkAndPurpose(networkId, Purpose.StaticNat); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + staticNatRules.size() + " static nat rules for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + staticNatRules.size() + " static nat rules for network id=" + networkId); } // Mark all pf rules (Active and non-Active) to be revoked, but don't revoke it yet - pass 
apply=false @@ -1198,8 +1196,8 @@ public boolean revokeAllPFStaticNatRulesForNetwork(long networkId, long userId, rules.addAll(_portForwardingDao.listByNetworkAndNotRevoked(networkId)); rules.addAll(_firewallDao.listByNetworkAndPurposeAndNotRevoked(networkId, Purpose.StaticNat)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully released rules for network id=" + networkId + " and # of rules now = " + rules.size()); + if (logger.isDebugEnabled()) { + logger.debug("Successfully released rules for network id=" + networkId + " and # of rules now = " + rules.size()); } return success && rules.size() == 0; @@ -1314,18 +1312,18 @@ public boolean disableStaticNat(long ipId, Account caller, long callerUserId, bo // Revoke all firewall rules for the ip try { - s_logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of disabling static nat for public IP id=" + ipId); + logger.debug("Revoking all " + Purpose.Firewall + "rules as a part of disabling static nat for public IP id=" + ipId); if (!_firewallMgr.revokeFirewallRulesForIp(ipId, callerUserId, caller)) { - s_logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of disable statis nat"); + logger.warn("Unable to revoke all the firewall rules for ip id=" + ipId + " as a part of disable statis nat"); success = false; } } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); + logger.warn("Unable to revoke all firewall rules for ip id=" + ipId + " as a part of ip release", e); success = false; } if (!revokeAllPFAndStaticNatRulesForIp(ipId, callerUserId, caller)) { - s_logger.warn("Unable to revoke all static nat rules for ip " + ipAddress); + logger.warn("Unable to revoke all static nat rules for ip " + ipAddress); success = false; } @@ -1342,13 +1340,13 @@ public boolean disableStaticNat(long ipId, Account caller, long callerUserId, bo 
_vpcMgr.unassignIPFromVpcNetwork(ipAddress.getId(), networkId); if (isIpSystem && releaseIpIfElastic && !_ipAddrMgr.handleSystemIpRelease(ipAddress)) { - s_logger.warn("Failed to release system ip address " + ipAddress); + logger.warn("Failed to release system ip address " + ipAddress); success = false; } return true; } else { - s_logger.warn("Failed to disable one to one nat for the ip address id" + ipId); + logger.warn("Failed to disable one to one nat for the ip address id" + ipId); ipAddress = _ipAddressDao.findById(ipId); ipAddress.setRuleState(null); _ipAddressDao.update(ipAddress.getId(), ipAddress); @@ -1388,7 +1386,7 @@ protected boolean applyStaticNatForIp(long sourceIpId, boolean continueOnError, return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to create static nat rule due to ", ex); + logger.warn("Failed to create static nat rule due to ", ex); return false; } } @@ -1407,18 +1405,18 @@ public boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, if (staticNats != null && !staticNats.isEmpty()) { if (forRevoke) { - s_logger.debug("Found " + staticNats.size() + " static nats to disable for network id " + networkId); + logger.debug("Found " + staticNats.size() + " static nats to disable for network id " + networkId); } try { if (!_ipAddrMgr.applyStaticNats(staticNats, continueOnError, forRevoke)) { return false; } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to create static nat rule due to ", ex); + logger.warn("Failed to create static nat rule due to ", ex); return false; } } else { - s_logger.debug("Found 0 static nat rules to apply for network id " + networkId); + logger.debug("Found 0 static nat rules to apply for network id " + networkId); } return true; @@ -1427,7 +1425,7 @@ public boolean applyStaticNatForNetwork(long networkId, boolean continueOnError, protected List createStaticNatForIp(IpAddress sourceIp, Account caller, boolean forRevoke) { List staticNats = new 
ArrayList(); if (!sourceIp.isOneToOneNat()) { - s_logger.debug("Source ip id=" + sourceIp + " is not one to one nat"); + logger.debug("Source ip id=" + sourceIp + " is not one to one nat"); return staticNats; } @@ -1491,36 +1489,36 @@ public void getSystemIpAndEnableStaticNatForVm(VirtualMachine vm, boolean getNew } // check if there is already static nat enabled if (_ipAddressDao.findByAssociatedVmId(vm.getId()) != null && !getNewIp) { - s_logger.debug("Vm " + vm + " already has ip associated with it in guest network " + guestNetwork); + logger.debug("Vm " + vm + " already has ip associated with it in guest network " + guestNetwork); continue; } - s_logger.debug("Allocating system ip and enabling static nat for it for the vm " + vm + " in guest network " + guestNetwork); + logger.debug("Allocating system ip and enabling static nat for it for the vm " + vm + " in guest network " + guestNetwork); IpAddress ip = _ipAddrMgr.assignSystemIp(guestNetwork.getId(), _accountMgr.getAccount(vm.getAccountId()), false, true); if (ip == null) { throw new CloudRuntimeException("Failed to allocate system ip for vm " + vm + " in guest network " + guestNetwork); } - s_logger.debug("Allocated system ip " + ip + ", now enabling static nat on it for vm " + vm); + logger.debug("Allocated system ip " + ip + ", now enabling static nat on it for vm " + vm); try { success = enableStaticNat(ip.getId(), vm.getId(), guestNetwork.getId(), isSystemVM, null); } catch (NetworkRuleConflictException ex) { - s_logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + + logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + " due to exception ", ex); success = false; } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " 
+ guestNetwork + + logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + " due to exception ", ex); success = false; } if (!success) { - s_logger.warn("Failed to enable static nat on system ip " + ip + " for the vm " + vm + ", releasing the ip..."); + logger.warn("Failed to enable static nat on system ip " + ip + " for the vm " + vm + ", releasing the ip..."); _ipAddrMgr.handleSystemIpRelease(ip); throw new CloudRuntimeException("Failed to enable static nat on system ip for the vm " + vm); } else { - s_logger.warn("Successfully enabled static nat on system ip " + ip + " for the vm " + vm); + logger.warn("Successfully enabled static nat on system ip " + ip + " for the vm " + vm); } } } @@ -1532,19 +1530,19 @@ protected void removePFRule(PortForwardingRuleVO rule) { @Override public List listAssociatedRulesForGuestNic(Nic nic) { - s_logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic " + nic.getId()); + logger.debug("Checking if PF/StaticNat/LoadBalancer rules are configured for nic " + nic.getId()); List result = new ArrayList(); // add PF rules result.addAll(_portForwardingDao.listByNetworkAndDestIpAddr(nic.getIPv4Address(), nic.getNetworkId())); if(result.size() > 0) { - s_logger.debug("Found " + result.size() + " portforwarding rule configured for the nic in the network " + nic.getNetworkId()); + logger.debug("Found " + result.size() + " portforwarding rule configured for the nic in the network " + nic.getNetworkId()); } // add static NAT rules List staticNatRules = _firewallDao.listStaticNatByVmId(nic.getInstanceId()); for (FirewallRuleVO rule : staticNatRules) { if (rule.getNetworkId() == nic.getNetworkId()) { result.add(rule); - s_logger.debug("Found rule " + rule.getId() + " " + rule.getPurpose() + " configured"); + logger.debug("Found rule " + rule.getId() + " " + rule.getPurpose() + " configured"); } } List staticNatIps = 
_ipAddressDao.listStaticNatPublicIps(nic.getNetworkId()); @@ -1557,7 +1555,7 @@ public List listAssociatedRulesForGuestNic(Nic nic) { new FirewallRuleVO(null, ip.getId(), 0, 65535, NetUtils.ALL_PROTO.toString(), nic.getNetworkId(), vm.getAccountId(), vm.getDomainId(), Purpose.StaticNat, null, null, null, null, null); result.add(staticNatRule); - s_logger.debug("Found rule " + staticNatRule.getId() + " " + staticNatRule.getPurpose() + " configured"); + logger.debug("Found rule " + staticNatRule.getId() + " " + staticNatRule.getPurpose() + " configured"); } } // add LB rules @@ -1566,7 +1564,7 @@ public List listAssociatedRulesForGuestNic(Nic nic) { FirewallRuleVO lbRule = _firewallDao.findById(lb.getLoadBalancerId()); if (lbRule.getNetworkId() == nic.getNetworkId()) { result.add(lbRule); - s_logger.debug("Found rule " + lbRule.getId() + " " + lbRule.getPurpose() + " configured"); + logger.debug("Found rule " + lbRule.getId() + " " + lbRule.getPurpose() + " configured"); } } return result; diff --git a/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java b/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java index 00a1fb1e7a92..c196a27bf327 100644 --- a/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java +++ b/server/src/main/java/com/cloud/network/rules/VpcIpAssociationRules.java @@ -23,7 +23,6 @@ import java.util.Map; import org.apache.cloudstack.network.topology.NetworkTopologyVisitor; -import org.apache.log4j.Logger; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.IpAddress; @@ -37,7 +36,6 @@ public class VpcIpAssociationRules extends RuleApplier { - private static final Logger s_logger = Logger.getLogger(VpcIpAssociationRules.class); private final List _ipAddresses; @@ -67,7 +65,7 @@ public boolean accept(final NetworkTopologyVisitor visitor, final VirtualRouter if (ipAddr.getState() != IpAddress.State.Releasing) { throw new CloudRuntimeException("Unable to find the nic 
in network " + ipAddr.getNetworkId() + " to apply the ip address " + ipAddr + " for"); } - s_logger.debug("Not sending release for ip address " + ipAddr + " as its nic is already gone from VPC router " + _router); + logger.debug("Not sending release for ip address " + ipAddr + " as its nic is already gone from VPC router " + _router); } else { macAddress = nic.getMacAddress(); _vlanMacAddress.put(BroadcastDomainType.getValue(BroadcastDomainType.fromString(ipAddr.getVlanTag())), macAddress); diff --git a/server/src/main/java/com/cloud/network/security/LocalSecurityGroupWorkQueue.java b/server/src/main/java/com/cloud/network/security/LocalSecurityGroupWorkQueue.java index 93811a921705..0080ae9b8d67 100644 --- a/server/src/main/java/com/cloud/network/security/LocalSecurityGroupWorkQueue.java +++ b/server/src/main/java/com/cloud/network/security/LocalSecurityGroupWorkQueue.java @@ -25,7 +25,8 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.network.security.SecurityGroupWork.Step; @@ -34,7 +35,7 @@ * */ public class LocalSecurityGroupWorkQueue implements SecurityGroupWorkQueue { - protected static Logger s_logger = Logger.getLogger(LocalSecurityGroupWorkQueue.class); + protected static Logger LOGGER = LogManager.getLogger(LocalSecurityGroupWorkQueue.class); //protected Set _currentWork = new HashSet(); protected Set _currentWork = new TreeSet(); diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java index 32186cc04697..b925137c4ce8 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java @@ -22,7 +22,8 @@ import java.util.Random; import 
java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -48,7 +49,7 @@ * */ public class SecurityGroupListener implements Listener { - public static final Logger s_logger = Logger.getLogger(SecurityGroupListener.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); private static final int MAX_RETRIES_ON_FAILURE = 3; private static final int MIN_TIME_BETWEEN_CLEANUPS = 30 * 60;//30 minutes @@ -86,23 +87,23 @@ public boolean processAnswers(long agentId, long seq, Answer[] answers) { if (ans instanceof SecurityGroupRuleAnswer) { SecurityGroupRuleAnswer ruleAnswer = (SecurityGroupRuleAnswer)ans; if (ans.getResult()) { - s_logger.debug("Successfully programmed rule " + ruleAnswer.toString() + " into host " + agentId); + logger.debug("Successfully programmed rule " + ruleAnswer.toString() + " into host " + agentId); _workDao.updateStep(ruleAnswer.getVmId(), ruleAnswer.getLogSequenceNumber(), Step.Done); recordSuccess(ruleAnswer.getVmId()); } else { _workDao.updateStep(ruleAnswer.getVmId(), ruleAnswer.getLogSequenceNumber(), Step.Error); ; - s_logger.debug("Failed to program rule " + ruleAnswer.toString() + " into host " + agentId + " due to " + ruleAnswer.getDetails() + + logger.debug("Failed to program rule " + ruleAnswer.toString() + " into host " + agentId + " due to " + ruleAnswer.getDetails() + " and updated jobs"); if (ruleAnswer.getReason() == FailureReason.CANNOT_BRIDGE_FIREWALL) { - s_logger.debug("Not retrying security group rules for vm " + ruleAnswer.getVmId() + " on failure since host " + agentId + + logger.debug("Not retrying security group rules for vm " + ruleAnswer.getVmId() + " on failure since host " + agentId + " cannot do bridge firewalling"); } else if (ruleAnswer.getReason() == FailureReason.PROGRAMMING_FAILED) { if 
(checkShouldRetryOnFailure(ruleAnswer.getVmId())) { - s_logger.debug("Retrying security group rules on failure for vm " + ruleAnswer.getVmId()); + logger.debug("Retrying security group rules on failure for vm " + ruleAnswer.getVmId()); affectedVms.add(ruleAnswer.getVmId()); } else { - s_logger.debug("Not retrying security group rules for vm " + ruleAnswer.getVmId() + " on failure: too many retries"); + logger.debug("Not retrying security group rules for vm " + ruleAnswer.getVmId() + " on failure: too many retries"); } } } @@ -157,8 +158,8 @@ public void processHostAdded(long hostId) { @Override public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) { - if (s_logger.isInfoEnabled()) - s_logger.info("Received a host startup notification"); + if (logger.isInfoEnabled()) + logger.info("Received a host startup notification"); if (cmd instanceof StartupRoutingCommand) { //if (Boolean.toString(true).equals(host.getDetail("can_bridge_firewall"))) { @@ -167,11 +168,11 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) CleanupNetworkRulesCmd cleanupCmd = new CleanupNetworkRulesCmd(interval); Commands c = new Commands(cleanupCmd); _agentMgr.send(host.getId(), c, this); - if (s_logger.isInfoEnabled()) - s_logger.info("Scheduled network rules cleanup, interval=" + cleanupCmd.getInterval()); + if (logger.isInfoEnabled()) + logger.info("Scheduled network rules cleanup, interval=" + cleanupCmd.getInterval()); } catch (AgentUnavailableException e) { //usually hypervisors that do not understand sec group rules. 
- s_logger.debug("Unable to schedule network rules cleanup for host " + host.getId(), e); + logger.debug("Unable to schedule network rules cleanup for host " + host.getId(), e); } if (_workTracker != null) { _workTracker.processConnect(host.getId()); diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java index e35503f32de2..fd5bd4480899 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -54,7 +54,6 @@ import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.NetworkRulesSystemVmCommand; @@ -125,7 +124,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGroupManager, SecurityGroupService, StateListener { - public static final Logger s_logger = Logger.getLogger(SecurityGroupManagerImpl.class); @Inject SecurityGroupDao _securityGroupDao; @@ -200,7 +198,7 @@ protected void runInContext() { try { work(); } catch (Throwable th) { - s_logger.error("Problem with SG work", th); + logger.error("Problem with SG work", th); } } } @@ -213,7 +211,7 @@ protected void runInContext() { cleanupUnfinishedWork(); //processScheduledWork(); } catch (Throwable th) { - s_logger.error("Problem with SG Cleanup", th); + logger.error("Problem with SG Cleanup", th); } } } @@ -394,17 +392,17 @@ public void scheduleRulesetUpdateToHosts(final List affectedVms, final boo } Collections.sort(affectedVms); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: scheduling ruleset updates for " + affectedVms.size() + " vms"); + if (logger.isTraceEnabled()) { + 
logger.trace("Security Group Mgr: scheduling ruleset updates for " + affectedVms.size() + " vms"); } boolean locked = _workLock.lock(_globalWorkLockTimeout); if (!locked) { - s_logger.warn("Security Group Mgr: failed to acquire global work lock"); + logger.warn("Security Group Mgr: failed to acquire global work lock"); return; } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: acquired global work lock"); + if (logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: acquired global work lock"); } try { @@ -412,8 +410,8 @@ public void scheduleRulesetUpdateToHosts(final List affectedVms, final boo @Override public void doInTransactionWithoutResult(TransactionStatus status) { for (Long vmId : affectedVms) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: scheduling ruleset update for " + vmId); + if (logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: scheduling ruleset update for " + vmId); } VmRulesetLogVO log = null; SecurityGroupWorkVO work = null; @@ -432,8 +430,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (work == null) { work = new SecurityGroupWorkVO(vmId, null, null, SecurityGroupWork.Step.Scheduled, null); work = _workDao.persist(work); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: created new work item for " + vmId + "; id = " + work.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: created new work item for " + vmId + "; id = " + work.getId()); } } @@ -447,8 +445,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } finally { _workLock.unlock(); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: released global work lock"); + if (logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: released global work lock"); } } } @@ -570,9 +568,9 @@ protected void handleVmMigrated(VMInstanceVO vm) { try { _agentMgr.send(vm.getHostId(), cmds); } catch 
(AgentUnavailableException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (OperationTimedoutException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } } else { @@ -760,7 +758,7 @@ public List doInTransaction(TransactionStatus status) { // Prevents other threads/management servers from creating duplicate security rules SecurityGroup securityGroup = _securityGroupDao.acquireInLockTable(securityGroupId); if (securityGroup == null) { - s_logger.warn("Could not acquire lock on network security group: id= " + securityGroupId); + logger.warn("Could not acquire lock on network security group: id= " + securityGroupId); return null; } List newRules = new ArrayList(); @@ -771,14 +769,14 @@ public List doInTransaction(TransactionStatus status) { if (ngVO.getId() != securityGroup.getId()) { final SecurityGroupVO tmpGrp = _securityGroupDao.lockRow(ngId, false); if (tmpGrp == null) { - s_logger.warn("Failed to acquire lock on security group: " + ngId); + logger.warn("Failed to acquire lock on security group: " + ngId); throw new CloudRuntimeException("Failed to acquire lock on security group: " + ngId); } } SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndAllowedGroupId(securityGroup.getId(), protocolFinal, startPortOrTypeFinal, endPortOrCodeFinal, ngVO.getId()); if ((securityGroupRule != null) && (securityGroupRule.getRuleType() == ruleType)) { - s_logger.warn("The rule already exists. id= " + securityGroupRule.getUuid()); + logger.warn("The rule already exists. id= " + securityGroupRule.getUuid()); continue; // rule already exists. 
} securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrTypeFinal, endPortOrCodeFinal, protocolFinal, ngVO.getId()); @@ -797,12 +795,12 @@ public List doInTransaction(TransactionStatus status) { newRules.add(securityGroupRule); } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added " + newRules.size() + " rules to security group " + securityGroup.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Added " + newRules.size() + " rules to security group " + securityGroup.getName()); } return newRules; } catch (Exception e) { - s_logger.warn("Exception caught when adding security group rules ", e); + logger.warn("Exception caught when adding security group rules ", e); throw new CloudRuntimeException("Exception caught when adding security group rules", e); } finally { if (securityGroup != null) { @@ -819,7 +817,7 @@ public List doInTransaction(TransactionStatus status) { affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(securityGroup.getId())); scheduleRulesetUpdateToHosts(affectedVms, true, null); } catch (Exception e) { - s_logger.debug("can't update rules on host, ignore", e); + logger.debug("can't update rules on host, ignore", e); } return newRules; @@ -848,13 +846,13 @@ private boolean revokeSecurityGroupRule(final Long id, SecurityRuleType type) { final SecurityGroupRuleVO rule = _securityGroupRuleDao.findById(id); if (rule == null) { - s_logger.debug("Unable to find security rule with id " + id); + logger.debug("Unable to find security rule with id " + id); throw new InvalidParameterValueException("Unable to find security rule with id " + id); } // check type if (type != rule.getRuleType()) { - s_logger.debug("Mismatch in rule type for security rule with id " + id); + logger.debug("Mismatch in rule type for security rule with id " + id); throw new InvalidParameterValueException("Mismatch in rule type for security rule with id " + id); } @@ -872,16 +870,16 @@ public Boolean 
doInTransaction(TransactionStatus status) { // acquire lock on parent group (preserving this logic) groupHandle = _securityGroupDao.acquireInLockTable(rule.getSecurityGroupId()); if (groupHandle == null) { - s_logger.warn("Could not acquire lock on security group id: " + rule.getSecurityGroupId()); + logger.warn("Could not acquire lock on security group id: " + rule.getSecurityGroupId()); return false; } _securityGroupRuleDao.remove(id); - s_logger.debug("revokeSecurityGroupRule succeeded for security rule id: " + id); + logger.debug("revokeSecurityGroupRule succeeded for security rule id: " + id); return true; } catch (Exception e) { - s_logger.warn("Exception caught when deleting security rules ", e); + logger.warn("Exception caught when deleting security rules ", e); throw new CloudRuntimeException("Exception caught when deleting security rules", e); } finally { if (groupHandle != null) { @@ -896,7 +894,7 @@ public Boolean doInTransaction(TransactionStatus status) { affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(securityGroupId)); scheduleRulesetUpdateToHosts(affectedVms, true, null); } catch (Exception e) { - s_logger.debug("Can't update rules for host, ignore", e); + logger.debug("Can't update rules for host, ignore", e); } if(Boolean.TRUE.equals(result)) { @@ -930,9 +928,9 @@ public SecurityGroupVO createSecurityGroup(String name, String description, Long if (group == null) { group = new SecurityGroupVO(name, description, domainId, accountId); group = _securityGroupDao.persist(group); - s_logger.debug("Created security group " + group + " for account id=" + accountId); + logger.debug("Created security group " + group + " for account id=" + accountId); } else { - s_logger.debug("Returning existing security group " + group + " for account id=" + accountId); + logger.debug("Returning existing security group " + group + " for account id=" + accountId); } return group; @@ -953,7 +951,7 @@ public boolean configure(String name, Map params) throws 
Configu _serverId = ManagementServerNode.getManagementServerId(); - s_logger.info("SecurityGroupManager: num worker threads=" + _numWorkerThreads + ", time between cleanups=" + _timeBetweenCleanups + " global lock timeout=" + logger.info("SecurityGroupManager: num worker threads=" + _numWorkerThreads + ", time between cleanups=" + _timeBetweenCleanups + " global lock timeout=" + _globalWorkLockTimeout); createThreadPools(); @@ -996,27 +994,27 @@ public SecurityGroupVO createDefaultSecurityGroup(Long accountId) { @DB public void work() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Checking the database"); + if (logger.isTraceEnabled()) { + logger.trace("Checking the database"); } final SecurityGroupWorkVO work = _workDao.take(_serverId); if (work == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group work: no work found"); + if (logger.isTraceEnabled()) { + logger.trace("Security Group work: no work found"); } return; } final Long userVmId = work.getInstanceId(); if (work.getStep() == Step.Done) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Security Group work: found a job in done state, rescheduling for vm: " + userVmId); + if (logger.isDebugEnabled()) { + logger.debug("Security Group work: found a job in done state, rescheduling for vm: " + userVmId); } ArrayList affectedVms = new ArrayList(); affectedVms.add(userVmId); scheduleRulesetUpdateToHosts(affectedVms, false, _timeBetweenCleanups * 1000l); return; } - s_logger.debug("Working on " + work); + logger.debug("Working on " + work); Transaction.execute(new TransactionCallbackNoReturn() { @Override @@ -1030,18 +1028,18 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (vm == null) { vm = _userVMDao.findById(work.getInstanceId()); if (vm == null) { - s_logger.info("VM " + work.getInstanceId() + " is removed"); + logger.info("VM " + work.getInstanceId() + " is removed"); locked = true; return; } - s_logger.warn("Unable to acquire lock on vm id=" + 
userVmId); + logger.warn("Unable to acquire lock on vm id=" + userVmId); return; } locked = true; Long agentId = null; VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId); if (log == null) { - s_logger.warn("Cannot find log record for vm id=" + userVmId); + logger.warn("Cannot find log record for vm id=" + userVmId); return; } seqnum = log.getLogsequence(); @@ -1068,7 +1066,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { _agentMgr.send(agentId, cmds, _answerListener); } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + logger.debug("Unable to send ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")"); _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done); } @@ -1089,7 +1087,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { @DB public boolean addInstanceToGroups(final Long userVmId, final List groups) { if (!isVmSecurityGroupEnabled(userVmId)) { - s_logger.trace("User vm " + userVmId + " is not security group enabled, not adding it to security group"); + logger.trace("User vm " + userVmId + " is not security group enabled, not adding it to security group"); return false; } if (groups != null && !groups.isEmpty()) { @@ -1104,14 +1102,14 @@ public Boolean doInTransaction(TransactionStatus status) { final Set uniqueGroups = new TreeSet(new SecurityGroupVOComparator()); uniqueGroups.addAll(sgs); if (userVm == null) { - s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm id=" + userVmId); } try { for (SecurityGroupVO securityGroup : uniqueGroups) { // don't let the group be deleted from under us. 
SecurityGroupVO ngrpLock = _securityGroupDao.lockRow(securityGroup.getId(), false); if (ngrpLock == null) { - s_logger.warn("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); + logger.warn("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); throw new ConcurrentModificationException("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); } @@ -1137,7 +1135,7 @@ public Boolean doInTransaction(TransactionStatus status) { @DB public void removeInstanceFromGroups(final long userVmId) { if (_securityGroupVMMapDao.countSGForVm(userVmId) < 1) { - s_logger.trace("No security groups found for vm id=" + userVmId + ", returning"); + logger.trace("No security groups found for vm id=" + userVmId + ", returning"); return; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -1146,14 +1144,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate entries are not created in // addInstance if (userVm == null) { - s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm id=" + userVmId); } int n = _securityGroupVMMapDao.deleteVM(userVmId); - s_logger.info("Disassociated " + n + " network groups " + " from uservm " + userVmId); + logger.info("Disassociated " + n + " network groups " + " from uservm " + userVmId); _userVMDao.releaseFromLockTable(userVmId); } }); - s_logger.debug("Security group mappings are removed successfully for vm id=" + userVmId); + logger.debug("Security group mappings are removed successfully for vm id=" + userVmId); } @DB @@ -1170,7 +1168,7 @@ public SecurityGroup updateSecurityGroup(UpdateSecurityGroupCmd cmd) { } if (newName == null) { - s_logger.debug("security group name is not changed. 
id=" + groupId); + logger.debug("security group name is not changed. id=" + groupId); return group; } @@ -1190,7 +1188,7 @@ public SecurityGroupVO doInTransaction(TransactionStatus status) { } if (newName.equals(group.getName())) { - s_logger.debug("security group name is not changed. id=" + groupId); + logger.debug("security group name is not changed. id=" + groupId); return group; } else if (newName.equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { throw new InvalidParameterValueException("The security group name " + SecurityGroupManager.DEFAULT_GROUP_NAME + " is reserved"); @@ -1203,7 +1201,7 @@ public SecurityGroupVO doInTransaction(TransactionStatus status) { group.setName(newName); _securityGroupDao.update(groupId, group); - s_logger.debug("Updated security group id=" + groupId); + logger.debug("Updated security group id=" + groupId); return group; } @@ -1247,7 +1245,7 @@ public Boolean doInTransaction(TransactionStatus status) throws ResourceInUseExc _securityGroupDao.expunge(groupId); - s_logger.debug("Deleted security group id=" + groupId); + logger.debug("Deleted security group id=" + groupId); return true; } @@ -1272,7 +1270,7 @@ public void fullSync(long agentId, HashMap> newGroupSta } } if (affectedVms.size() > 0) { - s_logger.info("Network Group full sync for agent " + agentId + " found " + affectedVms.size() + " vms out of sync"); + logger.info("Network Group full sync for agent " + agentId + " found " + affectedVms.size() + " vms out of sync"); scheduleRulesetUpdateToHosts(affectedVms, false, null); } @@ -1282,7 +1280,7 @@ public void cleanupFinishedWork() { Date before = new Date(System.currentTimeMillis() - 6 * 3600 * 1000l); int numDeleted = _workDao.deleteFinishedWork(before); if (numDeleted > 0) { - s_logger.info("Network Group Work cleanup deleted " + numDeleted + " finished work items older than " + before.toString()); + logger.info("Network Group Work cleanup deleted " + numDeleted + " finished work items older than " + 
before.toString()); } } @@ -1291,7 +1289,7 @@ private void cleanupUnfinishedWork() { Date before = new Date(System.currentTimeMillis() - 2 * _timeBetweenCleanups * 1000l); List unfinished = _workDao.findUnfinishedWork(before); if (unfinished.size() > 0) { - s_logger.info("Network Group Work cleanup found " + unfinished.size() + " unfinished work items older than " + before.toString()); + logger.info("Network Group Work cleanup found " + unfinished.size() + " unfinished work items older than " + before.toString()); ArrayList affectedVms = new ArrayList(); for (SecurityGroupWorkVO work : unfinished) { affectedVms.add(work.getInstanceId()); @@ -1300,7 +1298,7 @@ private void cleanupUnfinishedWork() { } scheduleRulesetUpdateToHosts(affectedVms, false, null); } else { - s_logger.debug("Network Group Work cleanup found no unfinished work items older than " + before.toString()); + logger.debug("Network Group Work cleanup found no unfinished work items older than " + before.toString()); } } @@ -1329,7 +1327,7 @@ public String getSecurityGroupsNamesForVm(long vmId) { return networkGroupNames.toString(); } catch (Exception e) { - s_logger.warn("Error trying to get network groups for a vm: " + e); + logger.warn("Error trying to get network groups for a vm: " + e); return null; } @@ -1363,18 +1361,18 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t State newState = transition.getToState(); Event event = transition.getEvent(); if (VirtualMachine.State.isVmStarted(oldState, event, newState)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: handling start of vm id" + vm.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: handling start of vm id" + vm.getId()); } handleVmStarted((VMInstanceVO)vm); } else if (VirtualMachine.State.isVmStopped(oldState, event, newState)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: handling stop of vm id" + vm.getId()); + if 
(logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: handling stop of vm id" + vm.getId()); } handleVmStopped((VMInstanceVO)vm); } else if (VirtualMachine.State.isVmMigrated(oldState, event, newState)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: handling migration of vm id" + vm.getId()); + if (logger.isTraceEnabled()) { + logger.trace("Security Group Mgr: handling migration of vm id" + vm.getId()); } handleVmMigrated((VMInstanceVO)vm); } @@ -1410,7 +1408,7 @@ public boolean isVmMappedToDefaultSecurityGroup(long vmId) { UserVmVO vm = _userVmMgr.getVirtualMachine(vmId); SecurityGroup defaultGroup = getDefaultSecurityGroup(vm.getAccountId()); if (defaultGroup == null) { - s_logger.warn("Unable to find default security group for account id=" + vm.getAccountId()); + logger.warn("Unable to find default security group for account id=" + vm.getAccountId()); return false; } SecurityGroupVMMapVO map = _securityGroupVMMapDao.findByVmIdGroupId(vmId, defaultGroup.getId()); @@ -1448,14 +1446,14 @@ public boolean securityGroupRulesForVmSecIp(long nicId, String secondaryIp, bool // Validate parameters List vmSgGrps = getSecurityGroupsForVm(vmId); if (vmSgGrps.isEmpty()) { - s_logger.debug("Vm is not in any Security group "); + logger.debug("Vm is not in any Security group "); return true; } //If network does not support SG service, no need add SG rules for secondary ip Network network = _networkModel.getNetwork(nic.getNetworkId()); if (!_networkModel.isSecurityGroupSupportedInNetwork(network)) { - s_logger.debug("Network " + network + " is not enabled with security group service, "+ + logger.debug("Network " + network + " is not enabled with security group service, "+ "so not applying SG rules for secondary ip"); return true; } @@ -1468,16 +1466,16 @@ public boolean securityGroupRulesForVmSecIp(long nicId, String secondaryIp, bool //create command for the to add ip in ipset and arptables rules NetworkRulesVmSecondaryIpCommand cmd = new 
NetworkRulesVmSecondaryIpCommand(vmName, vmMac, secondaryIp, ruleAction); - s_logger.debug("Asking agent to configure rules for vm secondary ip"); + logger.debug("Asking agent to configure rules for vm secondary ip"); Commands cmds = null; cmds = new Commands(cmd); try { _agentMgr.send(vm.getHostId(), cmds); } catch (AgentUnavailableException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } catch (OperationTimedoutException e) { - s_logger.debug(e.toString()); + logger.debug(e.toString()); } return true; diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java index b75c39560cf6..bd6f0e32bb02 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupManagerImpl2.java @@ -76,7 +76,7 @@ public void run() { } }); } catch (final Throwable th) { - s_logger.error("SG Work: Caught this throwable, ", th); + logger.error("SG Work: Caught this throwable, ", th); } } } @@ -98,15 +98,15 @@ void scheduleRulesetUpdateToHosts(List affectedVms, boolean updateSeqno, L return; } if (_schedulerDisabled) { - s_logger.debug("Security Group Mgr v2: scheduler disabled, doing nothing for " + affectedVms.size() + " vms"); + logger.debug("Security Group Mgr v2: scheduler disabled, doing nothing for " + affectedVms.size() + " vms"); return; } Set workItems = new TreeSet(); workItems.addAll(affectedVms); workItems.removeAll(_disabledVms); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Security Group Mgr v2: scheduling ruleset updates for " + affectedVms.size() + " vms " + " (unique=" + workItems.size() + + if (logger.isDebugEnabled()) { + logger.debug("Security Group Mgr v2: scheduling ruleset updates for " + affectedVms.size() + " vms " + " (unique=" + workItems.size() + "), current queue size=" + _workQueue.size()); } @@ -122,8 +122,8 @@ void 
scheduleRulesetUpdateToHosts(List affectedVms, boolean updateSeqno, L int newJobs = _workQueue.submitWorkForVms(workItems); _mBean.logScheduledDetails(workItems); p.stop(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Security Group Mgr v2: done scheduling ruleset updates for " + workItems.size() + " vms: num new jobs=" + newJobs + + if (logger.isDebugEnabled()) { + logger.debug("Security Group Mgr v2: done scheduling ruleset updates for " + workItems.size() + " vms: num new jobs=" + newJobs + " num rows insert or updated=" + updated + " time taken=" + p.getDurationInMillis()); } } @@ -138,31 +138,31 @@ public boolean start() { @Override public void work() { - s_logger.trace("Checking the work queue"); + logger.trace("Checking the work queue"); List workItems; try { workItems = _workQueue.getWork(1); for (SecurityGroupWork work : workItems) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Processing " + work.getInstanceId()); + if (logger.isTraceEnabled()) { + logger.trace("Processing " + work.getInstanceId()); } try { VmRulesetLogVO rulesetLog = _rulesetLogDao.findByVmId(work.getInstanceId()); if (rulesetLog == null) { - s_logger.warn("Could not find ruleset log for vm " + work.getInstanceId()); + logger.warn("Could not find ruleset log for vm " + work.getInstanceId()); continue; } work.setLogsequenceNumber(rulesetLog.getLogsequence()); sendRulesetUpdates(work); _mBean.logUpdateDetails(work.getInstanceId(), work.getLogsequenceNumber()); } catch (Exception e) { - s_logger.error("Problem during SG work " + work, e); + logger.error("Problem during SG work " + work, e); work.setStep(Step.Error); } } } catch (InterruptedException e1) { - s_logger.warn("SG work: caught InterruptException", e1); + logger.warn("SG work: caught InterruptException", e1); } } @@ -171,8 +171,8 @@ public void sendRulesetUpdates(SecurityGroupWork work) { UserVm vm = _userVMDao.findById(userVmId); if (vm != null && vm.getState() == State.Running) { - if (s_logger.isTraceEnabled()) 
{ - s_logger.trace("SecurityGroupManager v2: found vm, " + userVmId + " state=" + vm.getState()); + if (logger.isTraceEnabled()) { + logger.trace("SecurityGroupManager v2: found vm, " + userVmId + " state=" + vm.getState()); } Map> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule); Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); @@ -191,28 +191,28 @@ public void sendRulesetUpdates(SecurityGroupWork work) { generateRulesetCmd(vm.getInstanceName(), nic.getIPv4Address(), nic.getIPv6Address(), nic.getMacAddress(), vm.getId(), null, work.getLogsequenceNumber(), ingressRules, egressRules, nicSecIps); cmd.setMsId(_serverId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + ":ingress num rules=" + + if (logger.isDebugEnabled()) { + logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + ":ingress num rules=" + cmd.getIngressRuleSet().size() + ":egress num rules=" + cmd.getEgressRuleSet().size() + " num cidrs=" + cmd.getTotalNumCidrs() + " sig=" + cmd.getSignature()); } Commands cmds = new Commands(cmd); try { _agentMgr.send(agentId, cmds, _answerListener); - if (s_logger.isTraceEnabled()) { - s_logger.trace("SecurityGroupManager v2: sent ruleset updates for " + vm.getInstanceName() + " curr queue size=" + _workQueue.size()); + if (logger.isTraceEnabled()) { + logger.trace("SecurityGroupManager v2: sent ruleset updates for " + vm.getInstanceName() + " curr queue size=" + _workQueue.size()); } } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + logger.debug("Unable to send updates for vm: " + userVmId + "(agentid=" + agentId + ")"); _workTracker.handleException(agentId); } } } else { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { if (vm != null) - s_logger.debug("No rules sent to vm " + vm + 
"state=" + vm.getState()); + logger.debug("No rules sent to vm " + vm + "state=" + vm.getState()); else - s_logger.debug("Could not find vm: No rules sent to vm " + userVmId); + logger.debug("Could not find vm: No rules sent to vm " + userVmId); } } } @@ -277,7 +277,7 @@ public boolean configure(String name, Map params) throws Configu try { JmxUtil.registerMBean("SecurityGroupManager", "SecurityGroupManagerImpl2", _mBean); } catch (Exception e) { - s_logger.error("Failed to register MBean", e); + logger.error("Failed to register MBean", e); } boolean result = super.configure(name, params); Map configs = _configDao.getConfiguration("Network", params); @@ -293,7 +293,7 @@ public void disableSchedulerForVm(Long vmId, boolean disable) { } else { _disabledVms.remove(vmId); } - s_logger.warn("JMX operation: Scheduler state for vm " + vmId + ": new state disabled=" + disable); + logger.warn("JMX operation: Scheduler state for vm " + vmId + ": new state disabled=" + disable); } @@ -303,13 +303,13 @@ public Long[] getDisabledVmsForScheduler() { } public void enableAllVmsForScheduler() { - s_logger.warn("Cleared list of disabled VMs (JMX operation?)"); + logger.warn("Cleared list of disabled VMs (JMX operation?)"); _disabledVms.clear(); } public void disableScheduler(boolean disable) { _schedulerDisabled = disable; - s_logger.warn("JMX operation: Scheduler state changed: new state disabled=" + disable); + logger.warn("JMX operation: Scheduler state changed: new state disabled=" + disable); } public boolean isSchedulerDisabled() { @@ -318,7 +318,7 @@ public boolean isSchedulerDisabled() { public void clearWorkQueue() { _workQueue.clear(); - s_logger.warn("Cleared the work queue (possible JMX operation)"); + logger.warn("Cleared the work queue (possible JMX operation)"); } } diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupWorkTracker.java b/server/src/main/java/com/cloud/network/security/SecurityGroupWorkTracker.java index 63b1cbb6be93..be928f351082 
100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupWorkTracker.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupWorkTracker.java @@ -20,14 +20,15 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.Answer; public class SecurityGroupWorkTracker { - protected static final Logger s_logger = Logger.getLogger(SecurityGroupWorkTracker.class); + protected Logger logger = LogManager.getLogger(getClass()); protected AtomicLong _discardCount = new AtomicLong(0); AgentManager _agentMgr; Listener _answerListener; @@ -55,7 +56,7 @@ public boolean canSend(long agentId) { if (currLength + 1 > _bufferLength) { long discarded = _discardCount.incrementAndGet(); //drop it on the floor - s_logger.debug("SecurityGroupManager: dropping a message because there are more than " + currLength + " outstanding messages, total dropped=" + discarded); + logger.debug("SecurityGroupManager: dropping a message because there are more than " + currLength + " outstanding messages, total dropped=" + discarded); return false; } _unackedMessages.put(agentId, ++currLength); diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java index d95cf9ac7af9..047961467ac2 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.log4j.Logger; import com.cloud.event.ActionEvent; import 
com.cloud.event.EventTypes; @@ -53,7 +52,6 @@ import com.cloud.utils.net.NetUtils; public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLManager { - private static final Logger s_logger = Logger.getLogger(NetworkACLManagerImpl.class); @Inject private NetworkModel _networkMgr; @@ -119,7 +117,7 @@ public boolean applyNetworkACL(final long aclId) throws ResourceUnavailableExcep if (!applyACLToPrivateGw(privateGateway)) { aclApplyStatus = false; - s_logger.debug("failed to apply network acl item on private gateway " + privateGateway.getId() + "acl id " + aclId); + logger.debug("failed to apply network acl item on private gateway " + privateGateway.getId() + "acl id " + aclId); break; } } @@ -172,7 +170,7 @@ public boolean replaceNetworkACLForPrivateGw(final NetworkACL acl, final Private if (aclItems == null || aclItems.isEmpty()) { //Revoke ACL Items of the existing ACL if the new network acl is empty //Other wise existing rules will not be removed on the router elelment - s_logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); + logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); if (!revokeACLItemsForPrivateGw(gateway)) { throw new CloudRuntimeException("Failed to replace network ACL. Error while removing existing ACL " + "items for privatewa gateway: " + gateway.getId()); } @@ -205,7 +203,7 @@ public boolean replaceNetworkACL(final NetworkACL acl, final NetworkVO network) //Existing rules won't be removed otherwise final List aclItems = _networkACLItemDao.listByACL(acl.getId()); if (aclItems == null || aclItems.isEmpty()) { - s_logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); + logger.debug("New network ACL is empty. Revoke existing rules before applying ACL"); if (!revokeACLItemsForNetwork(network.getId())) { throw new CloudRuntimeException("Failed to replace network ACL. 
Error while removing existing ACL items for network: " + network.getId()); } @@ -215,7 +213,7 @@ public boolean replaceNetworkACL(final NetworkACL acl, final NetworkVO network) network.setNetworkACLId(acl.getId()); //Update Network ACL if (_networkDao.update(network.getId(), network)) { - s_logger.debug("Updated network: " + network.getId() + " with Network ACL Id: " + acl.getId() + ", Applying ACL items"); + logger.debug("Updated network: " + network.getId() + " with Network ACL Id: " + acl.getId() + ", Applying ACL items"); //Apply ACL to network final Boolean result = applyACLToNetwork(network.getId()); if (result) { @@ -276,8 +274,8 @@ public boolean revokeNetworkACLItem(final long ruleId) { @DB private void revokeRule(final NetworkACLItemVO rule) { if (rule.getState() == State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a rule that is still in stage state so just removing it: " + rule); + if (logger.isDebugEnabled()) { + logger.debug("Found a rule that is still in stage state so just removing it: " + rule); } removeRule(rule); } else if (rule.getState() == State.Add || rule.getState() == State.Active) { @@ -294,12 +292,12 @@ public boolean revokeACLItemsForNetwork(final long networkId) throws ResourceUna } final List aclItems = _networkACLItemDao.listByACL(network.getNetworkACLId()); if (aclItems.isEmpty()) { - s_logger.debug("Found no network ACL Items for network id=" + networkId); + logger.debug("Found no network ACL Items for network id=" + networkId); return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + aclItems.size() + " Network ACL Items for network id=" + networkId); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + aclItems.size() + " Network ACL Items for network id=" + networkId); } for (final NetworkACLItemVO aclItem : aclItems) { @@ -311,8 +309,8 @@ public boolean revokeACLItemsForNetwork(final long networkId) throws ResourceUna final boolean success = 
applyACLItemsToNetwork(network.getId(), aclItems); - if (s_logger.isDebugEnabled() && success) { - s_logger.debug("Successfully released Network ACLs for network id=" + networkId + " and # of rules now = " + aclItems.size()); + if (logger.isDebugEnabled() && success) { + logger.debug("Successfully released Network ACLs for network id=" + networkId + " and # of rules now = " + aclItems.size()); } return success; @@ -323,12 +321,12 @@ public boolean revokeACLItemsForPrivateGw(final PrivateGateway gateway) throws R final long networkACLId = gateway.getNetworkACLId(); final List aclItems = _networkACLItemDao.listByACL(networkACLId); if (aclItems.isEmpty()) { - s_logger.debug("Found no network ACL Items for private gateway 'id=" + gateway.getId() + "'"); + logger.debug("Found no network ACL Items for private gateway 'id=" + gateway.getId() + "'"); return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Releasing " + aclItems.size() + " Network ACL Items for private gateway id=" + gateway.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Releasing " + aclItems.size() + " Network ACL Items for private gateway id=" + gateway.getId()); } for (final NetworkACLItemVO aclItem : aclItems) { @@ -340,8 +338,8 @@ public boolean revokeACLItemsForPrivateGw(final PrivateGateway gateway) throws R final boolean success = applyACLToPrivateGw(gateway, aclItems); - if (s_logger.isDebugEnabled() && success) { - s_logger.debug("Successfully released Network ACLs for private gateway id=" + gateway.getId() + " and # of rules now = " + aclItems.size()); + if (logger.isDebugEnabled() && success) { + logger.debug("Successfully released Network ACLs for private gateway id=" + gateway.getId() + " and # of rules now = " + aclItems.size()); } return success; @@ -378,7 +376,7 @@ private boolean applyACLToPrivateGw(final PrivateGateway gateway, final List allAclRules) { if (CollectionUtils.isEmpty(allAclRules)) { - s_logger.debug(String.format("No ACL rules for [id=%s, name=%s]. 
Therefore, there is no need for consistency validation.", lockedAcl.getUuid(), lockedAcl.getName())); + logger.debug(String.format("No ACL rules for [id=%s, name=%s]. Therefore, there is no need for consistency validation.", lockedAcl.getUuid(), lockedAcl.getName())); return; } String aclConsistencyHash = moveNetworkAclItemCmd.getAclConsistencyHash(); @@ -1000,7 +998,7 @@ protected void validateAclConsistency(MoveNetworkAclItemCmd moveNetworkAclItemCm User callingUser = CallContext.current().getCallingUser(); Account callingAccount = CallContext.current().getCallingAccount(); - s_logger.warn(String.format( + logger.warn(String.format( "User [id=%s, name=%s] from Account [id=%s, name=%s] has not entered an ACL consistency hash to execute the replacement of an ACL rule. Therefore, she/he is assuming all of the risks of procedding without this validation.", callingUser.getUuid(), callingUser.getUsername(), callingAccount.getUuid(), callingAccount.getAccountName())); return; @@ -1183,10 +1181,10 @@ protected void validateAclAssociatedToVpc(Long aclVpcId, Account account, String */ protected void validateGlobalAclPermissionAndAclAssociatedToVpc(NetworkACL acl, Account account, String exception){ if (isGlobalAcl(acl.getVpcId())) { - s_logger.info(String.format("Checking if account [%s] has permission to manipulate global ACL [%s].", account, acl)); + logger.info(String.format("Checking if account [%s] has permission to manipulate global ACL [%s].", account, acl)); checkGlobalAclPermission(acl.getVpcId(), account, exception); } else { - s_logger.info(String.format("Validating ACL [%s] associated to VPC [%s] with account [%s].", acl, acl.getVpcId(), account)); + logger.info(String.format("Validating ACL [%s] associated to VPC [%s] with account [%s].", acl, acl.getVpcId(), account)); validateAclAssociatedToVpc(acl.getVpcId(), account, acl.getUuid()); } } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java 
b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java index 341a3b81b423..e313b1bfdf04 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java @@ -64,7 +64,6 @@ import org.apache.cloudstack.query.QueryService; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.ObjectUtils; -import org.apache.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -179,7 +178,6 @@ import com.cloud.vm.dao.NicDao; public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvisioningService, VpcService { - private static final Logger s_logger = Logger.getLogger(VpcManagerImpl.class); public static final String SERVICE = "service"; public static final String CAPABILITYTYPE = "capabilitytype"; @@ -311,7 +309,7 @@ public boolean configure(final String name, final Map params) th public void doInTransactionWithoutResult(final TransactionStatus status) { if (_vpcOffDao.findByUniqueName(VpcOffering.defaultVPCOfferingName) == null) { - s_logger.debug("Creating default VPC offering " + VpcOffering.defaultVPCOfferingName); + logger.debug("Creating default VPC offering " + VpcOffering.defaultVPCOfferingName); final Map> svcProviderMap = new HashMap>(); final Set defaultProviders = new HashSet(); @@ -331,7 +329,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { // configure default vpc offering with Netscaler as LB Provider if (_vpcOffDao.findByUniqueName(VpcOffering.defaultVPCNSOfferingName) == null) { - s_logger.debug("Creating default VPC offering with Netscaler as LB Provider" + VpcOffering.defaultVPCNSOfferingName); + logger.debug("Creating default VPC offering with Netscaler as LB Provider" + VpcOffering.defaultVPCNSOfferingName); final Map> svcProviderMap = new HashMap>(); final Set 
defaultProviders = new HashSet(); defaultProviders.add(Provider.VPCVirtualRouter); @@ -350,7 +348,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } if (_vpcOffDao.findByUniqueName(VpcOffering.redundantVPCOfferingName) == null) { - s_logger.debug("Creating Redundant VPC offering " + VpcOffering.redundantVPCOfferingName); + logger.debug("Creating Redundant VPC offering " + VpcOffering.redundantVPCOfferingName); final Map> svcProviderMap = new HashMap>(); final Set defaultProviders = new HashSet(); @@ -482,7 +480,7 @@ public VpcOffering createVpcOffering(final String name, final String displayText } if (service == Service.Connectivity) { - s_logger.debug("Applying Connectivity workaround, setting provider to NiciraNvp"); + logger.debug("Applying Connectivity workaround, setting provider to NiciraNvp"); svcProviderMap.put(service, sdnProviders); } else { svcProviderMap.put(service, defaultProviders); @@ -497,12 +495,12 @@ public VpcOffering createVpcOffering(final String name, final String displayText } if (!sourceNatSvc) { - s_logger.debug("Automatically adding source nat service to the list of VPC services"); + logger.debug("Automatically adding source nat service to the list of VPC services"); svcProviderMap.put(Service.SourceNat, defaultProviders); } if (!firewallSvs) { - s_logger.debug("Automatically adding network ACL service to the list of VPC services"); + logger.debug("Automatically adding network ACL service to the list of VPC services"); svcProviderMap.put(Service.NetworkACL, defaultProviders); } @@ -578,7 +576,7 @@ public VpcOfferingVO doInTransaction(final TransactionStatus status) { if (state != null) { offering.setState(state); } - s_logger.debug("Adding vpc offering " + offering); + logger.debug("Adding vpc offering " + offering); offering = _vpcOffDao.persist(offering); // populate services and providers if (svcProviderMap != null) { @@ -588,7 +586,7 @@ public VpcOfferingVO doInTransaction(final TransactionStatus status) 
{ for (final Network.Provider provider : providers) { final VpcOfferingServiceMapVO offService = new VpcOfferingServiceMapVO(offering.getId(), service, provider); _vpcOffSvcMapDao.persist(offService); - s_logger.trace("Added service for the vpc offering: " + offService + " with provider " + provider.getName()); + logger.trace("Added service for the vpc offering: " + offService + " with provider " + provider.getName()); } } else { throw new InvalidParameterValueException("Provider is missing for the VPC offering service " + service.getName()); @@ -996,7 +994,7 @@ private VpcOffering updateVpcOfferingInternal(long vpcOffId, String vpcOfferingN vpcOfferingDetailsDao.persist(detailVO); } } - s_logger.debug("Updated VPC offeirng id=" + vpcOffId); + logger.debug("Updated VPC offeirng id=" + vpcOffId); return _vpcOffDao.findById(vpcOffId); } @@ -1078,13 +1076,13 @@ public Vpc createVpc(final long zoneId, final long vpcOffId, final long vpcOwner String message = String.format("Configured MTU for network VR's public interfaces exceeds the upper limit " + "enforced by zone level setting: %s. 
VR's public interfaces can be configured with a maximum MTU of %s", NetworkService.VRPublicInterfaceMtu.key(), NetworkService.VRPublicInterfaceMtu.valueIn(zoneId)); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); publicMtu = NetworkService.VRPublicInterfaceMtu.valueIn(zoneId); } else if (publicMtu < NetworkService.MINIMUM_MTU) { String subject = "Incorrect MTU configured on network for public interfaces of the VPC VR"; String message = String.format("Configured MTU for network VR's public interfaces is lesser than the supported minim MTU of %s", NetworkService.MINIMUM_MTU); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); publicMtu = NetworkService.MINIMUM_MTU; } @@ -1109,7 +1107,7 @@ public Vpc createVpc(CreateVPCCmd cmd) throws ResourceAllocationException { String sourceNatIP = cmd.getSourceNatIP(); if (sourceNatIP != null) { - s_logger.info(String.format("Trying to allocate the specified IP [%s] as the source NAT of VPC [%s].", sourceNatIP, vpc)); + logger.info(String.format("Trying to allocate the specified IP [%s] as the source NAT of VPC [%s].", sourceNatIP, vpc)); allocateSourceNatIp(vpc, sourceNatIP); } return vpc; @@ -1152,7 +1150,7 @@ protected Vpc createVpc(final Boolean displayVpc, final VpcVO vpc) { public VpcVO doInTransaction(final TransactionStatus status) { final VpcVO persistedVpc = vpcDao.persist(vpc, finalizeServicesAndProvidersForVpc(vpc.getZoneId(), vpc.getVpcOfferingId())); _resourceLimitMgr.incrementResourceCount(vpc.getAccountId(), ResourceType.vpc); - s_logger.debug("Created VPC " + persistedVpc); + logger.debug("Created VPC " + persistedVpc); CallContext.current().putContextParameter(Vpc.class, persistedVpc.getUuid()); return persistedVpc; } @@ -1211,7 +1209,7 @@ public boolean deleteVpc(final long vpcId) throws 
ConcurrentOperationException, @Override @DB public boolean destroyVpc(final Vpc vpc, final Account caller, final Long callerUserId) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Destroying vpc " + vpc); + logger.debug("Destroying vpc " + vpc); // don't allow to delete vpc if it's in use by existing non system // networks (system networks are networks of a private gateway of the @@ -1224,7 +1222,7 @@ public boolean destroyVpc(final Vpc vpc, final Account caller, final Long caller // mark VPC as inactive if (vpc.getState() != Vpc.State.Inactive) { - s_logger.debug("Updating VPC " + vpc + " with state " + Vpc.State.Inactive + " as a part of vpc delete"); + logger.debug("Updating VPC " + vpc + " with state " + Vpc.State.Inactive + " as a part of vpc delete"); final VpcVO vpcVO = vpcDao.findById(vpc.getId()); vpcVO.setState(Vpc.State.Inactive); @@ -1241,23 +1239,23 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { // shutdown VPC if (!shutdownVpc(vpc.getId())) { - s_logger.warn("Failed to shutdown vpc " + vpc + " as a part of vpc destroy process"); + logger.warn("Failed to shutdown vpc " + vpc + " as a part of vpc destroy process"); return false; } // cleanup vpc resources if (!cleanupVpcResources(vpc.getId(), caller, callerUserId)) { - s_logger.warn("Failed to cleanup resources for vpc " + vpc); + logger.warn("Failed to cleanup resources for vpc " + vpc); return false; } // update the instance with removed flag only when the cleanup is // executed successfully if (vpcDao.remove(vpc.getId())) { - s_logger.debug("Vpc " + vpc + " is destroyed successfully"); + logger.debug("Vpc " + vpc + " is destroyed successfully"); return true; } else { - s_logger.warn("Vpc " + vpc + " failed to destroy"); + logger.warn("Vpc " + vpc + " failed to destroy"); return false; } } @@ -1307,21 +1305,21 @@ public Vpc updateVpc(final long vpcId, final String vpcName, final String displa boolean restartRequired = 
checkAndUpdateRouterSourceNatIp(vpcToUpdate, sourceNatIp); if (vpcDao.update(vpcId, vpc) || restartRequired) { // Note that the update may fail because nothing has changed, other than the sourcenat ip - s_logger.debug("Updated VPC id=" + vpcId); + logger.debug("Updated VPC id=" + vpcId); if (restartRequired) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("restarting vpc %s/%s, due to changing sourcenat in Update VPC call", vpc.getName(), vpc.getUuid())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("restarting vpc %s/%s, due to changing sourcenat in Update VPC call", vpc.getName(), vpc.getUuid())); } final User callingUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId()); restartVpc(vpcId, true, false, false, callingUser); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("no restart needed."); + if (logger.isDebugEnabled()) { + logger.debug("no restart needed."); } } return vpcDao.findById(vpcId); } else { - s_logger.error(String.format("failed to update vpc %s/%s",vpc.getName(), vpc.getUuid())); + logger.error(String.format("failed to update vpc %s/%s",vpc.getName(), vpc.getUuid())); return null; } } @@ -1337,7 +1335,7 @@ private boolean checkAndUpdateRouterSourceNatIp(Vpc vpc, String sourceNatIp) { } catch (Exception e) { // pokemon exception from transaction String msg = String.format("Update of source NAT ip to %s for network \"%s\"/%s failed due to %s", requestedIp.getAddress().addr(), vpc.getName(), vpc.getUuid(), e.getLocalizedMessage()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg, e); } } @@ -1347,20 +1345,20 @@ private boolean checkAndUpdateRouterSourceNatIp(Vpc vpc, String sourceNatIp) { @Nullable protected IPAddressVO validateSourceNatip(Vpc vpc, String sourceNatIp) { if (sourceNatIp == null) { - s_logger.trace(String.format("no source NAT ip given to update vpc %s with.", vpc.getName())); + logger.trace(String.format("no source NAT ip given to 
update vpc %s with.", vpc.getName())); return null; } else { - s_logger.info(String.format("updating VPC %s to have source NAT ip %s", vpc.getName(), sourceNatIp)); + logger.info(String.format("updating VPC %s to have source NAT ip %s", vpc.getName(), sourceNatIp)); } IPAddressVO requestedIp = getIpAddressVO(vpc, sourceNatIp); if (requestedIp == null) return null; // check if it is the current source NAT address if (requestedIp.isSourceNat()) { - s_logger.info(String.format("IP address %s is already the source Nat address. Not updating!", sourceNatIp)); + logger.info(String.format("IP address %s is already the source Nat address. Not updating!", sourceNatIp)); return null; } if (_firewallDao.countRulesByIpId(requestedIp.getId()) > 0) { - s_logger.info(String.format("IP address %s has firewall/portforwarding rules. Not updating!", sourceNatIp)); + logger.info(String.format("IP address %s has firewall/portforwarding rules. Not updating!", sourceNatIp)); return null; } return requestedIp; @@ -1371,7 +1369,7 @@ private IPAddressVO getIpAddressVO(Vpc vpc, String sourceNatIp) { // check if the address is already aqcuired for this network IPAddressVO requestedIp = _ipAddressDao.findByIp(sourceNatIp); if (requestedIp == null || requestedIp.getVpcId() == null || ! requestedIp.getVpcId().equals(vpc.getId())) { - s_logger.warn(String.format("Source NAT IP %s is not associated with network %s/%s. It cannot be used as source NAT IP.", + logger.warn(String.format("Source NAT IP %s is not associated with network %s/%s. It cannot be used as source NAT IP.", sourceNatIp, vpc.getName(), vpc.getUuid())); return null; } @@ -1388,18 +1386,18 @@ protected Integer validateMtu(VpcVO vpcToUpdate, Integer mtu) { String message = String.format("Configured MTU for network VR's public interfaces exceeds the upper limit " + "enforced by zone level setting: %s. 
VR's public interfaces can be configured with a maximum MTU of %s", NetworkService.VRPublicInterfaceMtu.key(), NetworkService.VRPublicInterfaceMtu.valueIn(zoneId)); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); mtu = NetworkService.VRPublicInterfaceMtu.valueIn(zoneId); } else if (mtu < NetworkService.MINIMUM_MTU) { String subject = "Incorrect MTU configured on network for public interfaces of the VPC VR"; String message = String.format("Configured MTU for network VR's public interfaces is lesser than the minimum MTU of %s", NetworkService.MINIMUM_MTU ); - s_logger.warn(message); + logger.warn(message); alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_VR_PUBLIC_IFACE_MTU, zoneId, null, subject, message); mtu = NetworkService.MINIMUM_MTU; } if (Objects.equals(mtu, vpcToUpdate.getPublicMtu())) { - s_logger.info(String.format("Desired MTU of %s already configured on the VPC public interfaces", mtu)); + logger.info(String.format("Desired MTU of %s already configured on the VPC public interfaces", mtu)); mtu = null; } return mtu; @@ -1426,7 +1424,7 @@ protected void updateMtuOfVpcNetwork(VpcVO vpcToUpdate, VpcVO vpc, Integer mtu) network.setPublicMtu(mtu); _ntwkDao.update(network.getId(), network); } - s_logger.info("Successfully update MTU of VPC network"); + logger.info("Successfully updated MTU of VPC network"); } else { throw new CloudRuntimeException("Failed to update MTU on the network"); } @@ -1453,12 +1451,12 @@ protected boolean updateMtuOnVpcVr(Long vpcId, Set ips) { networkHelper.sendCommandsToRouter(router, cmds); final Answer updateNetworkAnswer = cmds.getAnswer("updateNetwork"); if (!(updateNetworkAnswer != null && updateNetworkAnswer.getResult())) { - s_logger.warn("Unable to update guest network on router " + router); + logger.warn("Unable to update guest network on router " + router); throw new CloudRuntimeException("Failed to update 
guest network with new MTU"); } success = true; } catch (ResourceUnavailableException e) { - s_logger.error(String.format("Failed to update network MTU for router %s due to %s", router, e.getMessage())); + logger.error(String.format("Failed to update network MTU for router %s due to %s", router, e.getMessage())); } } return success; @@ -1651,20 +1649,20 @@ public boolean startVpc(final long vpcId, final boolean destroyOnFailure) throws boolean result = true; try { if (!startVpc(vpc, dest, context)) { - s_logger.warn("Failed to start vpc " + vpc); + logger.warn("Failed to start vpc " + vpc); result = false; } } catch (final Exception ex) { - s_logger.warn("Failed to start vpc " + vpc + " due to ", ex); + logger.warn("Failed to start vpc " + vpc + " due to ", ex); result = false; } finally { // do cleanup if (!result && destroyOnFailure) { - s_logger.debug("Destroying vpc " + vpc + " that failed to start"); + logger.debug("Destroying vpc " + vpc + " that failed to start"); if (destroyVpc(vpc, caller, callerUser.getId())) { - s_logger.warn("Successfully destroyed vpc " + vpc + " that failed to start"); + logger.warn("Successfully destroyed vpc " + vpc + " that failed to start"); } else { - s_logger.warn("Failed to destroy vpc " + vpc + " that failed to start"); + logger.warn("Failed to destroy vpc " + vpc + " that failed to start"); } } } @@ -1679,9 +1677,9 @@ protected boolean startVpc(final Vpc vpc, final DeployDestination dest, final Re for (final VpcProvider element : getVpcElements()) { if (providersToImplement.contains(element.getProvider())) { if (element.implementVpc(vpc, dest, context)) { - s_logger.debug("Vpc " + vpc + " has started successfully"); + logger.debug("Vpc " + vpc + " has started successfully"); } else { - s_logger.warn("Vpc " + vpc + " failed to start"); + logger.warn("Vpc " + vpc + " failed to start"); success = false; } } @@ -1704,7 +1702,7 @@ public boolean shutdownVpc(final long vpcId) throws ConcurrentOperationException 
_accountMgr.checkAccess(caller, null, false, vpc); // shutdown provider - s_logger.debug("Shutting down vpc " + vpc); + logger.debug("Shutting down vpc " + vpc); // TODO - shutdown all vpc resources here (ACLs, gateways, etc) boolean success = true; @@ -1713,9 +1711,9 @@ public boolean shutdownVpc(final long vpcId) throws ConcurrentOperationException for (final VpcProvider element : getVpcElements()) { if (providersToImplement.contains(element.getProvider())) { if (element.shutdownVpc(vpc, context)) { - s_logger.debug("Vpc " + vpc + " has been shutdown successfully"); + logger.debug("Vpc " + vpc + " has been shutdown successfully"); } else { - s_logger.warn("Vpc " + vpc + " failed to shutdown"); + logger.warn("Vpc " + vpc + " failed to shutdown"); success = false; } } @@ -1811,7 +1809,7 @@ public void validateNtwkOffForVpc(final NetworkOffering guestNtwkOff, final List // 4) Conserve mode should be off in older versions if (guestNtwkOff.isConserveMode()) { - s_logger.info("Creating a network with conserve mode in VPC"); + logger.info("Creating a network with conserve mode in VPC"); } // 5) If Netscaler is LB provider make sure it is in dedicated mode @@ -1835,7 +1833,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { try { // check number of active networks in vpc if (_ntwkDao.countVpcNetworks(vpc.getId()) >= _maxNetworks) { - s_logger.warn(String.format("Failed to create a new VPC Guest Network because the number of networks per VPC has reached its maximum capacity of [%s]. Increase it by modifying global config [%s].", _maxNetworks, Config.VpcMaxNetworks)); + logger.warn(String.format("Failed to create a new VPC Guest Network because the number of networks per VPC has reached its maximum capacity of [%s]. 
Increase it by modifying global config [%s].", _maxNetworks, Config.VpcMaxNetworks)); throw new CloudRuntimeException(String.format("Number of networks per VPC cannot surpass [%s].", _maxNetworks)); } @@ -1873,7 +1871,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { throw new InvalidParameterValueException("Invalid gateway specified. It should never be equal to the cidr subnet value"); } } finally { - s_logger.debug("Releasing lock for " + locked); + logger.debug("Releasing lock for " + locked); vpcDao.releaseFromLockTable(locked.getId()); } } @@ -1886,7 +1884,7 @@ private void CheckAccountsAccess(Vpc vpc, Account networkAccount) { _accountMgr.checkAccess(vpcaccount, null, false, networkAccount); } catch (PermissionDeniedException e) { - s_logger.error(e.getMessage()); + logger.error(e.getMessage()); throw new InvalidParameterValueException(String.format("VPC owner does not have access to account [%s].", networkAccount.getAccountName())); } } @@ -1913,18 +1911,18 @@ public List getVpcsForAccount(final long accountId) { } public boolean cleanupVpcResources(final long vpcId, final Account caller, final long callerUserId) throws ResourceUnavailableException, ConcurrentOperationException { - s_logger.debug("Cleaning up resources for vpc id=" + vpcId); + logger.debug("Cleaning up resources for vpc id=" + vpcId); boolean success = true; // 1) Remove VPN connections and VPN gateway - s_logger.debug("Cleaning up existed site to site VPN connections"); + logger.debug("Cleaning up existed site to site VPN connections"); _s2sVpnMgr.cleanupVpnConnectionByVpc(vpcId); - s_logger.debug("Cleaning up existed site to site VPN gateways"); + logger.debug("Cleaning up existed site to site VPN gateways"); _s2sVpnMgr.cleanupVpnGatewayByVpc(vpcId); // 2) release all ip addresses final List ipsToRelease = _ipAddressDao.listByAssociatedVpc(vpcId, null); - s_logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); + 
logger.debug("Releasing ips for vpc id=" + vpcId + " as a part of vpc cleanup"); for (final IPAddressVO ipToRelease : ipsToRelease) { if (ipToRelease.isPortable()) { // portable IP address are associated with owner, until @@ -1933,26 +1931,26 @@ public boolean cleanupVpcResources(final long vpcId, final Account caller, final ipToRelease.setVpcId(null); ipToRelease.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipToRelease.getId(), ipToRelease); - s_logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); + logger.debug("Portable IP address " + ipToRelease + " is no longer associated with any VPC"); } else { success = success && _ipAddrMgr.disassociatePublicIpAddress(ipToRelease.getId(), callerUserId, caller); if (!success) { - s_logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); + logger.warn("Failed to cleanup ip " + ipToRelease + " as a part of vpc id=" + vpcId + " cleanup"); } } } if (success) { - s_logger.debug("Released ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.debug("Released ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); } else { - s_logger.warn("Failed to release ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); + logger.warn("Failed to release ip addresses for vpc id=" + vpcId + " as a part of cleanup vpc process"); // although it failed, proceed to the next cleanup step as it // doesn't depend on the public ip release } // 3) Delete all static route rules if (!revokeStaticRoutesForVpc(vpcId, caller)) { - s_logger.warn("Failed to revoke static routes for vpc " + vpcId + " as a part of cleanup vpc process"); + logger.warn("Failed to revoke static routes for vpc " + vpcId + " as a part of cleanup vpc process"); return false; } @@ -1961,12 +1959,12 @@ public boolean cleanupVpcResources(final long vpcId, final Account caller, final if (gateways != null) { for (final 
PrivateGateway gateway : gateways) { if (gateway != null) { - s_logger.debug("Deleting private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleting private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); if (!deleteVpcPrivateGateway(gateway.getId())) { success = false; - s_logger.debug("Failed to delete private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Failed to delete private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); } else { - s_logger.debug("Deleted private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); + logger.debug("Deleted private gateway " + gateway + " as a part of vpc " + vpcId + " resources cleanup"); } } } @@ -2019,7 +2017,7 @@ public boolean restartVpc(Long vpcId, boolean cleanUp, boolean makeRedundant, bo final ReservationContext context = new ReservationContextImpl(null, null, user, callerAccount); _accountMgr.checkAccess(callerAccount, null, false, vpc); - s_logger.debug("Restarting VPC " + vpc); + logger.debug("Restarting VPC " + vpc); boolean restartRequired = false; try { boolean forceCleanup = cleanUp; @@ -2043,7 +2041,7 @@ public boolean restartVpc(Long vpcId, boolean cleanUp, boolean makeRedundant, bo if (forceCleanup) { if (!rollingRestartVpc(vpc, context)) { - s_logger.warn("Failed to execute a rolling restart as a part of VPC " + vpc + " restart process"); + logger.warn("Failed to execute a rolling restart as a part of VPC " + vpc + " restart process"); restartRequired = true; return false; } @@ -2056,16 +2054,16 @@ public boolean restartVpc(Long vpcId, boolean cleanUp, boolean makeRedundant, bo restartVPCNetworks(vpcId, callerAccount, user, cleanUp, livePatch); - s_logger.debug("Starting VPC " + vpc + " as a part of VPC restart process without cleanup"); + logger.debug("Starting VPC " + vpc + " as a part of VPC restart process without cleanup"); if 
(!startVpc(vpcId, false)) { - s_logger.warn("Failed to start vpc as a part of VPC " + vpc + " restart process"); + logger.warn("Failed to start vpc as a part of VPC " + vpc + " restart process"); restartRequired = true; return false; } - s_logger.debug("VPC " + vpc + " was restarted successfully"); + logger.debug("VPC " + vpc + " was restarted successfully"); return true; } finally { - s_logger.debug("Updating VPC " + vpc + " with restartRequired=" + restartRequired); + logger.debug("Updating VPC " + vpc + " with restartRequired=" + restartRequired); final VpcVO vo = vpcDao.findById(vpcId); vo.setRestartRequired(restartRequired); vpcDao.update(vpc.getId(), vo); @@ -2162,7 +2160,7 @@ private PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNe try { validateVpcPrivateGatewayAclId(vpcId, aclId); - s_logger.debug("Creating Private gateway for VPC " + vpc); + logger.debug("Creating Private gateway for VPC " + vpc); // 1) create private network unless it is existing and // lswitch'd Network privateNtwk = null; @@ -2174,13 +2172,13 @@ private PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNe // try to create it } if (privateNtwk == null) { - s_logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri + " and associated network id: " + associatedNetworkId); + logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri + " and associated network id: " + associatedNetworkId); final String networkName = "vpc-" + vpc.getName() + "-privateNetwork"; privateNtwk = _ntwkSvc.createPrivateNetwork(networkName, networkName, physicalNetworkIdFinal, broadcastUri, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId, isSourceNat, networkOfferingId, bypassVlanOverlapCheck, associatedNetworkId); } else { // create the nic/ip as createPrivateNetwork // doesn''t do that work for us now - s_logger.info("found and using existing network for vpc " + vpc + ": " + broadcastUri); + 
logger.info("found and using existing network for vpc " + vpc + ": " + broadcastUri); final DataCenterVO dc = _dcDao.lockRow(physNetFinal.getDataCenterId(), true); // add entry to private_ip_address table @@ -2194,7 +2192,7 @@ private PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNe final Long nextMac = mac + 1; dc.setMacAddress(nextMac); - s_logger.info("creating private ip address for vpc (" + ipAddress + ", " + privateNtwk.getId() + ", " + nextMac + ", " + vpcId + ", " + isSourceNat + ")"); + logger.info("creating private ip address for vpc (" + ipAddress + ", " + privateNtwk.getId() + ", " + nextMac + ", " + vpcId + ", " + isSourceNat + ")"); privateIp = new PrivateIpVO(ipAddress, privateNtwk.getId(), nextMac, vpcId, isSourceNat); _privateIpDao.persist(privateIp); @@ -2222,7 +2220,7 @@ private PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNe gateway, netmask, vpc.getAccountId(), vpc.getDomainId(), isSourceNat, networkAclId); _vpcGatewayDao.persist(gatewayVO); - s_logger.debug("Created vpc gateway entry " + gatewayVO); + logger.debug("Created vpc gateway entry " + gatewayVO); } catch (final Exception e) { ExceptionUtil.rethrowRuntime(e); ExceptionUtil.rethrow(e, InsufficientCapacityException.class); @@ -2344,29 +2342,29 @@ public PrivateGateway applyVpcPrivateGateway(final long gatewayId, final boolean } } if (success) { - s_logger.debug("Private gateway " + gateway + " was applied successfully on the backend"); + logger.debug("Private gateway " + gateway + " was applied successfully on the backend"); if (vo.getState() != VpcGateway.State.Ready) { vo.setState(VpcGateway.State.Ready); _vpcGatewayDao.update(vo.getId(), vo); - s_logger.debug("Marke gateway " + gateway + " with state " + VpcGateway.State.Ready); + logger.debug("Marked gateway " + gateway + " with state " + VpcGateway.State.Ready); } CallContext.current().setEventDetails("Private Gateway Id: " + gatewayId); return getVpcPrivateGateway(gatewayId); } else 
{ - s_logger.warn("Private gateway " + gateway + " failed to apply on the backend"); + logger.warn("Private gateway " + gateway + " failed to apply on the backend"); return null; } } finally { // do cleanup if (!success) { if (destroyOnFailure) { - s_logger.debug("Destroying private gateway " + vo + " that failed to start"); + logger.debug("Destroying private gateway " + vo + " that failed to start"); // calling deleting from db because on createprivategateway // fail, destroyPrivateGateway is already called if (deletePrivateGatewayFromTheDB(getVpcPrivateGateway(gatewayId))) { - s_logger.warn("Successfully destroyed vpc " + vo + " that failed to start"); + logger.warn("Successfully destroyed vpc " + vo + " that failed to start"); } else { - s_logger.warn("Failed to destroy vpc " + vo + " that failed to start"); + logger.warn("Failed to destroy vpc " + vo + " that failed to start"); } } } @@ -2379,7 +2377,7 @@ public PrivateGateway applyVpcPrivateGateway(final long gatewayId, final boolean public boolean deleteVpcPrivateGateway(final long gatewayId) throws ConcurrentOperationException, ResourceUnavailableException { final VpcGatewayVO gatewayToBeDeleted = _vpcGatewayDao.findById(gatewayId); if (gatewayToBeDeleted == null) { - s_logger.debug("VPC gateway is already deleted for id=" + gatewayId); + logger.debug("VPC gateway is already deleted for id=" + gatewayId); return true; } @@ -2413,7 +2411,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { gatewayVO.setState(VpcGateway.State.Deleting); _vpcGatewayDao.update(gatewayVO.getId(), gatewayVO); - s_logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Deleting); + logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Deleting); } }); @@ -2423,12 +2421,12 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { for (final VpcProvider provider : getVpcElements()) { if 
(providersToImplement.contains(provider.getProvider())) { if (provider.deletePrivateGateway(gateway)) { - s_logger.debug("Private gateway " + gateway + " was applied successfully on the backend"); + logger.debug("Private gateway " + gateway + " was applied successfully on the backend"); } else { - s_logger.warn("Private gateway " + gateway + " failed to apply on the backend"); + logger.warn("Private gateway " + gateway + " failed to apply on the backend"); gatewayVO.setState(VpcGateway.State.Ready); _vpcGatewayDao.update(gatewayVO.getId(), gatewayVO); - s_logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Ready); + logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Ready); return false; } @@ -2473,12 +2471,12 @@ protected boolean deletePrivateGatewayFromTheDB(final PrivateGateway gateway) { final Account owner = _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM); final ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); _ntwkMgr.destroyNetwork(networkId, context, false); - s_logger.debug("Deleted private network id=" + networkId); + logger.debug("Deleted private network id=" + networkId); } } catch (final InterruptedException e) { - s_logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); + logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); } catch (final ExecutionException e) { - s_logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); + logger.error("deletePrivateGatewayFromTheDB failed to delete network id " + networkId + "due to => ", e); } return true; @@ -2571,19 +2569,19 @@ protected boolean applyStaticRoutes(final List routes, fi staticRouteProfiles.add(new StaticRouteProfile(route, gateway)); } if (!applyStaticRoutes(staticRouteProfiles)) { - s_logger.warn("Routes are not completely applied"); + 
logger.warn("Routes are not completely applied"); return false; } else { if (updateRoutesInDB) { for (final StaticRoute route : routes) { if (route.getState() == StaticRoute.State.Revoke) { _staticRouteDao.remove(route.getId()); - s_logger.debug("Removed route " + route + " from the DB"); + logger.debug("Removed route " + route + " from the DB"); } else if (route.getState() == StaticRoute.State.Add) { final StaticRouteVO ruleVO = _staticRouteDao.findById(route.getId()); ruleVO.setState(StaticRoute.State.Active); _staticRouteDao.update(ruleVO.getId(), ruleVO); - s_logger.debug("Marked route " + route + " with state " + StaticRoute.State.Active); + logger.debug("Marked route " + route + " with state " + StaticRoute.State.Active); } } } @@ -2594,12 +2592,12 @@ protected boolean applyStaticRoutes(final List routes, fi protected boolean applyStaticRoutes(final List routes) throws ResourceUnavailableException { if (routes.isEmpty()) { - s_logger.debug("No static routes to apply"); + logger.debug("No static routes to apply"); return true; } final Vpc vpc = vpcDao.findById(routes.get(0).getVpcId()); - s_logger.debug("Applying static routes for vpc " + vpc); + logger.debug("Applying static routes for vpc " + vpc); final String staticNatProvider = _vpcSrvcDao.getProviderForServiceInVpc(vpc.getId(), Service.StaticNat); for (final VpcProvider provider : getVpcElements()) { @@ -2608,9 +2606,9 @@ protected boolean applyStaticRoutes(final List routes) throw } if (provider.applyStaticRoutes(vpc, routes)) { - s_logger.debug("Applied static routes for vpc " + vpc); + logger.debug("Applied static routes for vpc " + vpc); } else { - s_logger.warn("Failed to apply static routes for vpc " + vpc); + logger.warn("Failed to apply static routes for vpc " + vpc); return false; } } @@ -2639,7 +2637,7 @@ public boolean revokeStaticRoute(final long routeId) throws ResourceUnavailableE protected boolean revokeStaticRoutesForVpc(final long vpcId, final Account caller) throws 
ResourceUnavailableException { // get all static routes for the vpc final List routes = _staticRouteDao.listByVpcId(vpcId); - s_logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); + logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); if (!routes.isEmpty()) { // mark all of them as revoke Transaction.execute(new TransactionCallbackNoReturn() { @@ -2702,7 +2700,7 @@ public StaticRoute createStaticRoute(final long gatewayId, final String cidr) th @Override public StaticRouteVO doInTransaction(final TransactionStatus status) throws NetworkRuleConflictException { StaticRouteVO newRoute = new StaticRouteVO(gateway.getId(), cidr, vpc.getId(), vpc.getAccountId(), vpc.getDomainId()); - s_logger.debug("Adding static route " + newRoute); + logger.debug("Adding static route " + newRoute); newRoute = _staticRouteDao.persist(newRoute); detectRoutesConflict(newRoute); @@ -2829,20 +2827,20 @@ protected void detectRoutesConflict(final StaticRoute newRoute) throws NetworkRu } protected void markStaticRouteForRevoke(final StaticRouteVO route, final Account caller) { - s_logger.debug("Revoking static route " + route); + logger.debug("Revoking static route " + route); if (caller != null) { _accountMgr.checkAccess(caller, null, false, route); } if (route.getState() == StaticRoute.State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a static route that is still in stage state so just removing it: " + route); + if (logger.isDebugEnabled()) { + logger.debug("Found a static route that is still in stage state so just removing it: " + route); } _staticRouteDao.remove(route.getId()); } else if (route.getState() == StaticRoute.State.Add || route.getState() == StaticRoute.State.Active) { route.setState(StaticRoute.State.Revoke); _staticRouteDao.update(route.getId(), route); - s_logger.debug("Marked static route " + route + " with state " + StaticRoute.State.Revoke); + logger.debug("Marked static route " + route + " with state 
" + StaticRoute.State.Revoke); } } @@ -2852,12 +2850,12 @@ protected void runInContext() { try { final GlobalLock lock = GlobalLock.getInternLock("VpcCleanup"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } @@ -2865,19 +2863,19 @@ protected void runInContext() { // Cleanup inactive VPCs final List inactiveVpcs = vpcDao.listInactiveVpcs(); if (inactiveVpcs != null) { - s_logger.info("Found " + inactiveVpcs.size() + " removed VPCs to cleanup"); + logger.info("Found " + inactiveVpcs.size() + " removed VPCs to cleanup"); for (final VpcVO vpc : inactiveVpcs) { - s_logger.debug("Cleaning up " + vpc); + logger.debug("Cleaning up " + vpc); destroyVpc(vpc, _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM); } } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -2895,7 +2893,7 @@ public IpAddress associateIPToVpc(final long ipId, final long vpcId) throws Reso _accountMgr.checkAccess(caller, null, true, ipToAssoc); owner = _accountMgr.getAccount(ipToAssoc.getAllocatedToAccountId()); } else { - s_logger.debug("Unable to find ip address by id: " + ipId); + logger.debug("Unable to find ip address by id: " + ipId); return null; } @@ -2907,7 +2905,7 @@ public IpAddress associateIPToVpc(final long ipId, final long vpcId) throws Reso // check permissions _accountMgr.checkAccess(caller, null, false, owner, vpc); - s_logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); + logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); final boolean isSourceNatFinal = isSrcNatIpRequired(vpc.getVpcOfferingId()) && getExistingSourceNatInVpc(vpc.getAccountId(), vpcId) == null; 
Transaction.execute(new TransactionCallbackNoReturn() { @@ -2925,7 +2923,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } }); - s_logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); + logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); CallContext.current().putContextParameter(IpAddress.class, ipToAssoc.getUuid()); return _ipAddressDao.findById(ipId); } @@ -2941,7 +2939,7 @@ public void unassignIPFromVpcNetwork(final long ipId, final long networkId) { return; } - s_logger.debug("Releasing VPC ip address " + ip + " from vpc network id=" + networkId); + logger.debug("Releasing VPC ip address " + ip + " from vpc network id=" + networkId); final long vpcId = ip.getVpcId(); boolean success = false; @@ -2955,11 +2953,11 @@ public void unassignIPFromVpcNetwork(final long ipId, final long networkId) { if (success) { ip.setAssociatedWithNetworkId(null); _ipAddressDao.update(ipId, ip); - s_logger.debug("IP address " + ip + " is no longer associated with the network inside vpc id=" + vpcId); + logger.debug("IP address " + ip + " is no longer associated with the network inside vpc id=" + vpcId); } else { throw new CloudRuntimeException("Failed to apply ip associations for network id=" + networkId + " as a part of unassigning ip " + ipId + " from vpc"); } - s_logger.debug("Successfully released VPC ip address " + ip + " back to VPC pool "); + logger.debug("Successfully released VPC ip address " + ip + " back to VPC pool "); } @Override @@ -3109,10 +3107,10 @@ private boolean rollingRestartVpc(final Vpc vpc, final ReservationContext contex if (shutdownVpc(vpc.getId())) { return startVpc(vpc.getId(), false); } - s_logger.warn("Failed to shutdown vpc as a part of VPC " + vpc + " restart process"); + logger.warn("Failed to shutdown vpc as a part of VPC " + vpc + " restart process"); return false; } - s_logger.debug("Performing rolling restart of routers of VPC " + vpc); + logger.debug("Performing rolling 
restart of routers of VPC " + vpc); _ntwkMgr.destroyExpendableRouters(routerDao.listByVpcId(vpc.getId()), context); final DeployDestination dest = new DeployDestination(_dcDao.findById(vpc.getZoneId()), null, null, null); @@ -3143,7 +3141,7 @@ private boolean rollingRestartVpc(final Vpc vpc, final ReservationContext contex // Re-program VPC VR or add a new backup router for redundant VPC if (!startVpc(vpc, dest, context)) { - s_logger.debug("Failed to re-program VPC router or deploy a new backup router for VPC" + vpc); + logger.debug("Failed to re-program VPC router or deploy a new backup router for VPC" + vpc); return false; } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java b/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java index 69267fb96d4f..072b17ab9b99 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcPrivateGatewayTransactionCallable.java @@ -21,7 +21,8 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.network.vpc.dao.PrivateIpDao; @@ -33,7 +34,7 @@ @Component public class VpcPrivateGatewayTransactionCallable implements Callable { - private static final Logger s_logger = Logger.getLogger(VpcPrivateGatewayTransactionCallable.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject private VpcGatewayDao _vpcGatewayDao; @@ -53,18 +54,18 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List privateIps = _privateIpDao.listByNetworkId(networkId); if (privateIps.size() > 1 || !privateIps.get(0).getIpAddress().equalsIgnoreCase(gateway.getIp4Address())) { - s_logger.debug("Not removing network id=" + gateway.getNetworkId() + " as it has private ip addresses for other 
gateways"); + logger.debug("Not removing network id=" + gateway.getNetworkId() + " as it has private ip addresses for other gateways"); deleteNetwork = false; } final PrivateIpVO ip = _privateIpDao.findByIpAndVpcId(gateway.getVpcId(), gateway.getIp4Address()); if (ip != null) { _privateIpDao.remove(ip.getId()); - s_logger.debug("Deleted private ip " + ip); + logger.debug("Deleted private ip " + ip); } _vpcGatewayDao.remove(gateway.getId()); - s_logger.debug("Deleted private gateway " + gateway); + logger.debug("Deleted private gateway " + gateway); } }); diff --git a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 61d247d7b8a5..6fdf54936b01 100644 --- a/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.domain.DomainVO; @@ -95,7 +94,6 @@ import com.cloud.utils.net.NetUtils; public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAccessVpnService, Configurable { - private final static Logger s_logger = Logger.getLogger(RemoteAccessVpnManagerImpl.class); static final ConfigKey RemoteAccessVpnClientIpRange = new ConfigKey("Network", String.class, RemoteAccessVpnClientIpRangeCK, "10.1.2.1-10.1.2.8", "The range of ips to be allocated to remote access vpn clients. 
The first ip in the range is used by the VPN server", false, ConfigKey.Scope.Account); @@ -263,7 +261,7 @@ public RemoteAccessVpn doInTransaction(TransactionStatus status) throws NetworkR private void validateRemoteAccessVpnConfiguration() throws ConfigurationException { String ipRange = RemoteAccessVpnClientIpRange.value(); if (ipRange == null) { - s_logger.warn(String.format("Remote access VPN configuration: Global configuration [%s] missing client IP range.", RemoteAccessVpnClientIpRange.key())); + logger.warn(String.format("Remote access VPN configuration: Global configuration [%s] missing client IP range.", RemoteAccessVpnClientIpRange.key())); return; } @@ -304,7 +302,7 @@ protected void handleExceptionOnValidateIpRangeError(Class public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, final boolean forceCleanup) throws ResourceUnavailableException { final RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipId); if (vpn == null) { - s_logger.debug("there are no Remote access vpns for public ip address id=" + ipId); + logger.debug("there are no Remote access vpns for public ip address id=" + ipId); return true; } @@ -325,7 +323,7 @@ public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, final bool }catch (ResourceUnavailableException ex) { vpn.setState(prevState); _remoteAccessVpnDao.update(vpn.getId(), vpn); - s_logger.debug("Failed to stop the vpn " + vpn.getId() + " , so reverted state to "+ + logger.debug("Failed to stop the vpn " + vpn.getId() + " , so reverted state to "+ RemoteAccessVpn.State.Running); success = false; } finally { @@ -348,11 +346,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { fwRules.add(_rulesDao.findByRelatedId(vpnFwRule.getId())); } - s_logger.debug("Marked " + fwRules.size() + " firewall rules as Revoked as a part of disable remote access vpn"); + logger.debug("Marked " + fwRules.size() + " firewall rules as Revoked as a part of disable remote access vpn"); } 
}); - s_logger.debug("Reapplying firewall rules for ip id=" + ipId + " as a part of disable remote access vpn"); + logger.debug("Reapplying firewall rules for ip id=" + ipId + " as a part of disable remote access vpn"); success = _firewallMgr.applyIngressFirewallRules(ipId, caller); } @@ -373,14 +371,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (vpnFwRules != null) { for (FirewallRule vpnFwRule : vpnFwRules) { _rulesDao.remove(vpnFwRule.getId()); - s_logger.debug("Successfully removed firewall rule with ip id=" + vpnFwRule.getSourceIpAddressId() + " and port " + + logger.debug("Successfully removed firewall rule with ip id=" + vpnFwRule.getSourceIpAddressId() + " and port " + vpnFwRule.getSourcePortStart() + " as a part of vpn cleanup"); } } } }); } catch (Exception ex) { - s_logger.warn(String.format("Unable to release the VPN ports from the firewall rules [%s] due to [%s]", fwRules.stream().map(rule -> + logger.warn(String.format("Unable to release the VPN ports from the firewall rules [%s] due to [%s]", fwRules.stream().map(rule -> String.format("{\"ipId\": %s, \"port\": %s}", rule.getSourceIpAddressId(), rule.getSourcePortStart())).collect(Collectors.joining(", ")), ex.getMessage()), ex); } } @@ -435,7 +433,7 @@ public boolean removeVpnUser(long vpnOwnerId, String username, Account caller) { final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, username); if (user == null) { String errorMessage = String.format("Could not find VPN user=[%s]. 
VPN owner id=[%s]", username, vpnOwnerId); - s_logger.debug(errorMessage); + logger.debug(errorMessage); throw new InvalidParameterValueException(errorMessage); } _accountMgr.checkAccess(caller, null, true, user); @@ -520,11 +518,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { private boolean removeVpnUserWithoutRemoteAccessVpn(long vpnOwnerId, String userName) { VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, userName); if (vpnUser == null) { - s_logger.error(String.format("VPN user not found with ownerId: %d and username: %s", vpnOwnerId, userName)); + logger.error(String.format("VPN user not found with ownerId: %d and username: %s", vpnOwnerId, userName)); return false; } if (!State.Revoke.equals(vpnUser.getState())) { - s_logger.error(String.format("VPN user with ownerId: %d and username: %s is not in revoked state, current state: %s", vpnOwnerId, userName, vpnUser.getState())); + logger.error(String.format("VPN user with ownerId: %d and username: %s is not in revoked state, current state: %s", vpnOwnerId, userName, vpnUser.getState())); return false; } return _vpnUsersDao.remove(vpnUser.getId()); @@ -537,14 +535,14 @@ public boolean applyVpnUsers(long vpnOwnerId, String userName, boolean forRemove Account owner = _accountDao.findById(vpnOwnerId); _accountMgr.checkAccess(caller, null, true, owner); - s_logger.debug(String.format("Applying VPN users for %s.", owner.toString())); + logger.debug(String.format("Applying VPN users for %s.", owner.toString())); List vpns = getValidRemoteAccessVpnForAccount(vpnOwnerId); if (CollectionUtils.isEmpty(vpns)) { if (forRemove) { return removeVpnUserWithoutRemoteAccessVpn(vpnOwnerId, userName); } - s_logger.warn(String.format("Unable to apply VPN user due to there are no remote access VPNs configured on %s to apply VPN user.", owner.toString())); + logger.warn(String.format("Unable to apply VPN user due to there are no remote access VPNs configured on %s to apply VPN user.", 
owner.toString())); return true; } @@ -563,7 +561,7 @@ public boolean applyVpnUsers(long vpnOwnerId, String userName, boolean forRemove Boolean[] finals = new Boolean[users.size()]; for (RemoteAccessVPNServiceProvider element : _vpnServiceProviders) { - s_logger.debug("Applying vpn access to " + element.getName()); + logger.debug("Applying vpn access to " + element.getName()); for (RemoteAccessVpnVO vpn : vpns) { try { String[] results = element.applyVpnUsers(vpn, users); @@ -574,7 +572,7 @@ public boolean applyVpnUsers(long vpnOwnerId, String userName, boolean forRemove if (indexUser == users.size()) { indexUser = 0; } - s_logger.debug("VPN User " + users.get(indexUser) + (result == null ? " is set on " : (" couldn't be set due to " + result) + " on ") + vpn.getUuid()); + logger.debug("VPN User " + users.get(indexUser) + (result == null ? " is set on " : (" couldn't be set due to " + result) + " on ") + vpn.getUuid()); if (result == null) { if (finals[indexUser] == null) { finals[indexUser] = true; @@ -587,7 +585,7 @@ public boolean applyVpnUsers(long vpnOwnerId, String userName, boolean forRemove } } } catch (ResourceUnavailableException e) { - s_logger.warn(String.format("Unable to apply VPN users [%s] due to [%s].", users.stream().map(user -> user.toString()).collect(Collectors.joining(", ")), e.getMessage()), e); + logger.warn(String.format("Unable to apply VPN users [%s] due to [%s].", users.stream().map(user -> user.toString()).collect(Collectors.joining(", ")), e.getMessage()), e); success = false; vpnTemp = vpn; @@ -619,7 +617,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } - s_logger.warn(String.format("Failed to apply VPN for %s.", user.toString())); + logger.warn(String.format("Failed to apply VPN for %s.", user.toString())); } } diff --git a/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index 51d5f9cf97b4..e76c52b9ebf0 
100644 --- a/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.user.vpn.CreateVpnConnectionCmd; @@ -84,7 +83,6 @@ @Component public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpnManager { - private static final Logger s_logger = Logger.getLogger(Site2SiteVpnManagerImpl.class); List _s2sProviders; @Inject @@ -532,7 +530,7 @@ private void setupVpnConnection(Account caller, Long vpnCustomerGwIp) { } catch (PermissionDeniedException e) { // Just don't restart this connection, as the user has no rights to it // Maybe should issue a notification to the system? - s_logger.info("Site2SiteVpnManager:updateCustomerGateway() Not resetting VPN connection " + conn.getId() + " as user lacks permission"); + logger.info("Site2SiteVpnManager:updateCustomerGateway() Not resetting VPN connection " + conn.getId() + " as user lacks permission"); continue; } @@ -547,7 +545,7 @@ private void setupVpnConnection(Account caller, Long vpnCustomerGwIp) { startVpnConnection(conn.getId()); } catch (ResourceUnavailableException e) { // Should never get here, as we are looping on the actual connections, but we must handle it regardless - s_logger.warn("Failed to update VPN connection"); + logger.warn("Failed to update VPN connection"); } } } @@ -851,7 +849,7 @@ public void reconnectDisconnectedVpnByVpc(Long vpcId) { startVpnConnection(conn.getId()); } catch (ResourceUnavailableException e) { Site2SiteCustomerGatewayVO gw = _customerGatewayDao.findById(conn.getCustomerGatewayId()); - s_logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection " + conn.getId() + " which connect to " + gw.getName()); + 
logger.warn("Site2SiteVpnManager: Fail to re-initiate VPN connection " + conn.getId() + " which connect to " + gw.getName()); } } } diff --git a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java index 19776d4993f5..cb1623b5858d 100644 --- a/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/main/java/com/cloud/projects/ProjectManagerImpl.java @@ -49,7 +49,6 @@ import org.apache.cloudstack.utils.mailing.SMTPMailProperties; import org.apache.cloudstack.utils.mailing.SMTPMailSender; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; @@ -106,7 +105,6 @@ @Component public class ProjectManagerImpl extends ManagerBase implements ProjectManager, Configurable { - public static final Logger s_logger = Logger.getLogger(ProjectManagerImpl.class); private static final SecureRandom secureRandom = new SecureRandom(); @@ -364,7 +362,7 @@ public boolean deleteProject(Account caller, long callerUserId, final ProjectVO boolean updateResult = Transaction.execute(new TransactionCallback() { @Override public Boolean doInTransaction(TransactionStatus status) { - s_logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); + logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); project.setState(State.Disabled); boolean updateResult = _projectDao.update(project.getId(), project); //owner can be already removed at this point, so adding the conditional check @@ -380,7 +378,7 @@ public Boolean doInTransaction(TransactionStatus status) { if (updateResult) { //pass system caller when clenaup projects account if (!cleanupProject(project, _accountDao.findById(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM)) { - s_logger.warn("Failed to cleanup 
project's id=" + project.getId() + " resources, not removing the project yet"); + logger.warn("Failed to cleanup project's id=" + project.getId() + " resources, not removing the project yet"); return false; } else { //check if any Tungsten-Fabric provider exists and delete the project from Tungsten-Fabric providers @@ -388,7 +386,7 @@ public Boolean doInTransaction(TransactionStatus status) { return _projectDao.remove(project.getId()); } } else { - s_logger.warn("Failed to mark the project id=" + project.getId() + " with state " + State.Disabled); + logger.warn("Failed to mark the project id=" + project.getId() + " with state " + State.Disabled); return false; } } @@ -398,7 +396,7 @@ private boolean cleanupProject(final Project project, AccountVO caller, Long cal boolean result = true; //Delete project's account AccountVO account = _accountDao.findById(project.getProjectAccountId()); - s_logger.debug("Deleting projects " + project + " internal account id=" + account.getId() + " as a part of project cleanup..."); + logger.debug("Deleting projects " + project + " internal account id=" + account.getId() + " as a part of project cleanup..."); result = result && _accountMgr.deleteAccount(account, callerUserId, caller); @@ -408,22 +406,22 @@ private boolean cleanupProject(final Project project, AccountVO caller, Long cal @Override public Boolean doInTransaction(TransactionStatus status) { boolean result = true; - s_logger.debug("Unassigning all accounts from project " + project + " as a part of project cleanup..."); + logger.debug("Unassigning all accounts from project " + project + " as a part of project cleanup..."); List projectAccounts = _projectAccountDao.listByProjectId(project.getId()); for (ProjectAccount projectAccount : projectAccounts) { result = result && unassignAccountFromProject(projectAccount.getProjectId(), projectAccount.getAccountId()); } - s_logger.debug("Removing all invitations for the project " + project + " as a part of project cleanup..."); + 
logger.debug("Removing all invitations for the project " + project + " as a part of project cleanup..."); _projectInvitationDao.cleanupInvitations(project.getId()); return result; } }); if (result) { - s_logger.debug("Accounts are unassign successfully from project " + project + " as a part of project cleanup..."); + logger.debug("Accounts are unassign successfully from project " + project + " as a part of project cleanup..."); } } else { - s_logger.warn("Failed to cleanup project's internal account"); + logger.warn("Failed to cleanup project's internal account"); } return result; @@ -433,14 +431,14 @@ public Boolean doInTransaction(TransactionStatus status) { public boolean unassignAccountFromProject(long projectId, long accountId) { ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountId); if (projectAccount == null) { - s_logger.debug("Account id=" + accountId + " is not assigned to project id=" + projectId + " so no need to unassign"); + logger.debug("Account id=" + accountId + " is not assigned to project id=" + projectId + " so no need to unassign"); return true; } if (_projectAccountDao.remove(projectAccount.getId())) { return true; } else { - s_logger.warn("Failed to unassign account id=" + accountId + " from the project id=" + projectId); + logger.warn("Failed to unassign account id=" + accountId + " from the project id=" + projectId); return false; } } @@ -479,7 +477,7 @@ public Boolean doInTransaction(TransactionStatus status) { //remove all invitations for account if (success) { - s_logger.debug("Removed account " + accountId + " from project " + projectId + " , cleaning up old invitations for account/project..."); + logger.debug("Removed account " + accountId + " from project " + projectId + " , cleaning up old invitations for account/project..."); ProjectInvitation invite = _projectInvitationDao.findByAccountIdProjectId(accountId, projectId); if (invite != null) { success = success && 
_projectInvitationDao.remove(invite.getId()); @@ -557,7 +555,7 @@ public boolean addUserToProject(Long projectId, String username, String email, L ProjectAccount projectAccountUser = _projectAccountDao.findByProjectIdUserId(projectId, user.getAccountId(), user.getId()); if (projectAccountUser != null) { - s_logger.info("User with id: " + user.getId() + " is already added to the project with id: " + projectId); + logger.info("User with id: " + user.getId() + " is already added to the project with id: " + projectId); return true; } @@ -583,7 +581,7 @@ public boolean addUserToProject(Long projectId, String username, String email, L Optional.ofNullable(role).map(ProjectRole::getId).orElse(null)) != null) { return true; } - s_logger.warn("Failed to add user to project with id: " + projectId); + logger.warn("Failed to add user to project with id: " + projectId); return false; } } @@ -676,7 +674,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Resour } Account currentOwnerAccount = getProjectOwner(projectId); if (currentOwnerAccount == null) { - s_logger.error("Unable to find the current owner for the project id=" + projectId); + logger.error("Unable to find the current owner for the project id=" + projectId); throw new InvalidParameterValueException("Unable to find the current owner for the project id=" + projectId); } if (currentOwnerAccount.getId() != futureOwnerAccount.getId()) { @@ -701,7 +699,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Resour _resourceLimitMgr.incrementResourceCount(futureOwnerAccount.getId(), ResourceType.project); } else { - s_logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); + logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); } } } @@ -820,7 +818,7 @@ public boolean addAccountToProject(long projectId, String accountName, String em //Check if the account already added to the 
project ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, account.getId()); if (projectAccount != null) { - s_logger.debug("Account " + accountName + " already added to the project id=" + projectId); + logger.debug("Account " + accountName + " already added to the project id=" + projectId); return true; } } @@ -847,7 +845,7 @@ public boolean addAccountToProject(long projectId, String accountName, String em Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - s_logger.warn("Failed to add account " + accountName + " to project id=" + projectId); + logger.warn("Failed to add account " + accountName + " to project id=" + projectId); return false; } } @@ -859,7 +857,7 @@ private boolean inviteAccountToProject(Project project, Account account, String Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - s_logger.warn("Failed to generate invitation for account " + account.getAccountName() + " to project id=" + project); + logger.warn("Failed to generate invitation for account " + account.getAccountName() + " to project id=" + project); return false; } } @@ -871,7 +869,7 @@ private boolean inviteAccountToProject(Project project, Account account, String Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - s_logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); + logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); return false; } } @@ -885,7 +883,7 @@ private boolean inviteUserToProject(Project project, User user, String email, Ro Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - s_logger.warn("Failed to generate invitation for account " + user.getUsername() + " to project id=" + project); + logger.warn("Failed to generate invitation for account 
" + user.getUsername() + " to project id=" + project); return false; } } else { @@ -895,7 +893,7 @@ private boolean inviteUserToProject(Project project, User user, String email, Ro Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) { return true; } else { - s_logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); + logger.warn("Failed to generate invitation for email " + email + " to project id=" + project); return false; } } @@ -1013,9 +1011,9 @@ private void deletePendingInvite(Long projectId, User user) { if (invite != null) { boolean success = _projectInvitationDao.remove(invite.getId()); if (success){ - s_logger.info("Successfully deleted invite pending for the user : "+user.getUsername()); + logger.info("Successfully deleted invite pending for the user : "+user.getUsername()); } else { - s_logger.info("Failed to delete project invite for user: "+ user.getUsername()); + logger.info("Failed to delete project invite for user: "+ user.getUsername()); } } } @@ -1030,7 +1028,7 @@ public Boolean doInTransaction(TransactionStatus status) { success = _projectAccountDao.remove(projectAccount.getId()); if (success) { - s_logger.debug("Removed user " + user.getId() + " from project. Removing any invite sent to the user"); + logger.debug("Removed user " + user.getId() + " from project. 
Removing any invite sent to the user"); ProjectInvitation invite = _projectInvitationDao.findByUserIdProjectId(user.getId(), user.getAccountId(), projectId); if (invite != null) { success = success && _projectInvitationDao.remove(invite.getId()); @@ -1084,11 +1082,11 @@ public Boolean doInTransaction(TransactionStatus status) { } //remove the expired/declined invitation if (accountId != null) { - s_logger.debug("Removing invitation in state " + invite.getState() + " for account id=" + accountId + " to project " + project); + logger.debug("Removing invitation in state " + invite.getState() + " for account id=" + accountId + " to project " + project); } else if (userId != null) { - s_logger.debug("Removing invitation in state " + invite.getState() + " for user id=" + userId + " to project " + project); + logger.debug("Removing invitation in state " + invite.getState() + " for user id=" + userId + " to project " + project); } else if (email != null) { - s_logger.debug("Removing invitation in state " + invite.getState() + " for email " + email + " to project " + project); + logger.debug("Removing invitation in state " + invite.getState() + " for email " + email + " to project " + project); } _projectInvitationDao.expunge(invite.getId()); @@ -1121,7 +1119,7 @@ public ProjectInvitation generateTokenBasedInvitation(Project project, Long user try { sendInvite(token, email, project.getId()); } catch (Exception ex) { - s_logger.warn("Failed to send project id=" + project + " invitation to the email " + email + "; removing the invitation record from the db", ex); + logger.warn("Failed to send project id=" + project + " invitation to the email " + email + "; removing the invitation record from the db", ex); _projectInvitationDao.remove(projectInvitation.getId()); return null; } @@ -1151,7 +1149,7 @@ protected void sendInvite(String token, String email, long projectId) throws Mes } private boolean expireInvitation(ProjectInvitationVO invite) { - s_logger.debug("Expiring 
invitation id=" + invite.getId()); + logger.debug("Expiring invitation id=" + invite.getId()); invite.setState(ProjectInvitation.State.Expired); return _projectInvitationDao.update(invite.getId(), invite); } @@ -1226,7 +1224,7 @@ public Boolean doInTransaction(TransactionStatus status) { ProjectInvitation.State newState = accept ? ProjectInvitation.State.Completed : ProjectInvitation.State.Declined; //update invitation - s_logger.debug("Marking invitation " + inviteFinal + " with state " + newState); + logger.debug("Marking invitation " + inviteFinal + " with state " + newState); inviteFinal.setState(newState); result = _projectInvitationDao.update(inviteFinal.getId(), inviteFinal); @@ -1235,20 +1233,20 @@ public Boolean doInTransaction(TransactionStatus status) { if (inviteFinal.getForUserId() == -1) { ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountIdFinal); if (projectAccount != null) { - s_logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); + logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); } else { assignAccountToProject(project, accountIdFinal, inviteFinal.getAccountRole(), null, inviteFinal.getProjectRoleId()); } } else { ProjectAccount projectAccount = _projectAccountDao.findByProjectIdUserId(projectId, finalUser.getAccountId(), finalUser.getId()); if (projectAccount != null) { - s_logger.debug("User " + finalUser.getId() + "has already been added to the project id=" + projectId); + logger.debug("User " + finalUser.getId() + "has already been added to the project id=" + projectId); } else { assignUserToProject(project, inviteFinal.getForUserId(), finalUser.getAccountId(), inviteFinal.getAccountRole(), inviteFinal.getProjectRoleId()); } } } else { - s_logger.warn("Failed to update project invitation " + inviteFinal + " with state " + newState); + logger.warn("Failed to update project invitation " + inviteFinal + " with state 
" + newState); } return result; } @@ -1297,7 +1295,7 @@ public Project activateProject(final long projectId) { Project.State currentState = project.getState(); if (currentState == State.Active) { - s_logger.debug("The project id=" + projectId + " is already active, no need to activate it again"); + logger.debug("The project id=" + projectId + " is already active, no need to activate it again"); return project; } @@ -1335,7 +1333,7 @@ public Project suspendProject(long projectId) throws ConcurrentOperationExceptio _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if (suspendProject(project)) { - s_logger.debug("Successfully suspended project id=" + projectId); + logger.debug("Successfully suspended project id=" + projectId); return _projectDao.findById(projectId); } else { CloudRuntimeException ex = new CloudRuntimeException("Failed to suspend project with specified id"); @@ -1347,14 +1345,14 @@ public Project suspendProject(long projectId) throws ConcurrentOperationExceptio private boolean suspendProject(ProjectVO project) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Marking project " + project + " with state " + State.Suspended + " as a part of project suspend..."); + logger.debug("Marking project " + project + " with state " + State.Suspended + " as a part of project suspend..."); project.setState(State.Suspended); boolean updateResult = _projectDao.update(project.getId(), project); if (updateResult) { long projectAccountId = project.getProjectAccountId(); if (!_accountMgr.disableAccount(projectAccountId)) { - s_logger.warn("Failed to suspend all project's " + project + " resources; the resources will be suspended later by background thread"); + logger.warn("Failed to suspend all project's " + project + " resources; the resources will be suspended later by background thread"); } } else { throw new CloudRuntimeException("Failed to mark the project " + 
project + " with state " + State.Suspended); @@ -1391,10 +1389,10 @@ public boolean deleteProjectInvitation(long id) { _accountMgr.checkAccess(caller, AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); if (_projectInvitationDao.remove(id)) { - s_logger.debug("Project Invitation id=" + id + " is removed"); + logger.debug("Project Invitation id=" + id + " is removed"); return true; } else { - s_logger.debug("Failed to remove project invitation id=" + id); + logger.debug("Failed to remove project invitation id=" + id); return false; } } @@ -1406,15 +1404,15 @@ protected void runInContext() { TimeZone.getDefault(); List invitationsToExpire = _projectInvitationDao.listInvitationsToExpire(_invitationTimeOut); if (!invitationsToExpire.isEmpty()) { - s_logger.debug("Found " + invitationsToExpire.size() + " projects to expire"); + logger.debug("Found " + invitationsToExpire.size() + " projects to expire"); for (ProjectInvitationVO invitationToExpire : invitationsToExpire) { invitationToExpire.setState(ProjectInvitation.State.Expired); _projectInvitationDao.update(invitationToExpire.getId(), invitationToExpire); - s_logger.trace("Expired project invitation id=" + invitationToExpire.getId()); + logger.trace("Expired project invitation id=" + invitationToExpire.getId()); } } } catch (Exception ex) { - s_logger.warn("Exception while running expired invitations cleanup", ex); + logger.warn("Exception while running expired invitations cleanup", ex); } } } diff --git a/server/src/main/java/com/cloud/resource/DiscovererBase.java b/server/src/main/java/com/cloud/resource/DiscovererBase.java index d30b8cc10f0a..e594a0a0aebe 100644 --- a/server/src/main/java/com/cloud/resource/DiscovererBase.java +++ b/server/src/main/java/com/cloud/resource/DiscovererBase.java @@ -26,7 +26,6 @@ import com.cloud.utils.component.AdapterBase; import com.cloud.utils.net.UrlUtil; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import 
org.apache.log4j.Logger; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -38,7 +37,6 @@ public abstract class DiscovererBase extends AdapterBase implements Discoverer { protected Map _params; - private static final Logger s_logger = Logger.getLogger(DiscovererBase.class); @Inject protected ClusterDao _clusterDao; @Inject @@ -90,19 +88,19 @@ protected ServerResource getResource(String resourceName) { Constructor constructor = clazz.getConstructor(); resource = (ServerResource)constructor.newInstance(); } catch (ClassNotFoundException e) { - s_logger.warn("Unable to find class " + resourceName, e); + logger.warn("Unable to find class " + resourceName, e); } catch (InstantiationException e) { - s_logger.warn("Unable to instantiate class " + resourceName, e); + logger.warn("Unable to instantiate class " + resourceName, e); } catch (IllegalAccessException e) { - s_logger.warn("Illegal access " + resourceName, e); + logger.warn("Illegal access " + resourceName, e); } catch (SecurityException e) { - s_logger.warn("Security error on " + resourceName, e); + logger.warn("Security error on " + resourceName, e); } catch (NoSuchMethodException e) { - s_logger.warn("NoSuchMethodException error on " + resourceName, e); + logger.warn("NoSuchMethodException error on " + resourceName, e); } catch (IllegalArgumentException e) { - s_logger.warn("IllegalArgumentException error on " + resourceName, e); + logger.warn("IllegalArgumentException error on " + resourceName, e); } catch (InvocationTargetException e) { - s_logger.warn("InvocationTargetException error on " + resourceName, e); + logger.warn("InvocationTargetException error on " + resourceName, e); } return resource; @@ -157,11 +155,11 @@ public ServerResource reloadResource(HostVO host) { try { resource.configure(host.getName(), params); } catch (ConfigurationException e) { - s_logger.warn("Unable to configure resource due to " + e.getMessage()); + logger.warn("Unable to configure resource due to " + 
e.getMessage()); return null; } if (!resource.start()) { - s_logger.warn("Unable to start the resource"); + logger.warn("Unable to start the resource"); return null; } } diff --git a/server/src/main/java/com/cloud/resource/DummyHostDiscoverer.java b/server/src/main/java/com/cloud/resource/DummyHostDiscoverer.java index 88f87889ad8c..abba5a23529f 100644 --- a/server/src/main/java/com/cloud/resource/DummyHostDiscoverer.java +++ b/server/src/main/java/com/cloud/resource/DummyHostDiscoverer.java @@ -24,7 +24,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; @@ -33,7 +32,6 @@ @Component public class DummyHostDiscoverer extends AdapterBase implements Discoverer { - private static final Logger s_logger = Logger.getLogger(DummyHostDiscoverer.class); @Override public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) { @@ -60,7 +58,7 @@ public Map> find(long dcId, Long podId, Long try { resource.configure("Dummy Host Server", params); } catch (ConfigurationException e) { - s_logger.warn("Unable to instantiate dummy host server resource"); + logger.warn("Unable to instantiate dummy host server resource"); } resource.start(); resources.put(resource, details); diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 922df25a7268..8e8605874b4b 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -70,7 +70,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -214,7 +213,6 @@ @Component public class 
ResourceManagerImpl extends ManagerBase implements ResourceManager, ResourceService, Manager { - private static final Logger s_logger = Logger.getLogger(ResourceManagerImpl.class); Gson _gson; @@ -414,7 +412,7 @@ protected void processResourceEvent(final Integer event, final Object... params) } else { throw new CloudRuntimeException("Unknown resource event:" + event); } - s_logger.debug("Sent resource event " + eventName + " to listener " + l.getClass().getSimpleName()); + logger.debug("Sent resource event " + eventName + " to listener " + l.getClass().getSimpleName()); } } @@ -478,7 +476,7 @@ public List discoverCluster(final AddClusterCmd cmd) throws I final Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(cmd.getHypervisor()); if (hypervisorType == null) { - s_logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type"); + logger.error("Unable to resolve " + cmd.getHypervisor() + " to a valid supported hypervisor type"); throw new InvalidParameterValueException("Unable to resolve " + cmd.getHypervisor() + " to a supported "); } @@ -592,13 +590,13 @@ public List discoverCluster(final AddClusterCmd cmd) throws I } discoverer.postDiscovery(hosts, _nodeId); } - s_logger.info("External cluster has been successfully discovered by " + discoverer.getName()); + logger.info("External cluster has been successfully discovered by " + discoverer.getName()); success = true; CallContext.current().putContextParameter(Cluster.class, cluster.getUuid()); return result; } - s_logger.warn("Unable to find the server resources at " + url); + logger.warn("Unable to find the server resources at " + url); throw new DiscoveryException("Unable to add the external cluster"); } finally { if (!success) { @@ -809,7 +807,7 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } final List hosts = new ArrayList(); - s_logger.info("Trying to add a new host at " + url + " in data center " + dcId); + 
logger.info("Trying to add a new host at " + url + " in data center " + dcId); boolean isHypervisorTypeSupported = false; for (final Discoverer discoverer : _discoverers) { if (params != null) { @@ -828,15 +826,15 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } catch (final DiscoveryException e) { String errorMsg = String.format("Could not add host at [%s] with zone [%s], pod [%s] and cluster [%s] due to: [%s].", uri, dcId, podId, clusterId, e.getMessage()); - if (s_logger.isDebugEnabled()) { - s_logger.debug(errorMsg, e); + if (logger.isDebugEnabled()) { + logger.debug(errorMsg, e); } throw new DiscoveryException(errorMsg, e); } catch (final Exception e) { String err = "Exception in host discovery process with discoverer: " + discoverer.getName(); - s_logger.info(err + ", skip to another discoverer if there is any"); - if (s_logger.isDebugEnabled()) { - s_logger.debug(err + ":" + e.getMessage(), e); + logger.info(err + ", skip to another discoverer if there is any"); + if (logger.isDebugEnabled()) { + logger.debug(err + ":" + e.getMessage(), e); } } processResourceEvent(ResourceListener.EVENT_DISCOVER_AFTER, resources); @@ -855,8 +853,8 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c for (final HostVO host : kvmHosts) { if (host.getGuid().equalsIgnoreCase(guid)) { if (hostTags != null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Adding Host Tags for KVM host, tags: :" + hostTags); + if (logger.isTraceEnabled()) { + logger.trace("Adding Host Tags for KVM host, tags: :" + hostTags); } _hostTagsDao.persist(host.getId(), hostTags, false); } @@ -882,17 +880,17 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c discoverer.postDiscovery(hosts, _nodeId); } - s_logger.info("server resources successfully discovered by " + discoverer.getName()); + logger.info("server resources successfully discovered by " + discoverer.getName()); return hosts; } } if (!isHypervisorTypeSupported) { 
final String msg = "Do not support HypervisorType " + hypervisorType + " for " + url; - s_logger.warn(msg); + logger.warn(msg); throw new DiscoveryException(msg); } String errorMsg = "Cannot find the server resources at " + url; - s_logger.warn(errorMsg); + logger.warn(errorMsg); throw new DiscoveryException("Unable to add the host: " + errorMsg); } @@ -974,7 +972,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { try { resourceStateTransitTo(host, ResourceState.Event.DeleteHost, _nodeId); } catch (final NoTransitionException e) { - s_logger.debug(String.format("Cannot transit %s to Enabled state", host), e); + logger.debug(String.format("Cannot transit %s to Enabled state", host), e); } // Delete the associated entries in host ref table @@ -1000,7 +998,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { storagePool.setClusterId(null); _storagePoolDao.update(poolId, storagePool); _storagePoolDao.remove(poolId); - s_logger.debug(String.format("Local storage [id: %s] is removed as a part of %s removal", poolId, hostRemoved.toString())); + logger.debug(String.format("Local storage [id: %s] is removed as a part of %s removal", poolId, hostRemoved.toString())); } } @@ -1080,8 +1078,8 @@ public boolean deleteCluster(final DeleteClusterCmd cmd) { public void doInTransactionWithoutResult(final TransactionStatus status) { final ClusterVO cluster = _clusterDao.lockRow(cmd.getId(), true); if (cluster == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster: " + cmd.getId() + " does not even exist. Delete call is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Cluster: " + cmd.getId() + " does not even exist. 
Delete call is ignored."); } throw new CloudRuntimeException("Cluster: " + cmd.getId() + " does not exist"); } @@ -1090,8 +1088,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final List hosts = listAllHostsInCluster(cmd.getId()); if (hosts.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); + if (logger.isDebugEnabled()) { + logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); } throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has hosts"); } @@ -1100,8 +1098,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { // pools final List storagePools = _storagePoolDao.listPoolsByCluster(cmd.getId()); if (storagePools.size() > 0) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); + if (logger.isDebugEnabled()) { + logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); } throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. 
Cluster still has storage pools"); } @@ -1129,7 +1127,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - s_logger.error("Unable to delete cluster: " + cmd.getId(), t); + logger.error("Unable to delete cluster: " + cmd.getId(), t); return false; } } @@ -1151,7 +1149,7 @@ public Cluster updateCluster(UpdateClusterCmd cmd) { if(cluster.getHypervisorType() == HypervisorType.VMware) { throw new InvalidParameterValueException("Renaming VMware cluster is not supported as it could cause problems if the updated cluster name is not mapped on VCenter."); } - s_logger.debug("Updating Cluster name to: " + name); + logger.debug("Updating Cluster name to: " + name); cluster.setName(name); doUpdate = true; } @@ -1159,7 +1157,7 @@ public Cluster updateCluster(UpdateClusterCmd cmd) { if (hypervisor != null && !hypervisor.isEmpty()) { final Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.getType(hypervisor); if (hypervisorType == null) { - s_logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type"); + logger.error("Unable to resolve " + hypervisor + " to a valid supported hypervisor type"); throw new InvalidParameterValueException("Unable to resolve " + hypervisor + " to a supported type"); } else { cluster.setHypervisorType(hypervisor); @@ -1175,7 +1173,7 @@ public Cluster updateCluster(UpdateClusterCmd cmd) { throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type"); } if (newClusterType == null) { - s_logger.error("Unable to resolve " + clusterType + " to a valid supported cluster type"); + logger.error("Unable to resolve " + clusterType + " to a valid supported cluster type"); throw new InvalidParameterValueException("Unable to resolve " + clusterType + " to a supported type"); } else { cluster.setClusterType(newClusterType); @@ -1191,7 +1189,7 @@ public Cluster 
updateCluster(UpdateClusterCmd cmd) { throw new InvalidParameterValueException("Unable to resolve Allocation State '" + allocationState + "' to a supported state"); } if (newAllocationState == null) { - s_logger.error("Unable to resolve " + allocationState + " to a valid supported allocation State"); + logger.error("Unable to resolve " + allocationState + " to a valid supported allocation State"); throw new InvalidParameterValueException("Unable to resolve " + allocationState + " to a supported state"); } else { cluster.setAllocationState(newAllocationState); @@ -1208,7 +1206,7 @@ public Cluster updateCluster(UpdateClusterCmd cmd) { throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state"); } if (newManagedState == null) { - s_logger.error("Unable to resolve Managed State '" + managedstate + "' to a supported state"); + logger.error("Unable to resolve Managed State '" + managedstate + "' to a supported state"); throw new InvalidParameterValueException("Unable to resolve Managed State '" + managedstate + "' to a supported state"); } else { doUpdate = true; @@ -1340,17 +1338,17 @@ private void handleVmForLastHostOrWithVGpu(final HostVO host, final VMInstanceVO // for the last host in this cluster, destroy SSVM/CPVM and stop all other VMs if (VirtualMachine.Type.SecondaryStorageVm.equals(vm.getType()) || VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) { - s_logger.error(String.format("Maintenance: VM is of type %s. Destroying VM %s (ID: %s) immediately instead of migration.", vm.getType().toString(), vm.getInstanceName(), vm.getUuid())); + logger.error(String.format("Maintenance: VM is of type %s. Destroying VM %s (ID: %s) immediately instead of migration.", vm.getType().toString(), vm.getInstanceName(), vm.getUuid())); _haMgr.scheduleDestroy(vm, host.getId()); return; } - s_logger.error(String.format("Maintenance: No hosts available for migrations. 
Scheduling shutdown for VM %s instead of migration.", vm.getUuid())); + logger.error(String.format("Maintenance: No hosts available for migrations. Scheduling shutdown for VM %s instead of migration.", vm.getUuid())); _haMgr.scheduleStop(vm, host.getId(), WorkType.ForceStop); } private boolean doMaintain(final long hostId) { final HostVO host = _hostDao.findById(hostId); - s_logger.info("Maintenance: attempting maintenance of host " + host.getUuid()); + logger.info("Maintenance: attempting maintenance of host " + host.getUuid()); ResourceState hostState = host.getResourceState(); if (!ResourceState.canAttemptMaintenance(hostState)) { throw new CloudRuntimeException("Cannot perform maintain when resource state is " + hostState + ", hostId = " + hostId); @@ -1358,7 +1356,7 @@ private boolean doMaintain(final long hostId) { final MaintainAnswer answer = (MaintainAnswer)_agentMgr.easySend(hostId, new MaintainCommand()); if (answer == null || !answer.getResult()) { - s_logger.warn("Unable to send MaintainCommand to host: " + hostId); + logger.warn("Unable to send MaintainCommand to host: " + hostId); return false; } @@ -1366,7 +1364,7 @@ private boolean doMaintain(final long hostId) { resourceStateTransitTo(host, ResourceState.Event.AdminAskMaintenance, _nodeId); } catch (final NoTransitionException e) { final String err = String.format("Cannot transit resource state of %s to %s", host, ResourceState.Maintenance); - s_logger.debug(err, e); + logger.debug(err, e); throw new CloudRuntimeException(err + e.getMessage()); } @@ -1383,7 +1381,7 @@ private boolean doMaintain(final long hostId) { List hosts = listAllUpAndEnabledHosts(Host.Type.Routing, host.getClusterId(), host.getPodId(), host.getDataCenterId()); if (CollectionUtils.isEmpty(hosts)) { - s_logger.warn("Unable to find a host for vm migration in cluster: " + host.getClusterId()); + logger.warn("Unable to find a host for vm migration in cluster: " + host.getClusterId()); if (! 
isClusterWideMigrationPossible(host, vms, hosts)) { return false; } @@ -1406,11 +1404,11 @@ private boolean doMaintain(final long hostId) { "Unsupported host.maintenance.local.storage.strategy: %s. Please set a strategy according to the global settings description: " + "'Error', 'Migration', or 'ForceStop'.", HOST_MAINTENANCE_LOCAL_STRATEGY.value().toString()); - s_logger.error(logMessage); + logger.error(logMessage); throw new CloudRuntimeException("There are active VMs using the host's local storage pool. Please stop all VMs on this host that use local storage."); } } else { - s_logger.info("Maintenance: scheduling migration of VM " + vm.getUuid() + " from host " + host.getUuid()); + logger.info("Maintenance: scheduling migration of VM " + vm.getUuid() + " from host " + host.getUuid()); _haMgr.scheduleMigration(vm); } } @@ -1420,7 +1418,7 @@ private boolean doMaintain(final long hostId) { private boolean isClusterWideMigrationPossible(Host host, List vms, List hosts) { if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId())) { - s_logger.info("Looking for hosts across different clusters in zone: " + host.getDataCenterId()); + logger.info("Looking for hosts across different clusters in zone: " + host.getDataCenterId()); Long podId = null; for (final VMInstanceVO vm : vms) { if (VirtualMachine.systemVMs.contains(vm.getType())) { @@ -1431,23 +1429,23 @@ private boolean isClusterWideMigrationPossible(Host host, List vms } hosts.addAll(listAllUpAndEnabledHosts(Host.Type.Routing, null, podId, host.getDataCenterId())); if (CollectionUtils.isEmpty(hosts)) { - s_logger.warn("Unable to find a host for vm migration in zone: " + host.getDataCenterId()); + logger.warn("Unable to find a host for vm migration in zone: " + host.getDataCenterId()); return false; } - s_logger.info("Found hosts in the zone for vm migration: " + hosts); + logger.info("Found hosts in the zone for vm migration: " + hosts); if (HypervisorType.VMware.equals(host.getHypervisorType())) { - 
s_logger.debug("Skipping pool check of volumes on VMware environment because across-cluster vm migration is supported by vMotion"); + logger.debug("Skipping pool check of volumes on VMware environment because across-cluster vm migration is supported by vMotion"); return true; } // Don't migrate vm if it has volumes on cluster-wide pool for (final VMInstanceVO vm : vms) { if (_vmMgr.checkIfVmHasClusterWideVolumes(vm.getId())) { - s_logger.warn(String.format("VM %s cannot be migrated across cluster as it has volumes on cluster-wide pool", vm)); + logger.warn(String.format("VM %s cannot be migrated across cluster as it has volumes on cluster-wide pool", vm)); return false; } } } else { - s_logger.warn(String.format("VMs cannot be migrated across cluster since %s is false for zone ID: %d", MIGRATE_VM_ACROSS_CLUSTERS.key(), host.getDataCenterId())); + logger.warn(String.format("VMs cannot be migrated across cluster since %s is false for zone ID: %d", MIGRATE_VM_ACROSS_CLUSTERS.key(), host.getDataCenterId())); return false; } return true; @@ -1495,7 +1493,7 @@ public Host maintain(final PrepareForMaintenanceCmd cmd) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.debug("Unable to find host " + hostId); + logger.debug("Unable to find host " + hostId); throw new InvalidParameterValueException("Unable to find host with ID: " + hostId + ". 
Please specify a valid host ID."); } if (!ResourceState.canAttemptMaintenance(host.getResourceState())) { @@ -1603,7 +1601,7 @@ public Host declareHostAsDegraded(final DeclareHostAsDegradedCmd cmd) throws NoT resourceStateTransitTo(host, ResourceState.Event.DeclareHostDegraded, _nodeId); host.setResourceState(ResourceState.Degraded); } catch (NoTransitionException e) { - s_logger.error(String.format("Cannot transmit host [id:%s, name:%s, state:%s, status:%s] to %s state", host.getId(), host.getName(), host.getState(), host.getStatus(), + logger.error(String.format("Cannot transmit host [id:%s, name:%s, state:%s, status:%s] to %s state", host.getId(), host.getName(), host.getState(), host.getStatus(), ResourceState.Event.DeclareHostDegraded), e); throw e; } @@ -1619,10 +1617,10 @@ public Host declareHostAsDegraded(final DeclareHostAsDegradedCmd cmd) throws NoT private void scheduleVmsRestart(Long hostId) { List allVmsOnHost = _vmDao.listByHostId(hostId); if (CollectionUtils.isEmpty(allVmsOnHost)) { - s_logger.debug(String.format("Host [id=%s] was marked as Degraded with no allocated VMs, no need to schedule VM restart", hostId)); + logger.debug(String.format("Host [id=%s] was marked as Degraded with no allocated VMs, no need to schedule VM restart", hostId)); } - s_logger.debug(String.format("Host [id=%s] was marked as Degraded with a total of %s allocated VMs. Triggering HA to start VMs that have HA enabled.", hostId, allVmsOnHost.size())); + logger.debug(String.format("Host [id=%s] was marked as Degraded with a total of %s allocated VMs. 
Triggering HA to start VMs that have HA enabled.", hostId, allVmsOnHost.size())); for (VMInstanceVO vm : allVmsOnHost) { State vmState = vm.getState(); if (vmState == State.Starting || vmState == State.Running || vmState == State.Stopping) { @@ -1687,7 +1685,7 @@ protected void configureVncAccessForKVMHostFailedMigrations(HostVO host, List errorVms) throws NoTransitionException { - s_logger.debug("Unable to migrate / fix errors for " + errorVms.size() + " VM(s) from host " + host.getUuid()); + logger.debug("Unable to migrate / fix errors for " + errorVms.size() + " VM(s) from host " + host.getUuid()); _haMgr.cancelScheduledMigrations(host); configureVncAccessForKVMHostFailedMigrations(host, errorVms); resourceStateTransitTo(host, ResourceState.Event.UnableToMaintain, _nodeId); @@ -1709,14 +1707,14 @@ protected boolean setHostIntoErrorInMaintenance(HostVO host, List } protected boolean setHostIntoErrorInPrepareForMaintenance(HostVO host, List errorVms) throws NoTransitionException { - s_logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenanceWithErrors state"); + logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenanceWithErrors state"); configureVncAccessForKVMHostFailedMigrations(host, errorVms); resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); return false; } protected boolean setHostIntoPrepareForMaintenanceAfterErrorsFixed(HostVO host) throws NoTransitionException { - s_logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenance state as any previous corrections have been fixed"); + logger.debug("Host " + host.getUuid() + " entering in PrepareForMaintenance state as any previous corrections have been fixed"); resourceStateTransitTo(host, ResourceState.Event.ErrorsCorrected, _nodeId); return false; } @@ -1742,7 +1740,7 @@ protected boolean setHostIntoPrepareForMaintenanceAfterErrorsFixed(HostVO host) protected boolean attemptMaintain(HostVO host) throws NoTransitionException { final 
long hostId = host.getId(); - s_logger.info(String.format("Attempting maintenance for %s", host)); + logger.info(String.format("Attempting maintenance for %s", host)); // Step 0: First gather if VMs have pending HAWork for migration with retries left. final List allVmsOnHost = _vmDao.listByHostId(hostId); @@ -1750,7 +1748,7 @@ protected boolean attemptMaintain(HostVO host) throws NoTransitionException { boolean hasPendingMigrationRetries = false; for (VMInstanceVO vmInstanceVO : allVmsOnHost) { if (_haMgr.hasPendingMigrationsWork(vmInstanceVO.getId())) { - s_logger.info(String.format("Attempting maintenance for %s found pending migration for %s.", host, vmInstanceVO)); + logger.info(String.format("Attempting maintenance for %s found pending migration for %s.", host, vmInstanceVO)); hasPendingMigrationRetries = true; break; } @@ -1760,7 +1758,7 @@ protected boolean attemptMaintain(HostVO host) throws NoTransitionException { if (!hasMigratingAwayVms && CollectionUtils.isEmpty(_vmDao.findByHostInStates(host.getId(), State.Migrating, State.Running, State.Starting, State.Stopping, State.Error, State.Unknown))) { if (hasPendingMigrationRetries) { - s_logger.error("There should not be pending retries VMs for this host as there are no running, migrating," + + logger.error("There should not be pending retries VMs for this host as there are no running, migrating," + "starting, stopping, error or unknown states on host " + host); } return setHostIntoMaintenance(host); @@ -1807,7 +1805,7 @@ public boolean checkAndMaintain(final long hostId) { hostInMaintenance = attemptMaintain(host); } } catch (final NoTransitionException e) { - s_logger.warn(String.format("Cannot transit %s from %s to Maintenance state.", host, host.getResourceState()), e); + logger.warn(String.format("Cannot transit %s from %s to Maintenance state.", host, host.getResourceState()), e); } return hostInMaintenance; } @@ -1832,7 +1830,7 @@ private void handleAutoEnableDisableKVMHost(boolean 
autoEnableDisableKVMSetting, _hostDetailsDao.update(hostDetail.getId(), hostDetail); } else if (!isUpdateFromHostHealthCheck && hostDetail != null && Boolean.parseBoolean(hostDetail.getValue()) && resourceEvent == ResourceState.Event.Disable) { - s_logger.info(String.format("The setting %s is enabled but the host %s is manually set into %s state," + + logger.info(String.format("The setting %s is enabled but the host %s is manually set into %s state," + "ignoring future auto enabling of the host based on health check results", AgentManager.EnableKVMAutoEnableDisable.key(), host.getName(), resourceEvent)); hostDetail.setValue(Boolean.FALSE.toString()); @@ -1853,12 +1851,12 @@ private boolean updateHostAllocationState(HostVO host, String allocationState, if ((host.getResourceState() == ResourceState.Enabled && resourceEvent == ResourceState.Event.Enable) || (host.getResourceState() == ResourceState.Disabled && resourceEvent == ResourceState.Event.Disable)) { - s_logger.info(String.format("The host %s is already on the allocated state", host.getName())); + logger.info(String.format("The host %s is already on the allocated state", host.getName())); return false; } if (isAutoEnableAttemptForADisabledHost(autoEnableDisableKVMSetting, isUpdateFromHostHealthCheck, hostDetail, resourceEvent)) { - s_logger.debug(String.format("The setting '%s' is enabled and the health check succeeds on the host, " + + logger.debug(String.format("The setting '%s' is enabled and the health check succeeds on the host, " + "but the host has been manually disabled previously, ignoring auto enabling", AgentManager.EnableKVMAutoEnableDisable.key())); return false; @@ -1879,7 +1877,7 @@ private boolean isAutoEnableAttemptForADisabledHost(boolean autoEnableDisableKVM } private void updateHostName(HostVO host, String name) { - s_logger.debug("Updating Host name to: " + name); + logger.debug("Updating Host name to: " + name); host.setName(name); _hostDao.update(host.getId(), host); } @@ -1913,11 
+1911,11 @@ private void updateHostGuestOSCategory(Long hostId, Long guestOSCategoryId) { private void updateHostTags(HostVO host, Long hostId, List hostTags, Boolean isTagARule) { List activeVMs = _vmDao.listByHostId(hostId); - s_logger.warn(String.format("The following active VMs [%s] are using the host [%s]. " + + logger.warn(String.format("The following active VMs [%s] are using the host [%s]. " + "Updating the host tags will not affect them.", activeVMs, host)); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating Host Tags to :" + hostTags); + if (logger.isDebugEnabled()) { + logger.debug("Updating Host Tags to :" + hostTags); } _hostTagsDao.persist(hostId, new ArrayList<>(new HashSet<>(hostTags)), isTagARule); } @@ -1959,7 +1957,7 @@ private Host updateHost(Long hostId, String name, Long guestOSCategoryId, String try { _storageMgr.enableHost(hostId); } catch (StorageUnavailableException | StorageConflictException e) { - s_logger.error(String.format("Failed to setup host %s when enabled", host)); + logger.error(String.format("Failed to setup host %s when enabled", host)); } final HostVO updatedHost = _hostDao.findById(hostId); @@ -2151,7 +2149,7 @@ private Object dispatchToStateAdapters(final ResourceStateAdapter.Event event, f final ResourceStateAdapter adapter = item.getValue(); final String msg = "Dispatching resource state event " + event + " to " + item.getKey(); - s_logger.debug(msg); + logger.debug(msg); if (event == ResourceStateAdapter.Event.CREATE_HOST_VO_FOR_CONNECTED) { result = adapter.createHostVOForConnectedAgent((HostVO)args[0], (StartupCommand[])args[1]); @@ -2172,7 +2170,7 @@ private Object dispatchToStateAdapters(final ResourceStateAdapter.Event event, f break; } } catch (final UnableDeleteHostException e) { - s_logger.debug("Adapter " + adapter.getName() + " says unable to delete host", e); + logger.debug("Adapter " + adapter.getName() + " says unable to delete host", e); result = new ResourceStateAdapter.DeleteHostAnswer(false, 
true); } } else { @@ -2198,7 +2196,7 @@ public void checkCIDR(final HostPodVO pod, final DataCenterVO dc, final String s final String cidrSubnet = NetUtils.getCidrSubNet(cidrAddress, cidrSize); final String serverSubnet = NetUtils.getSubNet(serverPrivateIP, serverPrivateNetmask); if (!cidrSubnet.equals(serverSubnet)) { - s_logger.warn("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " + + logger.warn("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " + dc.getName()); throw new IllegalArgumentException("The private ip address of the server (" + serverPrivateIP + ") is not compatible with the CIDR of pod: " + pod.getName() + " and zone: " + dc.getName()); @@ -2296,7 +2294,7 @@ protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource dcId = Long.parseLong(dataCenter); dc = _dcDao.findById(dcId); } catch (final NumberFormatException e) { - s_logger.debug("Cannot parse " + dataCenter + " into Long."); + logger.debug("Cannot parse " + dataCenter + " into Long."); } } if (dc == null) { @@ -2310,7 +2308,7 @@ protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource final long podId = Long.parseLong(pod); p = _podDao.findById(podId); } catch (final NumberFormatException e) { - s_logger.debug("Cannot parse " + pod + " into Long."); + logger.debug("Cannot parse " + pod + " into Long."); } } /* @@ -2403,12 +2401,12 @@ protected HostVO createHostVO(final StartupCommand[] cmds, final ServerResource /* Agent goes to Connecting status */ _agentMgr.agentStatusTransitTo(host, Status.Event.AgentConnected, _nodeId); } catch (final Exception e) { - s_logger.debug(String.format("Cannot transit %s to Creating state", host), e); + logger.debug(String.format("Cannot transit %s to Creating state", host), e); _agentMgr.agentStatusTransitTo(host, Status.Event.Error, _nodeId); 
try { resourceStateTransitTo(host, ResourceState.Event.Error, _nodeId); } catch (final NoTransitionException e1) { - s_logger.debug(String.format("Cannot transit %s to Error state", host), e); + logger.debug(String.format("Cannot transit %s to Error state", host), e); } } @@ -2519,7 +2517,7 @@ private Host createHostAndAgent(final ServerResource resource, final Map details, final List hostTags) { if (host.getPodId() == null) { - s_logger.error("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null"); + logger.error("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null"); throw new IllegalArgumentException("Host " + ssCmd.getPrivateIpAddress() + " sent incorrect pod, pod id is null"); } @@ -2795,8 +2793,8 @@ public void deleteRoutingHost(final HostVO host, final boolean isForced, final b throw new CloudRuntimeException(String.format("Non-Routing host gets in deleteRoutingHost, id is %s", host.getId())); } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Deleting %s", host)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Deleting %s", host)); } final StoragePoolVO storagePool = _storageMgr.findLocalStorageOnHost(host.getId()); @@ -2807,12 +2805,12 @@ public void deleteRoutingHost(final HostVO host, final boolean isForced, final b try { final StoragePool pool = _storageSvr.preparePrimaryStorageForMaintenance(storagePool.getId()); if (pool == null) { - s_logger.debug("Failed to set primary storage into maintenance mode"); + logger.debug("Failed to set primary storage into maintenance mode"); throw new UnableDeleteHostException("Failed to set primary storage into maintenance mode"); } } catch (final Exception e) { - s_logger.debug("Failed to set primary storage into maintenance mode, due to: " + e.toString()); + logger.debug("Failed to set primary storage into maintenance mode, due to: " + e.toString()); throw new UnableDeleteHostException("Failed to set primary storage into maintenance 
mode, due to: " + e.toString()); } } @@ -2823,7 +2821,7 @@ public void deleteRoutingHost(final HostVO host, final boolean isForced, final b _vmMgr.destroy(vm.getUuid(), false); } catch (final Exception e) { String errorMsg = String.format("There was an error when destroying %s as a part of hostDelete for %s", vm, host); - s_logger.debug(errorMsg, e); + logger.debug(errorMsg, e); throw new UnableDeleteHostException(errorMsg + "," + e.getMessage()); } } @@ -2837,20 +2835,20 @@ public void deleteRoutingHost(final HostVO host, final boolean isForced, final b try { resourceStateTransitTo(host, ResourceState.Event.DeleteHost, host.getId()); } catch (final NoTransitionException e) { - s_logger.debug("Cannot transmit host " + host.getId() + " to Disabled state", e); + logger.debug("Cannot transmit host " + host.getId() + " to Disabled state", e); } for (final VMInstanceVO vm : vms) { if ((! HighAvailabilityManager.ForceHA.value() && !vm.isHaEnabled()) || vm.getState() == State.Stopping) { - s_logger.debug(String.format("Stopping %s as a part of hostDelete for %s",vm, host)); + logger.debug(String.format("Stopping %s as a part of hostDelete for %s",vm, host)); try { _haMgr.scheduleStop(vm, host.getId(), WorkType.Stop); } catch (final Exception e) { final String errorMsg = String.format("There was an error stopping the %s as a part of hostDelete for %s", vm, host); - s_logger.debug(errorMsg, e); + logger.debug(errorMsg, e); throw new UnableDeleteHostException(errorMsg + "," + e.getMessage()); } } else if ((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && (vm.getState() == State.Running || vm.getState() == State.Starting)) { - s_logger.debug(String.format("Scheduling restart for %s, state: %s on host: %s.", vm, vm.getState(), host)); + logger.debug(String.format("Scheduling restart for %s, state: %s on host: %s.", vm, vm.getState(), host)); _haMgr.scheduleRestart(vm, false); } } @@ -2866,7 +2864,7 @@ private boolean doCancelMaintenance(final long hostId) { 
HostVO host; host = _hostDao.findById(hostId); if (host == null || host.getRemoved() != null) { - s_logger.warn("Unable to find host " + hostId); + logger.warn("Unable to find host " + hostId); return true; } @@ -2885,7 +2883,7 @@ private boolean doCancelMaintenance(final long hostId) { final List vms = _haMgr.findTakenMigrationWork(); for (final VMInstanceVO vm : vms) { if (vm.getHostId() != null && vm.getHostId() == hostId) { - s_logger.warn("Unable to cancel migration because the vm is being migrated: " + vm + ", hostId = " + hostId); + logger.warn("Unable to cancel migration because the vm is being migrated: " + vm + ", hostId = " + hostId); vms_migrating = true; } } @@ -2896,7 +2894,7 @@ private boolean doCancelMaintenance(final long hostId) { resourceStateTransitTo(host, ResourceState.Event.AdminCancelMaintenance, _nodeId); _agentMgr.pullAgentOutMaintenance(hostId); } catch (final NoTransitionException e) { - s_logger.debug(String.format("Cannot transit %s to Enabled state", host), e); + logger.debug(String.format("Cannot transit %s to Enabled state", host), e); return false; } @@ -2957,7 +2955,7 @@ protected void connectAndRestartAgentOnHost(HostVO host, String username, String if (result.getReturnCode() != 0) { throw new CloudRuntimeException(String.format("Could not restart agent on %s due to: %s", host, result.getStdErr())); } - s_logger.debug("cloudstack-agent restart result: " + result.toString()); + logger.debug("cloudstack-agent restart result: " + result.toString()); } catch (final SshException e) { throw new CloudRuntimeException("SSH to agent is enabled, but agent restart failed", e); } @@ -2997,7 +2995,7 @@ public boolean executeUserRequest(final long hostId, final ResourceState.Event e private boolean doUmanageHost(final long hostId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - s_logger.debug("Cannot find host " + hostId + ", assuming it has been deleted, skip umanage"); + logger.debug("Cannot find host " + hostId + ", 
assuming it has been deleted, skip umanage"); return true; } @@ -3041,7 +3039,7 @@ private boolean doUpdateHostPassword(final long hostId) { final UpdateHostPasswordCommand cmd = new UpdateHostPasswordCommand(username, password, hostIpAddress); final Answer answer = _agentMgr.easySend(hostId, cmd); - s_logger.info("Result returned from update host password ==> " + answer.getDetails()); + logger.info("Result returned from update host password ==> " + answer.getDetails()); return answer.getResult(); } @@ -3057,7 +3055,7 @@ public boolean updateClusterPassword(final UpdateHostPasswordCmd command) { return result; } } catch (final AgentUnavailableException e) { - s_logger.error("Agent is not available!", e); + logger.error("Agent is not available!", e); } if (shouldUpdateHostPasswd) { @@ -3081,7 +3079,7 @@ public boolean updateHostPassword(final UpdateHostPasswordCmd command) { return result; } } catch (final AgentUnavailableException e) { - s_logger.error("Agent is not available!", e); + logger.error("Agent is not available!", e); } final boolean shouldUpdateHostPasswd = command.getUpdatePasswdOnHost(); @@ -3108,7 +3106,7 @@ public Boolean propagateResourceEvent(final long agentId, final ResourceState.Ev return null; } - s_logger.debug("Propagating resource request event:" + event.toString() + " to agent:" + agentId); + logger.debug("Propagating resource request event:" + event.toString() + " to agent:" + agentId); final Command[] cmds = new Command[1]; cmds[0] = new PropagateResourceEventCommand(agentId, event); @@ -3119,8 +3117,8 @@ public Boolean propagateResourceEvent(final long agentId, final ResourceState.Ev final Answer[] answers = _gson.fromJson(AnsStr, Answer[].class); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Result for agent change is " + answers[0].getResult()); + if (logger.isDebugEnabled()) { + logger.debug("Result for agent change is " + answers[0].getResult()); } return answers[0].getResult(); @@ -3130,17 +3128,17 @@ public Boolean 
propagateResourceEvent(final long agentId, final ResourceState.Ev public boolean migrateAwayFailed(final long hostId, final long vmId) { final HostVO host = _hostDao.findById(hostId); if (host == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cant not find host " + hostId); + if (logger.isDebugEnabled()) { + logger.debug("Cant not find host " + hostId); } return false; } else { try { - s_logger.warn("Migration of VM " + _vmDao.findById(vmId) + " failed from host " + _hostDao.findById(hostId) + + logger.warn("Migration of VM " + _vmDao.findById(vmId) + " failed from host " + _hostDao.findById(hostId) + ". Emitting event UnableToMigrate."); return resourceStateTransitTo(host, ResourceState.Event.UnableToMigrate, _nodeId); } catch (final NoTransitionException e) { - s_logger.debug(String.format("No next resource state for %s while current state is [%s] with event %s", host, host.getResourceState(), ResourceState.Event.UnableToMigrate), e); + logger.debug(String.format("No next resource state for %s while current state is [%s] with event %s", host, host.getResourceState(), ResourceState.Event.UnableToMigrate), e); return false; } } @@ -3323,7 +3321,7 @@ public HostStats getHostStatistics(final long hostId) { if (answer == null || !answer.getResult()) { final String msg = "Unable to obtain host " + hostId + " statistics. 
"; - s_logger.warn(msg); + logger.warn(msg); return null; } else { @@ -3422,8 +3420,8 @@ public boolean isGPUDeviceAvailable(final long hostId, final String groupName, f if(!listAvailableGPUDevice(hostId, groupName, vgpuType).isEmpty()) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host ID: "+ hostId +" does not have GPU device available"); + if (logger.isDebugEnabled()) { + logger.debug("Host ID: "+ hostId +" does not have GPU device available"); } return false; } @@ -3435,7 +3433,7 @@ public GPUDeviceTO getGPUDevice(final long hostId, final String groupName, final if (CollectionUtils.isEmpty(gpuDeviceList)) { final String errorMsg = "Host " + hostId + " does not have required GPU device or out of capacity. GPU group: " + groupName + ", vGPU Type: " + vgpuType; - s_logger.error(errorMsg); + logger.error(errorMsg); throw new CloudRuntimeException(errorMsg); } @@ -3460,7 +3458,7 @@ public HashMap> getGPUStatistics(final Ho } if (answer == null || !answer.getResult()) { final String msg = String.format("Unable to obtain GPU stats for %s", host); - s_logger.warn(msg); + logger.warn(msg); return null; } else { // now construct the result object @@ -3504,8 +3502,8 @@ public Boolean doInTransaction(final TransactionStatus status) { final long id = reservationEntry.getId(); final PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true); if (hostReservation == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); } return false; } @@ -3514,8 +3512,8 @@ public Boolean doInTransaction(final TransactionStatus status) { return true; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host reservation for host: " + hostId + " does not even exist. 
Release reservartion call is ignored."); + if (logger.isDebugEnabled()) { + logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); } return false; @@ -3524,7 +3522,7 @@ public Boolean doInTransaction(final TransactionStatus status) { } catch (final CloudRuntimeException e) { throw e; } catch (final Throwable t) { - s_logger.error("Unable to release host reservation for host: " + hostId, t); + logger.error("Unable to release host reservation for host: " + hostId, t); return false; } } diff --git a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java index 25b2ad53bf2b..c7bdf9c6f6c9 100644 --- a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -100,7 +99,6 @@ public void setAffinityGroupProcessors(List affinityProc _affinityProcessors = affinityProcessors; } - public static final Logger s_logger = Logger.getLogger(RollingMaintenanceManagerImpl.class.getName()); private Pair> getResourceTypeAndIdPair(List podIds, List clusterIds, List zoneIds, List hostIds) { Pair> pair = CollectionUtils.isNotEmpty(podIds) ? 
new Pair<>(ResourceType.Pod, podIds) : @@ -194,11 +192,11 @@ public Ternary, List>> star } disableClusterIfEnabled(cluster, disabledClusters); - s_logger.debug("State checks on the hosts in the cluster"); + logger.debug("State checks on the hosts in the cluster"); performStateChecks(cluster, hosts, forced, hostsSkipped); - s_logger.debug("Checking hosts capacity before attempting rolling maintenance"); + logger.debug("Checking hosts capacity before attempting rolling maintenance"); performCapacityChecks(cluster, hosts, forced); - s_logger.debug("Attempting pre-flight stages on each host before starting rolling maintenance"); + logger.debug("Attempting pre-flight stages on each host before starting rolling maintenance"); performPreFlightChecks(hosts, timeout, payload, forced, hostsToAvoidMaintenance); for (Host host: hosts) { @@ -217,7 +215,7 @@ public Ternary, List>> star } } catch (AgentUnavailableException | InterruptedException | CloudRuntimeException e) { String err = "Error starting rolling maintenance: " + e.getMessage(); - s_logger.error(err, e); + logger.error(err, e); success = false; details = err; return new Ternary<>(success, details, new Pair<>(hostsUpdated, hostsSkipped)); @@ -311,7 +309,7 @@ private Ternary startRollingMaintenanceHostInCluster(C return new Ternary<>(false, true, "Maintenance stage must be avoided"); } - s_logger.debug("Updating capacity before re-checking capacity"); + logger.debug("Updating capacity before re-checking capacity"); alertManager.recalculateCapacity(); result = reCheckCapacityBeforeMaintenanceOnHost(cluster, host, forced, hostsSkipped); if (result.first() || result.second()) { @@ -366,7 +364,7 @@ private Ternary performPostMaintenanceStageOnHost(Host private void cancelHostMaintenance(Host host) { if (!resourceManager.cancelMaintenance(host.getId())) { String message = "Could not cancel maintenance on host " + host.getUuid(); - s_logger.error(message); + logger.error(message); throw new CloudRuntimeException(message); } 
} @@ -399,7 +397,7 @@ private Ternary performMaintenanceStageOnHost(Host hos * @throws AgentUnavailableException */ private void putHostIntoMaintenance(Host host) throws InterruptedException, AgentUnavailableException { - s_logger.debug(String.format("Trying to set %s into maintenance", host)); + logger.debug(String.format("Trying to set %s into maintenance", host)); PrepareForMaintenanceCmd cmd = new PrepareForMaintenanceCmd(); cmd.setId(host.getId()); resourceManager.maintain(cmd); @@ -429,7 +427,7 @@ private Ternary reCheckCapacityBeforeMaintenanceOnHost if (!capacityCheckBeforeMaintenance.first()) { String errorMsg = String.format("Capacity check failed for %s: %s", host, capacityCheckBeforeMaintenance.second()); if (forced) { - s_logger.info(String.format("Skipping %s as: %s", host, errorMsg)); + logger.info(String.format("Skipping %s as: %s", host, errorMsg)); hostsSkipped.add(new HostSkipped(host, errorMsg)); return new Ternary<>(true, true, capacityCheckBeforeMaintenance.second()); } @@ -445,7 +443,7 @@ private boolean isMaintenanceStageAvoided(Host host, Map hostsToAv if (hostsToAvoidMaintenance.containsKey(host.getId())) { HostSkipped hostSkipped = new HostSkipped(host, hostsToAvoidMaintenance.get(host.getId())); hostsSkipped.add(hostSkipped); - s_logger.debug(String.format("%s is in avoid maintenance list [hosts skipped: %d], skipping its maintenance.", host, hostsSkipped.size())); + logger.debug(String.format("%s is in avoid maintenance list [hosts skipped: %d], skipping its maintenance.", host, hostsSkipped.size())); return true; } return false; @@ -496,7 +494,7 @@ private boolean isMaintenanceScriptDefinedOnHost(Host host, List ho return answer.isMaintenaceScriptDefined(); } catch (AgentUnavailableException | OperationTimedoutException e) { String msg = String.format("Could not check for maintenance script on %s due to: %s", host, e.getMessage()); - s_logger.error(msg, e); + logger.error(msg, e); return false; } } @@ -542,7 +540,7 @@ private Ternary 
sendRollingMaintenanceCommandToHost(Ho } catch (AgentUnavailableException | OperationTimedoutException e) { // Agent may be restarted on the scripts - continue polling until it is up String msg = String.format("Cannot send command to %s, waiting %sms - %s", host, pingInterval, e.getMessage()); - s_logger.warn(msg, e); + logger.warn(msg, e); cmd.setStarted(true); Thread.sleep(pingInterval); timeSpent += pingInterval; @@ -582,7 +580,7 @@ private void performPreFlightChecks(List hosts, int timeout, String payloa } private void logHostAddedToAvoidMaintenanceSet(Host host) { - s_logger.debug(String.format("%s added to the avoid maintenance set.", host)); + logger.debug(String.format("%s added to the avoid maintenance set.", host)); } /** @@ -624,7 +622,7 @@ private Pair performCapacityChecksBeforeHostInMaintenance(Host ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(runningVM.getServiceOfferingId()); for (Host hostInCluster : hostsInCluster) { if (!checkHostTags(hostTags, hostTagsDao.getHostTags(hostInCluster.getId()), serviceOffering.getHostTag())) { - s_logger.debug(String.format("Host tags mismatch between %s and %s Skipping it from the capacity check", host, hostInCluster)); + logger.debug(String.format("Host tags mismatch between %s and %s Skipping it from the capacity check", host, hostInCluster)); continue; } DeployDestination deployDestination = new DeployDestination(null, null, null, host); @@ -634,7 +632,7 @@ private Pair performCapacityChecksBeforeHostInMaintenance(Host affinityChecks = affinityChecks && affinityProcessor.check(vmProfile, deployDestination); } if (!affinityChecks) { - s_logger.debug(String.format("Affinity check failed between %s and %s Skipping it from the capacity check", host, hostInCluster)); + logger.debug(String.format("Affinity check failed between %s and %s Skipping it from the capacity check", host, hostInCluster)); continue; } boolean maxGuestLimit = capacityManager.checkIfHostReachMaxGuestLimit(host); @@ -654,7 
+652,7 @@ private Pair performCapacityChecksBeforeHostInMaintenance(Host } if (!canMigrateVm) { String msg = String.format("%s cannot be migrated away from %s to any other host in the cluster", runningVM, host); - s_logger.error(msg); + logger.error(msg); return new Pair<>(false, msg); } successfullyCheckedVmMigrations++; @@ -726,10 +724,10 @@ private void waitForHostInMaintenance(long hostId) throws CloudRuntimeException, if (host.getResourceState() != ResourceState.Maintenance) { String errorMsg = "Timeout: waited " + timeout + "ms for host " + host.getUuid() + "(" + host.getName() + ")" + " to be in Maintenance state, but after timeout it is in " + host.getResourceState().toString() + " state"; - s_logger.error(errorMsg); + logger.error(errorMsg); throw new CloudRuntimeException(errorMsg); } - s_logger.debug("Host " + host.getUuid() + "(" + host.getName() + ") is in maintenance"); + logger.debug("Host " + host.getUuid() + "(" + host.getName() + ") is in maintenance"); } @Override diff --git a/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java b/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java index 41d6c1f52ac8..943c68c7c8dd 100644 --- a/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java +++ b/server/src/main/java/com/cloud/resourceicon/ResourceIconManagerImpl.java @@ -25,13 +25,11 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import com.cloud.domain.PartOf; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.metadata.ResourceMetaDataManagerImpl; import com.cloud.network.security.SecurityGroupRuleVO; import com.cloud.network.security.SecurityGroupVO; import com.cloud.network.vpc.NetworkACLItemVO; @@ -64,7 +62,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class ResourceIconManagerImpl 
extends ManagerBase implements ResourceIconManager { - public static final Logger s_logger = Logger.getLogger(ResourceMetaDataManagerImpl.class); @Inject AccountService accountService; @@ -211,7 +208,7 @@ public boolean deleteResourceIcon(List resourceIds, ResourceTag.Resource Account caller = CallContext.current().getCallingAccount(); List resourceIcons = searchResourceIcons(resourceIds, resourceType); if (resourceIcons.isEmpty()) { - s_logger.debug("No resource Icon(s) uploaded for the specified resources"); + logger.debug("No resource Icon(s) uploaded for the specified resources"); return false; } Transaction.execute(new TransactionCallbackNoReturn() { @@ -226,7 +223,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { Long accountId = accountDomainPair.first(); resourceManagerUtil.checkResourceAccessible(accountId, domainId, String.format("Account ' %s ' doesn't have permissions to upload icon for resource ' %s ", caller, id)); resourceIconDao.remove(resourceIcon.getId()); - s_logger.debug("Removed icon for resources (" + + logger.debug("Removed icon for resources (" + String.join(", ", resourceIds) + ")"); } } diff --git a/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java b/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java index 5650af153395..e52bd5632cb6 100644 --- a/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java +++ b/server/src/main/java/com/cloud/resourcelimit/CheckedReservation.java @@ -22,7 +22,8 @@ import org.apache.cloudstack.reservation.ReservationVO; import org.apache.cloudstack.reservation.dao.ReservationDao; import org.apache.cloudstack.user.ResourceReservation; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; import com.cloud.configuration.Resource.ResourceType; @@ -34,7 +35,7 @@ public class CheckedReservation implements AutoCloseable, ResourceReservation { 
- private static final Logger LOG = Logger.getLogger(CheckedReservation.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int TRY_TO_GET_LOCK_TIME = 120; private GlobalLock quotaLimitLock; @@ -64,8 +65,8 @@ public CheckedReservation(Account account, ResourceType resourceType, Long amoun this.reservation = null; setGlobalLock(account, resourceType); if (this.amount != null && this.amount <= 0) { - if(LOG.isDebugEnabled()){ - LOG.debug(String.format("not reserving no amount of resources for %s in domain %d, type: %s, %s ", account.getAccountName(), account.getDomainId(), resourceType, amount)); + if(logger.isDebugEnabled()){ + logger.debug(String.format("not reserving no amount of resources for %s in domain %d, type: %s, %s ", account.getAccountName(), account.getDomainId(), resourceType, amount)); } this.amount = null; } @@ -86,8 +87,8 @@ public CheckedReservation(Account account, ResourceType resourceType, Long amoun throw new ResourceAllocationException(String.format("unable to acquire resource reservation \"%s\"", quotaLimitLock.getName()), resourceType); } } else { - if(LOG.isDebugEnabled()){ - LOG.debug(String.format("not reserving no amount of resources for %s in domain %d, type: %s ", account.getAccountName(), account.getDomainId(), resourceType)); + if(logger.isDebugEnabled()){ + logger.debug(String.format("not reserving no amount of resources for %s in domain %d, type: %s ", account.getAccountName(), account.getDomainId(), resourceType)); } } } diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index 959a0dc3bb2c..de2b109e73e5 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -49,7 +49,6 @@ import org.apache.cloudstack.user.ResourceReservation; import 
org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.alert.AlertManager; @@ -117,7 +116,6 @@ @Component public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLimitService, Configurable { - public static final Logger s_logger = Logger.getLogger(ResourceLimitManagerImpl.class); @Inject private AccountManager _accountMgr; @@ -279,7 +277,7 @@ public boolean configure(final String name, final Map params) th domainResourceLimitMap.put(Resource.ResourceType.primary_storage, Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainPrimaryStorage.key()))); domainResourceLimitMap.put(Resource.ResourceType.secondary_storage, Long.parseLong(_configDao.getValue(Config.DefaultMaxDomainSecondaryStorage.key()))); } catch (NumberFormatException e) { - s_logger.error("NumberFormatException during configuration", e); + logger.error("NumberFormatException during configuration", e); throw new ConfigurationException("Configuration failed due to NumberFormatException, see log for the stacktrace"); } @@ -290,7 +288,7 @@ public boolean configure(final String name, final Map params) th public void incrementResourceCount(long accountId, ResourceType type, Long... delta) { // don't upgrade resource count for system account if (accountId == Account.ACCOUNT_ID_SYSTEM) { - s_logger.trace("Not incrementing resource count for system accounts, returning"); + logger.trace("Not incrementing resource count for system accounts, returning"); return; } @@ -302,7 +300,7 @@ public void incrementResourceCount(long accountId, ResourceType type, Long... de public void decrementResourceCount(long accountId, ResourceType type, Long... 
delta) { // don't upgrade resource count for system account if (accountId == Account.ACCOUNT_ID_SYSTEM) { - s_logger.trace("Not decrementing resource count for system accounts, returning"); + logger.trace("Not decrementing resource count for system accounts, returning"); return; } long numToDecrement = (delta.length == 0) ? 1 : delta[0].longValue(); @@ -473,14 +471,14 @@ private void checkDomainResourceLimit(final Account account, final Project proje convCurrentDomainResourceCount, convCurrentResourceReservation, convNumResources ); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if" + messageSuffix); + if (logger.isDebugEnabled()) { + logger.debug("Checking if" + messageSuffix); } if (domainResourceLimit != Resource.RESOURCE_UNLIMITED && requestedDomainResourceCount > domainResourceLimit) { String message = "Maximum" + messageSuffix; ResourceAllocationException e = new ResourceAllocationException(message, type); - s_logger.error(message, e); + logger.error(message, e); throw e; } } @@ -514,14 +512,14 @@ private void checkAccountResourceLimit(final Account account, final Project proj convertedAccountResourceLimit, convertedCurrentResourceCount, convertedCurrentResourceReservation, convertedNumResources ); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if" + messageSuffix); + if (logger.isDebugEnabled()) { + logger.debug("Checking if" + messageSuffix); } if (accountResourceLimit != Resource.RESOURCE_UNLIMITED && requestedResourceCount > accountResourceLimit) { String message = "Maximum" + messageSuffix; ResourceAllocationException e = new ResourceAllocationException(message, type); - s_logger.error(message, e); + logger.error(message, e); throw e; } } @@ -876,12 +874,12 @@ public List recalculateResourceCount(Long accountId, Long domai @DB protected boolean updateResourceCountForAccount(final long accountId, final ResourceType type, final boolean increment, final long delta) { - if (s_logger.isDebugEnabled()) { + if 
(logger.isDebugEnabled()) { String convertedDelta = String.valueOf(delta); if (type == ResourceType.secondary_storage || type == ResourceType.primary_storage){ convertedDelta = toHumanReadableSize(delta); } - s_logger.debug("Updating resource Type = " + type + " count for Account = " + accountId + " Operation = " + (increment ? "increasing" : "decreasing") + " Amount = " + convertedDelta); + logger.debug("Updating resource Type = " + type + " count for Account = " + accountId + " Operation = " + (increment ? "increasing" : "decreasing") + " Amount = " + convertedDelta); } try { return Transaction.execute(new TransactionCallback() { @@ -891,7 +889,7 @@ public Boolean doInTransaction(TransactionStatus status) { List rowsToUpdate = lockAccountAndOwnerDomainRows(accountId, type); for (ResourceCountVO rowToUpdate : rowsToUpdate) { if (!_resourceCountDao.updateById(rowToUpdate.getId(), increment, delta)) { - s_logger.trace("Unable to update resource count for the row " + rowToUpdate); + logger.trace("Unable to update resource count for the row " + rowToUpdate); result = false; } } @@ -899,7 +897,7 @@ public Boolean doInTransaction(TransactionStatus status) { } }); } catch (Exception ex) { - s_logger.error("Failed to update resource count for account id=" + accountId); + logger.error("Failed to update resource count for account id=" + accountId); return false; } } @@ -942,7 +940,7 @@ public Long doInTransaction(TransactionStatus status) { _resourceCountDao.setResourceCount(domainId, ResourceOwnerType.Domain, type, newResourceCount); if (oldResourceCount != newResourceCount) { - s_logger.warn("Discrepency in the resource count has been detected " + "(original count = " + oldResourceCount + " correct count = " + newResourceCount + ") for Type = " + type + logger.warn("Discrepency in the resource count has been detected " + "(original count = " + oldResourceCount + " correct count = " + newResourceCount + ") for Type = " + type + " for Domain ID = " + domainId + " is fixed 
during resource count recalculation."); } @@ -1004,7 +1002,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // resource count which will not lead to any discrepancy. if (newCount != null && !newCount.equals(oldCount) && type != Resource.ResourceType.primary_storage && type != Resource.ResourceType.secondary_storage) { - s_logger.warn("Discrepancy in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + + logger.warn("Discrepancy in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + " for account ID " + accountId + " is fixed during resource count recalculation."); } @@ -1184,7 +1182,7 @@ protected void runInContext() { try { ManagementServerHostVO msHost = managementServerHostDao.findOneByLongestRuntime(); if (msHost == null || (msHost.getMsid() != ManagementServerNode.getManagementServerId())) { - s_logger.trace("Skipping the resource counters recalculation task on this management server"); + logger.trace("Skipping the resource counters recalculation task on this management server"); return; } runResourceCheckTaskInternal(); @@ -1198,14 +1196,14 @@ protected void runInContext() { } private void runResourceCheckTaskInternal() { - s_logger.info("Started resource counters recalculation periodic task."); + logger.info("Started resource counters recalculation periodic task."); List domains; List accounts; // try/catch task, otherwise it won't be rescheduled in case of exception try { domains = _domainDao.findImmediateChildrenForParent(Domain.ROOT_DOMAIN); } catch (Exception e) { - s_logger.warn("Resource counters recalculation periodic task failed, unable to fetch immediate children for the domain " + Domain.ROOT_DOMAIN, e); + logger.warn("Resource counters recalculation periodic task failed, unable to fetch immediate children for the domain " + Domain.ROOT_DOMAIN, e); // initialize domains as empty list to do best effort 
recalculation domains = new ArrayList<>(); } @@ -1213,7 +1211,7 @@ private void runResourceCheckTaskInternal() { try { accounts = _accountDao.findActiveAccountsForDomain(Domain.ROOT_DOMAIN); } catch (Exception e) { - s_logger.warn("Resource counters recalculation periodic task failed, unable to fetch active accounts for domain " + Domain.ROOT_DOMAIN, e); + logger.warn("Resource counters recalculation periodic task failed, unable to fetch active accounts for domain " + Domain.ROOT_DOMAIN, e); // initialize accounts as empty list to do best effort recalculation accounts = new ArrayList<>(); } @@ -1232,21 +1230,21 @@ private void runResourceCheckTaskInternal() { recalculateAccountResourceCountInContext(account.getId(), type); } } - s_logger.info("Finished resource counters recalculation periodic task."); + logger.info("Finished resource counters recalculation periodic task."); } private void recalculateDomainResourceCountInContext(long domainId, ResourceType type) { try { recalculateDomainResourceCount(domainId, type); } catch (Exception e) { - s_logger.warn("Resource counters recalculation periodic task failed for the domain " + domainId + " and the resource type " + type + " .", e); + logger.warn("Resource counters recalculation periodic task failed for the domain " + domainId + " and the resource type " + type + " .", e); } } private void recalculateAccountResourceCountInContext(long accountId, ResourceType type) { try { recalculateAccountResourceCount(accountId, type); } catch (Exception e) { - s_logger.warn("Resource counters recalculation periodic task failed for the account " + accountId + " and the resource type " + type + " .", e); + logger.warn("Resource counters recalculation periodic task failed for the account " + accountId + " and the resource type " + type + " .", e); } } } diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index eb409bd701f4..4cb909648d88 
100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -47,7 +47,6 @@ import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; @@ -115,7 +114,6 @@ import com.cloud.utils.script.Script; public class ConfigurationServerImpl extends ManagerBase implements ConfigurationServer { - public static final Logger s_logger = Logger.getLogger(ConfigurationServerImpl.class); @Inject private ConfigurationDao _configDao; @@ -163,7 +161,7 @@ public boolean configure(String name, Map params) throws Configu persistDefaultValues(); _configDepotAdmin.populateConfigurations(); } catch (InternalErrorException | CloudRuntimeException e) { - s_logger.error("Unhandled configuration exception: " + e.getMessage()); + logger.error("Unhandled configuration exception: " + e.getMessage()); throw new CloudRuntimeException("Unhandled configuration exception", e); } return true; @@ -179,7 +177,7 @@ public void persistDefaultValues() throws InternalErrorException { String init = _configDao.getValue("init"); if (init == null || init.equals("false")) { - s_logger.debug("ConfigurationServer is saving default values to the database."); + logger.debug("ConfigurationServer is saving default values to the database."); // Save default Configuration Table values List categories = Config.getCategories(); @@ -219,19 +217,19 @@ public void persistDefaultValues() throws InternalErrorException { } _configDao.update(Config.UseSecondaryStorageVm.key(), Config.UseSecondaryStorageVm.getCategory(), "true"); - s_logger.debug("ConfigurationServer made secondary storage vm required."); + logger.debug("ConfigurationServer made secondary storage vm required."); 
_configDao.update(Config.SecStorageEncryptCopy.key(), Config.SecStorageEncryptCopy.getCategory(), "false"); - s_logger.debug("ConfigurationServer made secondary storage copy encrypt set to false."); + logger.debug("ConfigurationServer made secondary storage copy encrypt set to false."); _configDao.update("secstorage.secure.copy.cert", "realhostip"); - s_logger.debug("ConfigurationServer made secondary storage copy use realhostip."); + logger.debug("ConfigurationServer made secondary storage copy use realhostip."); _configDao.update("user.password.encoders.exclude", "MD5,LDAP,PLAINTEXT"); - s_logger.debug("Configuration server excluded insecure encoders"); + logger.debug("Configuration server excluded insecure encoders"); _configDao.update("user.authenticators.exclude", "PLAINTEXT"); - s_logger.debug("Configuration server excluded plaintext authenticator"); + logger.debug("Configuration server excluded plaintext authenticator"); // Save default service offerings createServiceOffering(User.UID_SYSTEM, "Small Instance", 1, 512, 500, "Small Instance", ProvisioningType.THIN, false, false, null); @@ -247,9 +245,9 @@ public void persistDefaultValues() throws InternalErrorException { String mountParent = getMountParent(); if (mountParent != null) { _configDao.update(Config.MountParent.key(), Config.MountParent.getCategory(), mountParent); - s_logger.debug("ConfigurationServer saved \"" + mountParent + "\" as mount.parent."); + logger.debug("ConfigurationServer saved \"" + mountParent + "\" as mount.parent."); } else { - s_logger.debug("ConfigurationServer could not detect mount.parent."); + logger.debug("ConfigurationServer could not detect mount.parent."); } String hostIpAdr = NetUtils.getDefaultHostIp(); @@ -265,7 +263,7 @@ public void persistDefaultValues() throws InternalErrorException { if (needUpdateHostIp) { _configDepot.createOrUpdateConfigObject(ApiServiceConfiguration.class.getSimpleName(), ApiServiceConfiguration.ManagementServerAddresses, hostIpAdr); - 
s_logger.debug("ConfigurationServer saved \"" + hostIpAdr + "\" as host."); + logger.debug("ConfigurationServer saved \"" + hostIpAdr + "\" as host."); } } @@ -366,7 +364,7 @@ private void templateDetailsInitIfNotExist(long id, String name, String value) { } txn.commit(); } catch (Exception e) { - s_logger.warn("Unable to init template " + id + " datails: " + name, e); + logger.warn("Unable to init template " + id + " datails: " + name, e); throw new CloudRuntimeException("Unable to init template " + id + " datails: " + name); } } @@ -413,7 +411,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } } } catch (Exception e) { - s_logger.debug("initiateXenServerPVDriverVersion failed due to " + e.toString()); + logger.debug("initiateXenServerPVDriverVersion failed due to " + e.toString()); // ignore } } @@ -435,7 +433,7 @@ private String getEnvironmentProperty(String name) { try(final FileInputStream finputstream = new FileInputStream(propsFile);) { props.load(finputstream); }catch (IOException e) { - s_logger.error("getEnvironmentProperty:Exception:" + e.getMessage()); + logger.error("getEnvironmentProperty:Exception:" + e.getMessage()); } return props.getProperty("mount.parent"); } @@ -457,7 +455,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like system account already exists"); + logger.debug("Looks like system account already exists"); } // insert system user insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, user.default)" @@ -467,7 +465,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like system user already exists"); + logger.debug("Looks like 
system user already exists"); } // insert admin user, but leave the account disabled until we set a @@ -484,7 +482,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like admin account already exists"); + logger.debug("Looks like admin account already exists"); } // now insert the user @@ -495,7 +493,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.debug("Looks like admin user already exists"); + logger.debug("Looks like admin user already exists"); } try { @@ -526,12 +524,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.warn("Failed to create default security group for default admin account due to ", ex); + logger.warn("Failed to create default security group for default admin account due to ", ex); } } rs.close(); } catch (Exception ex) { - s_logger.warn("Failed to create default security group for default admin account due to ", ex); + logger.warn("Failed to create default security group for default admin account due to ", ex); } } }); @@ -567,9 +565,9 @@ protected void updateSystemvmPassword() { PreparedStatement stmt = txn.prepareAutoCloseStatement(wSql); stmt.setString(1, DBEncryptionUtil.encrypt(rpassword)); stmt.executeUpdate(); - s_logger.info("Updated systemvm password in database"); + logger.info("Updated systemvm password in database"); } catch (SQLException e) { - s_logger.error("Cannot retrieve systemvm password", e); + logger.error("Cannot retrieve systemvm password", e); } } @@ -583,7 +581,7 @@ public void updateKeyPairs() { String username = System.getProperty("user.name"); Boolean devel = 
Boolean.valueOf(_configDao.getValue("developer")); if (!username.equalsIgnoreCase("cloud") && !devel) { - s_logger.warn("Systemvm keypairs could not be set. Management server should be run as cloud user, or in development mode."); + logger.warn("Systemvm keypairs could not be set. Management server should be run as cloud user, or in development mode."); return; } String already = _configDao.getValue("ssh.privatekey"); @@ -592,12 +590,12 @@ public void updateKeyPairs() { throw new CloudRuntimeException("Cannot get home directory for account: " + username); } - if (s_logger.isInfoEnabled()) { - s_logger.info("Processing updateKeyPairs"); + if (logger.isInfoEnabled()) { + logger.info("Processing updateKeyPairs"); } if (homeDir != null && homeDir.startsWith("~")) { - s_logger.error("No home directory was detected for the user '" + username + "'. Please check the profile of this user."); + logger.error("No home directory was detected for the user '" + username + "'. Please check the profile of this user."); throw new CloudRuntimeException("No home directory was detected for the user '" + username + "'. Please check the profile of this user."); } @@ -613,8 +611,8 @@ public void updateKeyPairs() { } if (already == null || already.isEmpty()) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Systemvm keypairs not found in database. Need to store them in the database"); + if (logger.isInfoEnabled()) { + logger.info("Systemvm keypairs not found in database. Need to store them in the database"); } // FIXME: take a global database lock here for safety. 
boolean onWindows = isOnWindows(); @@ -627,13 +625,13 @@ public void updateKeyPairs() { try { privateKey = new String(Files.readAllBytes(privkeyfile.toPath())); } catch (IOException e) { - s_logger.error("Cannot read the private key file", e); + logger.error("Cannot read the private key file", e); throw new CloudRuntimeException("Cannot read the private key file"); } try { publicKey = new String(Files.readAllBytes(pubkeyfile.toPath())); } catch (IOException e) { - s_logger.error("Cannot read the public key file", e); + logger.error("Cannot read the public key file", e); throw new CloudRuntimeException("Cannot read the public key file"); } @@ -654,29 +652,29 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1); stmt1.executeUpdate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Private key inserted into database"); + if (logger.isDebugEnabled()) { + logger.debug("Private key inserted into database"); } } catch (SQLException ex) { - s_logger.error("SQL of the private key failed", ex); + logger.error("SQL of the private key failed", ex); throw new CloudRuntimeException("SQL of the private key failed"); } try { PreparedStatement stmt2 = txn.prepareAutoCloseStatement(insertSql2); stmt2.executeUpdate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Public key inserted into database"); + if (logger.isDebugEnabled()) { + logger.debug("Public key inserted into database"); } } catch (SQLException ex) { - s_logger.error("SQL of the public key failed", ex); + logger.error("SQL of the public key failed", ex); throw new CloudRuntimeException("SQL of the public key failed"); } } }); } else { - s_logger.info("Keypairs already in database, updating local copy"); + logger.info("Keypairs already in database, updating local copy"); updateKeyPairsOnDisk(homeDir); } try { @@ -708,7 +706,7 @@ private void writeKeyToDisk(String key, String keyPath) { try { keyfile.createNewFile(); } 
catch (IOException e) { - s_logger.warn("Failed to create file: " + e.toString()); + logger.warn("Failed to create file: " + e.toString()); throw new CloudRuntimeException("Failed to update keypairs on disk: cannot create key file " + keyPath); } } @@ -719,10 +717,10 @@ private void writeKeyToDisk(String key, String keyPath) { kStream.write(key.getBytes()); } } catch (FileNotFoundException e) { - s_logger.warn("Failed to write key to " + keyfile.getAbsolutePath(), e); + logger.warn("Failed to write key to " + keyfile.getAbsolutePath(), e); throw new CloudRuntimeException("Failed to update keypairs on disk: cannot find key file " + keyPath); } catch (IOException e) { - s_logger.warn("Failed to write key to " + keyfile.getAbsolutePath(), e); + logger.warn("Failed to write key to " + keyfile.getAbsolutePath(), e); throw new CloudRuntimeException("Failed to update keypairs on disk: cannot write to key file " + keyPath); } } @@ -733,7 +731,7 @@ private void updateKeyPairsOnDisk(String homeDir) { File keyDir = new File(homeDir + "/.ssh"); Boolean devel = Boolean.valueOf(_configDao.getValue("developer")); if (!keyDir.isDirectory()) { - s_logger.warn("Failed to create " + homeDir + "/.ssh for storing the SSH keypars"); + logger.warn("Failed to create " + homeDir + "/.ssh for storing the SSH keypars"); keyDir.mkdirs(); } String pubKey = _configDao.getValue("ssh.publickey"); @@ -750,7 +748,7 @@ private void updateKeyPairsOnDisk(String homeDir) { } protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) { - s_logger.info("Trying to copy private keys to hosts"); + logger.info("Trying to copy private keys to hosts"); String injectScript = getInjectScript(); String scriptPath = Script.findScript("", injectScript); if (scriptPath == null) { @@ -759,9 +757,9 @@ protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) { Script command = null; if(isOnWindows()) { - command = new Script("python", s_logger); + command = new Script("python", 
logger); } else { - command = new Script("/bin/bash", s_logger); + command = new Script("/bin/bash", logger); } if (isOnWindows()) { scriptPath = scriptPath.replaceAll("\\\\" ,"/" ); @@ -771,9 +769,9 @@ protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) { command.add(scriptPath); command.add(privKeyPath); final String result = command.execute(); - s_logger.info("The script injectkeys.sh was run with result : " + result); + logger.info("The script injectkeys.sh was run with result : " + result); if (result != null) { - s_logger.warn("The script injectkeys.sh failed to run successfully : " + result); + logger.warn("The script injectkeys.sh failed to run successfully : " + result); throw new CloudRuntimeException("The script injectkeys.sh failed to run successfully : " + result); } } @@ -801,7 +799,7 @@ protected void generateSecStorageVmCopyPassword() { if (already == null) { - s_logger.info("Need to store secondary storage vm copy password in the database"); + logger.info("Need to store secondary storage vm copy password in the database"); String password = PasswordGenerator.generateRandomPassword(12); final String insertSql1 = @@ -816,9 +814,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1); stmt1.executeUpdate(); - s_logger.debug("secondary storage vm copy password inserted into database"); + logger.debug("secondary storage vm copy password inserted into database"); } catch (SQLException ex) { - s_logger.warn("Failed to insert secondary storage vm copy password", ex); + logger.warn("Failed to insert secondary storage vm copy password", ex); } } }); @@ -829,7 +827,7 @@ private void updateSSOKey() { try { _configDao.update(Config.SSOKey.key(), Config.SSOKey.getCategory(), getPrivateKey()); } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating sso key", ex); + logger.error("error generating sso key", ex); } } @@ -842,14 
+840,14 @@ private void updateSecondaryStorageVMSharedKey() { if(configInDB == null) { ConfigurationVO configVO = new ConfigurationVO(Config.SSVMPSK.getCategory(), "DEFAULT", Config.SSVMPSK.getComponent(), Config.SSVMPSK.key(), getPrivateKey(), Config.SSVMPSK.getDescription()); - s_logger.info("generating a new SSVM PSK. This goes to SSVM on Start"); + logger.info("generating a new SSVM PSK. This goes to SSVM on Start"); _configDao.persist(configVO); } else if (StringUtils.isEmpty(configInDB.getValue())) { - s_logger.info("updating the SSVM PSK with new value. This goes to SSVM on Start"); + logger.info("updating the SSVM PSK with new value. This goes to SSVM on Start"); _configDao.update(Config.SSVMPSK.key(), Config.SSVMPSK.getCategory(), getPrivateKey()); } } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating ssvm psk", ex); + logger.error("error generating ssvm psk", ex); } } @@ -913,7 +911,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Intern } }); } catch (Exception e) { - s_logger.error("Unable to create new pod due to " + e.getMessage(), e); + logger.error("Unable to create new pod due to " + e.getMessage(), e); throw new InternalErrorException("Failed to create new pod. 
Please contact Cloud Support."); } @@ -1036,7 +1034,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedSGNetworkOffering.getId(), service, defaultSharedSGNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #2 @@ -1051,7 +1049,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedNetworkOffering.getId(), service, defaultSharedNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } NetworkOfferingVO defaultTungstenSharedSGNetworkOffering = @@ -1066,7 +1064,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultTungstenSharedSGNetworkOffering.getId(), service.getKey(), service.getValue()); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #3 @@ -1084,7 +1082,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { new NetworkOfferingServiceMapVO(defaultIsolatedSourceNatEnabledNetworkOffering.getId(), service, defaultIsolatedSourceNatEnabledNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #4 @@ -1099,7 +1097,7 @@ 
public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultIsolatedEnabledNetworkOffering.getId(), service, defaultIsolatedNetworkOfferingProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #5 @@ -1116,7 +1114,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetscalerNetworkOffering.getId(), service, netscalerServiceProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #6 @@ -1145,7 +1143,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetworkOfferingForVpcNetworks.getId(), entry.getKey(), entry.getValue()); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } // Offering #7 @@ -1172,7 +1170,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetworkOfferingForVpcNetworksNoLB.getId(), entry.getKey(), entry.getValue()); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } //offering #8 - network offering with internal lb service @@ -1196,7 +1194,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { for 
(Service service : internalLbOffProviders.keySet()) { NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(internalLbOff.getId(), service, internalLbOffProviders.get(service)); _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); + logger.trace("Added service for the network offering: " + offService); } _networkOfferingDao.persistDefaultL2NetworkOfferings(); @@ -1318,7 +1316,7 @@ public void updateResourceCount() { final int expectedCount = resourceTypes.length; if ((domainResourceCount.size() < expectedCount * domains.size())) { - s_logger.debug("resource_count table has records missing for some domains...going to insert them"); + logger.debug("resource_count table has records missing for some domains...going to insert them"); for (final DomainVO domain : domains) { // Lock domain Transaction.execute(new TransactionCallbackNoReturn() { @@ -1335,7 +1333,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { for (ResourceType resourceType : resourceTypes) { if (!domainCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, domain.getId(), ResourceOwnerType.Domain); - s_logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); + logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); _resourceCountDao.persist(resourceCountVO); } } @@ -1347,7 +1345,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if ((accountResourceCount.size() < expectedCount * accounts.size())) { - s_logger.debug("resource_count table has records missing for some accounts...going to insert them"); + logger.debug("resource_count table has records missing for some accounts...going to insert them"); for (final AccountVO account : accounts) { // lock account Transaction.execute(new TransactionCallbackNoReturn() { @@ 
-1364,7 +1362,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { for (ResourceType resourceType : resourceTypes) { if (!accountCountStr.contains(resourceType.toString())) { ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, account.getId(), ResourceOwnerType.Account); - s_logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); + logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); _resourceCountDao.persist(resourceCountVO); } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index a73ba9b092ca..8e94237b4867 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -634,7 +634,6 @@ import org.apache.commons.codec.binary.Base64; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -835,7 +834,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class ManagementServerImpl extends ManagerBase implements ManagementServer, Configurable { - public static final Logger s_logger = Logger.getLogger(ManagementServerImpl.class.getName()); protected StateMachine2 _stateMachine; static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); @@ -1117,7 +1115,7 @@ private void setStateMachine() { @Override public boolean start() { - s_logger.info("Startup CloudStack management server..."); + logger.info("Startup CloudStack management server..."); // Set human readable sizes NumbersUtil.enableHumanReadableSizes = _configDao.findByName("display.human.readable.sizes").getValue().equals("true"); @@ 
-1165,7 +1163,7 @@ protected void checkPortParameters(final String publicPort, final String private throw new InvalidParameterValueException("privatePort is an invalid value"); } - // s_logger.debug("Checking if " + privateIp + + // logger.debug("Checking if " + privateIp + // " is a valid private IP address. Guest IP address is: " + // _configs.get("guest.ip.network")); // @@ -1358,7 +1356,7 @@ protected Pair> filterUefiHostsForMigration(List a if (userVmDetailVO != null && (ApiConstants.BootMode.LEGACY.toString().equalsIgnoreCase(userVmDetailVO.getValue()) || ApiConstants.BootMode.SECURE.toString().equalsIgnoreCase(userVmDetailVO.getValue()))) { - s_logger.info(" Live Migration of UEFI enabled VM : " + vm.getInstanceName() + " is not supported"); + logger.info(" Live Migration of UEFI enabled VM : " + vm.getInstanceName() + " is not supported"); if (CollectionUtils.isEmpty(filteredHosts)) { filteredHosts = new ArrayList<>(allHosts); } @@ -1376,8 +1374,8 @@ protected Pair> filterUefiHostsForMigration(List a private void validateVmForHostMigration(VirtualMachine vm) { final Account caller = getCaller(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -1387,8 +1385,8 @@ private void validateVmForHostMigration(VirtualMachine vm) { } if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not running, cannot migrate the vm" + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not running, cannot migrate the vm" + vm); } final InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + "migrate the vm with specified id"); 
ex.addProxyObject(vm.getUuid(), "vmId"); @@ -1396,8 +1394,8 @@ private void validateVmForHostMigration(VirtualMachine vm) { } if (!LIVE_MIGRATION_SUPPORTING_HYPERVISORS.contains(vm.getHypervisorType())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM."); + if (logger.isDebugEnabled()) { + logger.debug(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv/Ovm3, cannot migrate this VM."); } throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " + "XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only"); } @@ -1421,7 +1419,7 @@ public Ternary, Integer>, List, Map(new Pair<>(new ArrayList<>(), 0), new ArrayList<>(), new HashMap<>()); @@ -1430,8 +1428,8 @@ public Ternary, Integer>, List, Map, Integer>, List, Map, Integer>, List, Map, List> listStorag if (!bypassAccountCheck) { final Account caller = getCaller(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the volume"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the volume"); } throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume"); } @@ -1683,7 +1681,7 @@ public Pair, List> listStorag // Volume must be in Ready state to be migrated. if (!Volume.State.Ready.equals(volume.getState())) { - s_logger.info("Volume " + volume + " must be in ready state for migration."); + logger.info("Volume " + volume + " must be in ready state for migration."); return new Pair, List>(allPools, suitablePools); } @@ -1694,11 +1692,11 @@ public Pair, List> listStorag } if (vm == null) { - s_logger.info("Volume " + volume + " isn't attached to any vm. Looking for storage pools in the " + "zone to which this volumes can be migrated."); + logger.info("Volume " + volume + " isn't attached to any vm. 
Looking for storage pools in the " + "zone to which this volumes can be migrated."); } else if (vm.getState() != State.Running) { - s_logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); + logger.info("Volume " + volume + " isn't attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); } else { - s_logger.info("Volume " + volume + " is attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); + logger.info("Volume " + volume + " is attached to any running vm. Looking for storage pools in the " + "cluster to which this volumes can be migrated."); boolean storageMotionSupported = false; // Check if the underlying hypervisor supports storage motion. final Long hostId = vm.getHostId(); @@ -1708,18 +1706,18 @@ public Pair, List> listStorag if (host != null) { capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(), host.getHypervisorVersion()); } else { - s_logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved."); + logger.error("Details of the host on which the vm " + vm + ", to which volume " + volume + " is " + "attached, couldn't be retrieved."); } if (capabilities != null) { storageMotionSupported = capabilities.isStorageMotionSupported(); } else { - s_logger.error("Capabilities for host " + host + " couldn't be retrieved."); + logger.error("Capabilities for host " + host + " couldn't be retrieved."); } } if (!storageMotionSupported) { - s_logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion."); + logger.info("Volume " + volume + " is attached to a running vm and the hypervisor doesn't support" + " storage motion."); return new Pair, List>(allPools, suitablePools); 
} } @@ -2304,10 +2302,10 @@ public Pair, Integer> searchForConfigurations(fina } configVOList.add(configVo); } else { - s_logger.warn("ConfigDepot could not find parameter " + param.getName() + " for scope " + scope); + logger.warn("ConfigDepot could not find parameter " + param.getName() + " for scope " + scope); } } else { - s_logger.warn("Configuration item " + param.getName() + " not found in " + scope); + logger.warn("Configuration item " + param.getName() + " not found in " + scope); } } @@ -2398,7 +2396,7 @@ public Pair, Integer> searchForIPAddresses(final ListP try { _accountMgr.checkAccess(caller, null, false, _accountDao.findById(networkMap.getAccountId())); } catch (PermissionDeniedException ex) { - s_logger.info("Account " + caller + " do not have permission to access account of network " + network); + logger.info("Account " + caller + " do not have permission to access account of network " + network); _accountMgr.checkAccess(caller, SecurityChecker.AccessType.UseEntry, false, network); isAllocated = Boolean.TRUE; } @@ -2415,7 +2413,7 @@ public Pair, Integer> searchForIPAddresses(final ListP } } else if (caller.getType() == Account.Type.DOMAIN_ADMIN || caller.getType() == Account.Type.RESOURCE_DOMAIN_ADMIN) { if (caller.getDomainId() == networkMap.getDomainId() || _domainDao.isChildDomain(caller.getDomainId(), networkMap.getDomainId())) { - s_logger.debug("Caller " + caller.getUuid() + " has permission to access the network : " + network.getUuid()); + logger.debug("Caller " + caller.getUuid() + " has permission to access the network : " + network.getUuid()); } else { if (_networkMgr.isNetworkAvailableInDomain(network.getId(), caller.getDomainId())) { isAllocated = Boolean.TRUE; @@ -2505,7 +2503,7 @@ public Pair, Integer> searchForIPAddresses(final ListP freeAddrs.addAll(_ipAddressMgr.listAvailablePublicIps(dcId, null, vlanDbIds, owner, VlanType.VirtualNetwork, associatedNetworkId, false, false, false, null, null, false, cmd.getVpcId(), cmd.isDisplay(), 
false, false)); // Free } catch (InsufficientAddressCapacityException e) { - s_logger.warn("no free address is found in zone " + dcId); + logger.warn("no free address is found in zone " + dcId); } } for (IPAddressVO addr: freeAddrs) { @@ -2513,7 +2511,7 @@ public Pair, Integer> searchForIPAddresses(final ListP } } else if (vlanType == VlanType.DirectAttached && network != null && !isAllocatedTemp && isAllocated) { if (caller.getType() != Account.Type.ADMIN && !IpAddressManager.AllowUserListAvailableIpsOnSharedNetwork.value()) { - s_logger.debug("Non-admin users are not allowed to list available IPs on shared networks"); + logger.debug("Non-admin users are not allowed to list available IPs on shared networks"); } else { final SearchBuilder searchBuilder = _publicIpAddressDao.createSearchBuilder(); buildParameters(searchBuilder, cmd, false); @@ -2897,7 +2895,7 @@ public GuestOS addGuestOs(final AddGuestOsCmd cmd) { throw new InvalidParameterValueException("The specified Guest OS name : " + displayName + " already exists. 
Please specify a unique name"); } - s_logger.debug("GuestOSDetails"); + logger.debug("GuestOSDetails"); final GuestOSVO guestOsVo = new GuestOSVO(); guestOsVo.setCategoryId(categoryId.longValue()); guestOsVo.setDisplayName(displayName); @@ -3101,7 +3099,7 @@ public Pair setConsoleAccessForVm(long vmId, String sessionUuid answer = _agentMgr.send(hostVO.getId(), cmd); } catch (AgentUnavailableException | OperationTimedoutException e) { String errorMsg = "Could not send allow session command to CPVM: " + e.getMessage(); - s_logger.error(errorMsg, e); + logger.error(errorMsg, e); return new Pair<>(false, errorMsg); } boolean result = false; @@ -3127,12 +3125,12 @@ public String getConsoleAccessAddress(long vmId) { @Override public Pair getVncPort(final VirtualMachine vm) { if (vm.getHostId() == null) { - s_logger.warn("VM " + vm.getHostName() + " does not have host, return -1 for its VNC port"); + logger.warn("VM " + vm.getHostName() + " does not have host, return -1 for its VNC port"); return new Pair(null, -1); } - if (s_logger.isTraceEnabled()) { - s_logger.trace("Trying to retrieve VNC port from agent about VM " + vm.getHostName()); + if (logger.isTraceEnabled()) { + logger.trace("Trying to retrieve VNC port from agent about VM " + vm.getHostName()); } GetVncPortAnswer answer = null; @@ -3958,30 +3956,30 @@ protected void runInContext() { try { final GlobalLock lock = GlobalLock.getInternLock("EventPurge"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { final Calendar purgeCal = Calendar.getInstance(); purgeCal.add(Calendar.DAY_OF_YEAR, -_purgeDelay); final Date purgeTime = purgeCal.getTime(); - s_logger.debug("Deleting events older than: " + purgeTime.toString()); + logger.debug("Deleting events older than: " + purgeTime.toString()); final List oldEvents = 
_eventDao.listOlderEvents(purgeTime); - s_logger.debug("Found " + oldEvents.size() + " events to be purged"); + logger.debug("Found " + oldEvents.size() + " events to be purged"); for (final EventVO event : oldEvents) { _eventDao.expunge(event.getId()); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -3992,30 +3990,30 @@ protected void runInContext() { try { final GlobalLock lock = GlobalLock.getInternLock("AlertPurge"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { final Calendar purgeCal = Calendar.getInstance(); purgeCal.add(Calendar.DAY_OF_YEAR, -_alertPurgeDelay); final Date purgeTime = purgeCal.getTime(); - s_logger.debug("Deleting alerts older than: " + purgeTime.toString()); + logger.debug("Deleting alerts older than: " + purgeTime.toString()); final List oldAlerts = _alertDao.listOlderAlerts(purgeTime); - s_logger.debug("Found " + oldAlerts.size() + " events to be purged"); + logger.debug("Found " + oldAlerts.size() + " events to be purged"); for (final AlertVO alert : oldAlerts) { _alertDao.expunge(alert.getId()); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (final Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -4268,8 +4266,8 @@ public VMInstanceVO destroySystemVM(final DestroySystemVmCmd cmd) { private String signRequest(final String request, final String key) { try { - s_logger.info("Request: " + request); - s_logger.info("Key: " + key); + logger.info("Request: " + request); + logger.info("Key: " + key); if (key != null && request != 
null) { final Mac mac = Mac.getInstance("HmacSHA1"); @@ -4280,7 +4278,7 @@ private String signRequest(final String request, final String key) { return new String(Base64.encodeBase64(encryptedBytes)); } } catch (final Exception ex) { - s_logger.error("unable to sign request", ex); + logger.error("unable to sign request", ex); } return null; } @@ -4313,7 +4311,7 @@ public ArrayList getCloudIdentifierResponse(final long userId) { final String input = cloudIdentifier; signature = signRequest(input, secretKey); } catch (final Exception e) { - s_logger.warn("Exception whilst creating a signature:" + e); + logger.warn("Exception whilst creating a signature:" + e); } final ArrayList cloudParams = new ArrayList(); @@ -4922,8 +4920,8 @@ private boolean updateHostsInCluster(final UpdateHostPasswordCmd command) { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { for (final HostVO h : hosts) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Changing password for host name = " + h.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Changing password for host name = " + h.getName()); } // update password for this host final DetailVO nv = _detailsDao.findDetail(h.getId(), ApiConstants.USERNAME); @@ -4989,8 +4987,8 @@ public boolean updateHostPassword(final UpdateHostPasswordCmd cmd) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Changing password for host name = " + host.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Changing password for host name = " + host.getName()); } // update password for this host final DetailVO nv = _detailsDao.findDetail(host.getId(), ApiConstants.USERNAME); @@ -5026,9 +5024,9 @@ public String[] listEventTypes() { } return eventTypes; } catch (final IllegalArgumentException e) { - s_logger.error("Error while listing Event Types", e); + 
logger.error("Error while listing Event Types", e); } catch (final IllegalAccessException e) { - s_logger.error("Error while listing Event Types", e); + logger.error("Error while listing Event Types", e); } return null; } @@ -5177,7 +5175,7 @@ private void enableAdminUser(final String password) { final UserVO adminUser = _userDao.getUser(2); if (adminUser == null) { final String msg = "CANNOT find admin user"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } if (adminUser.getState() == Account.State.DISABLED) { @@ -5194,7 +5192,7 @@ private void enableAdminUser(final String password) { adminUser.setPassword(encodedPassword); adminUser.setState(Account.State.ENABLED); _userDao.persist(adminUser); - s_logger.info("Admin user enabled"); + logger.info("Admin user enabled"); } } @@ -5211,8 +5209,8 @@ public List listDeploymentPlanners() { @Override public void cleanupVMReservations() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing cleanupVMReservations"); + if (logger.isDebugEnabled()) { + logger.debug("Processing cleanupVMReservations"); } _dpMgr.cleanupVMReservations(); @@ -5249,7 +5247,7 @@ private String getControlIp(final long systemVmId) { } if (controlIpAddress == null) { - s_logger.warn(String.format("Unable to find systemVm's control ip in its attached NICs!. systemVmId: %s", systemVmId)); + logger.warn(String.format("Unable to find systemVm's control ip in its attached NICs!. systemVmId: %s", systemVmId)); VMInstanceVO systemVM = _vmInstanceDao.findById(systemVmId); return systemVM.getPrivateIpAddress(); } @@ -5260,7 +5258,7 @@ private String getControlIp(final long systemVmId) { public Pair updateSystemVM(VMInstanceVO systemVM, boolean forced) { String msg = String.format("Unable to patch SystemVM: %s as it is not in Running state. 
Please destroy and recreate the SystemVM.", systemVM); if (systemVM.getState() != State.Running) { - s_logger.error(msg); + logger.error(msg); return new Pair<>(false, msg); } return patchSystemVm(systemVM, forced); @@ -5295,22 +5293,22 @@ private Pair patchSystemVm(VMInstanceVO systemVM, boolean force answer = (PatchSystemVmAnswer) answers[0]; if (!answer.getResult()) { String errMsg = String.format("Failed to patch systemVM %s due to %s", systemVM.getInstanceName(), answer.getDetails()); - s_logger.error(errMsg); + logger.error(errMsg); return new Pair<>(false, errMsg); } } catch (AgentUnavailableException | OperationTimedoutException e) { String errMsg = "SystemVM live patch failed"; - s_logger.error(errMsg, e); + logger.error(errMsg, e); return new Pair<>(false, String.format("%s due to: %s", errMsg, e.getMessage())); } - s_logger.info(String.format("Successfully patched system VM %s", systemVM.getInstanceName())); + logger.info(String.format("Successfully patched system VM %s", systemVM.getInstanceName())); List routerTypes = new ArrayList<>(); routerTypes.add(VirtualMachine.Type.DomainRouter); routerTypes.add(VirtualMachine.Type.InternalLoadBalancerVm); if (routerTypes.contains(systemVM.getType())) { boolean updated = updateRouterDetails(systemVM.getId(), answer.getScriptsVersion(), answer.getTemplateVersion()); if (!updated) { - s_logger.warn("Failed to update router's script and template version details"); + logger.warn("Failed to update router's script and template version details"); } } return new Pair<>(true, answer.getDetails()); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 96eeb5bc33c8..6623d8dcde8f 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -67,7 +67,6 @@ import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import 
org.apache.commons.lang3.time.DateUtils; -import org.apache.log4j.Logger; import org.influxdb.BatchOptions; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBFactory; @@ -216,7 +215,6 @@ public String toString() { } } - private static final Logger LOGGER = Logger.getLogger(StatsCollector.class); private static final int UNDEFINED_PORT_VALUE = -1; @@ -452,7 +450,7 @@ protected void init(Map configs) { try { externalStatsType = ExternalStatsProtocol.valueOf(externalStatsScheme.toUpperCase()); } catch (IllegalArgumentException e) { - LOGGER.error(externalStatsScheme + " is not a valid protocol for external statistics. No statistics will be send."); + logger.error(externalStatsScheme + " is not a valid protocol for external statistics. No statistics will be send."); } if (StringUtils.isNotEmpty(uri.getHost())) { @@ -475,7 +473,7 @@ protected void init(Map configs) { } } catch (URISyntaxException e) { - LOGGER.error("Failed to parse external statistics URI: ", e); + logger.error("Failed to parse external statistics URI: ", e); } } @@ -486,7 +484,7 @@ protected void init(Map configs) { if (vmStatsInterval > 0) { _executor.scheduleWithFixedDelay(new VmStatsCollector(), DEFAULT_INITIAL_DELAY, vmStatsInterval, TimeUnit.MILLISECONDS); } else { - LOGGER.info("Skipping collect VM stats. The global parameter vm.stats.interval is set to 0 or less than 0."); + logger.info("Skipping collect VM stats. 
The global parameter vm.stats.interval is set to 0 or less than 0."); } _executor.scheduleWithFixedDelay(new VmStatsCleaner(), DEFAULT_INITIAL_DELAY, 60000L, TimeUnit.MILLISECONDS); @@ -506,26 +504,26 @@ protected void init(Map configs) { if (vmDiskStatsInterval.value() > 0) { if (vmDiskStatsInterval.value() < vmDiskStatsIntervalMin.value()) { - LOGGER.debug("vm.disk.stats.interval - " + vmDiskStatsInterval.value() + " is smaller than vm.disk.stats.interval.min - " + vmDiskStatsIntervalMin.value() + logger.debug("vm.disk.stats.interval - " + vmDiskStatsInterval.value() + " is smaller than vm.disk.stats.interval.min - " + vmDiskStatsIntervalMin.value() + ", so use vm.disk.stats.interval.min"); _executor.scheduleAtFixedRate(new VmDiskStatsTask(), vmDiskStatsIntervalMin.value(), vmDiskStatsIntervalMin.value(), TimeUnit.SECONDS); } else { _executor.scheduleAtFixedRate(new VmDiskStatsTask(), vmDiskStatsInterval.value(), vmDiskStatsInterval.value(), TimeUnit.SECONDS); } } else { - LOGGER.debug("vm.disk.stats.interval - " + vmDiskStatsInterval.value() + " is 0 or less than 0, so not scheduling the vm disk stats thread"); + logger.debug("vm.disk.stats.interval - " + vmDiskStatsInterval.value() + " is 0 or less than 0, so not scheduling the vm disk stats thread"); } if (vmNetworkStatsInterval.value() > 0) { if (vmNetworkStatsInterval.value() < vmNetworkStatsIntervalMin.value()) { - LOGGER.debug("vm.network.stats.interval - " + vmNetworkStatsInterval.value() + " is smaller than vm.network.stats.interval.min - " + logger.debug("vm.network.stats.interval - " + vmNetworkStatsInterval.value() + " is smaller than vm.network.stats.interval.min - " + vmNetworkStatsIntervalMin.value() + ", so use vm.network.stats.interval.min"); _executor.scheduleAtFixedRate(new VmNetworkStatsTask(), vmNetworkStatsIntervalMin.value(), vmNetworkStatsIntervalMin.value(), TimeUnit.SECONDS); } else { _executor.scheduleAtFixedRate(new VmNetworkStatsTask(), vmNetworkStatsInterval.value(), 
vmNetworkStatsInterval.value(), TimeUnit.SECONDS); } } else { - LOGGER.debug("vm.network.stats.interval - " + vmNetworkStatsInterval.value() + " is 0 or less than 0, so not scheduling the vm network stats thread"); + logger.debug("vm.network.stats.interval - " + vmNetworkStatsInterval.value() + " is 0 or less than 0, so not scheduling the vm network stats thread"); } if (volumeStatsInterval > 0) { @@ -567,7 +565,7 @@ protected void init(Map configs) { _dailyOrHourly = false; } if (_usageAggregationRange < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - LOGGER.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); + logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); _usageAggregationRange = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; } @@ -578,7 +576,7 @@ protected void init(Map configs) { if (mgmtServerVo != null) { msId = mgmtServerVo.getId(); } else { - LOGGER.warn(String.format("Cannot find management server with msid [%s]. " + logger.warn(String.format("Cannot find management server with msid [%s]. 
" + "Therefore, VM stats will be recorded with the management server MAC address converted as a long in the mgmt_server_id column.", managementServerNodeId)); } } @@ -590,7 +588,7 @@ private void scheduleCollection(ConfigKey statusCollectionInterval, Abs statusCollectionInterval.value(), TimeUnit.SECONDS); } else { - LOGGER.debug(String.format("%s - %d is 0 or less, so not scheduling the status collector thread", + logger.debug(String.format("%s - %d is 0 or less, so not scheduling the status collector thread", statusCollectionInterval.key(), statusCollectionInterval.value())); } } @@ -650,7 +648,7 @@ protected void runInContext() { SearchCriteria sc = createSearchCriteriaForHostTypeRoutingStateUpAndNotInMaintenance(); List hosts = _hostDao.search(sc, null); - LOGGER.debug(String.format("HostStatsCollector is running to process %d UP hosts", hosts.size())); + logger.debug(String.format("HostStatsCollector is running to process %d UP hosts", hosts.size())); Map metrics = new HashMap<>(); for (HostVO host : hosts) { @@ -660,7 +658,7 @@ protected void runInContext() { metrics.put(hostStatsEntry.getHostId(), hostStatsEntry); _hostStats.put(host.getId(), hostStatsEntry); } else { - LOGGER.warn("The Host stats is null for host: " + host.getId()); + logger.warn("The Host stats is null for host: " + host.getId()); } } @@ -670,7 +668,7 @@ protected void runInContext() { updateGpuEnabledHostsDetails(hosts); } catch (Throwable t) { - LOGGER.error("Error trying to retrieve host stats", t); + logger.error("Error trying to retrieve host stats", t); } } @@ -709,7 +707,7 @@ class DbCollector extends AbstractStatsCollector { } @Override protected void runInContext() { - LOGGER.debug(String.format("%s is running...", this.getClass().getSimpleName())); + logger.debug(String.format("%s is running...", this.getClass().getSimpleName())); try { long lastUptime = (dbStats.containsKey(uptime) ? 
(Long) dbStats.get(uptime) : 0); @@ -724,9 +722,9 @@ protected void runInContext() { } } catch (Throwable e) { // pokemon catch to make sure the thread stays running - LOGGER.error("db statistics collection failed due to " + e.getLocalizedMessage()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("db statistics collection failed.", e); + logger.error("db statistics collection failed due to " + e.getLocalizedMessage()); + if (logger.isDebugEnabled()) { + logger.debug("db statistics collection failed.", e); } } } @@ -748,7 +746,7 @@ protected Point createInfluxDbPoint(Object metricsObject) { class ManagementServerCollector extends AbstractStatsCollector { @Override protected void runInContext() { - LOGGER.debug(String.format("%s is running...", this.getClass().getSimpleName())); + logger.debug(String.format("%s is running...", this.getClass().getSimpleName())); long msid = ManagementServerNode.getManagementServerId(); ManagementServerHostVO mshost = null; ManagementServerHostStatsEntry hostStatsEntry = null; @@ -761,14 +759,14 @@ protected void runInContext() { clusterManager.publishStatus(gson.toJson(hostStatsEntry)); } catch (Throwable t) { // pokemon catch to make sure the thread stays running - LOGGER.error("Error trying to retrieve management server host statistics", t); + logger.error("Error trying to retrieve management server host statistics", t); } try { // send to DB storeStatus(hostStatsEntry, mshost); } catch (Throwable t) { // pokemon catch to make sure the thread stays running - LOGGER.error("Error trying to store management server host statistics", t); + logger.error("Error trying to store management server host statistics", t); } } @@ -778,7 +776,7 @@ private void storeStatus(ManagementServerHostStatsEntry hostStatsEntry, Manageme } ManagementServerStatusVO msStats = managementServerStatusDao.findByMsId(hostStatsEntry.getManagementServerHostUuid()); if (msStats == null) { - LOGGER.info(String.format("creating new status info record for host %s - %s", 
+ logger.info(String.format("creating new status info record for host %s - %s", mshost.getName(), hostStatsEntry.getManagementServerHostUuid())); msStats = new ManagementServerStatusVO(); @@ -788,8 +786,8 @@ private void storeStatus(ManagementServerHostStatsEntry hostStatsEntry, Manageme msStats.setJavaName(hostStatsEntry.getJvmVendor()); msStats.setJavaVersion(hostStatsEntry.getJvmVersion()); Date startTime = new Date(hostStatsEntry.getJvmStartTime()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("reporting starttime %s", startTime)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("reporting starttime %s", startTime)); } msStats.setLastJvmStart(startTime); msStats.setLastSystemBoot(hostStatsEntry.getSystemBootTime()); @@ -800,14 +798,14 @@ private void storeStatus(ManagementServerHostStatsEntry hostStatsEntry, Manageme @NotNull private ManagementServerHostStatsEntry getDataFrom(ManagementServerHostVO mshost) { ManagementServerHostStatsEntry newEntry = new ManagementServerHostStatsEntry(); - LOGGER.trace("Metrics collection start..."); + logger.trace("Metrics collection start..."); newEntry.setManagementServerHostId(mshost.getId()); newEntry.setManagementServerHostUuid(mshost.getUuid()); newEntry.setDbLocal(isDbLocal()); newEntry.setUsageLocal(isUsageLocal()); retrieveSession(newEntry); getJvmDimensions(newEntry); - LOGGER.trace("Metrics collection extra..."); + logger.trace("Metrics collection extra..."); getRuntimeData(newEntry); getMemoryData(newEntry); // newEntry must now include a pid! 
@@ -817,17 +815,17 @@ private ManagementServerHostStatsEntry getDataFrom(ManagementServerHostVO mshost getFileSystemData(newEntry); getDataBaseStatistics(newEntry, mshost.getMsid()); gatherAllMetrics(newEntry); - LOGGER.trace("Metrics collection end!"); + logger.trace("Metrics collection end!"); return newEntry; } private void retrieveSession(ManagementServerHostStatsEntry newEntry) { long sessions = ApiSessionListener.getSessionCount(); newEntry.setSessions(sessions); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Sessions found in Api %d vs context %d", sessions,ApiSessionListener.getNumberOfSessions())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Sessions found in Api %d vs context %d", sessions,ApiSessionListener.getNumberOfSessions())); } else { - LOGGER.debug("Sessions active: " + sessions); + logger.debug("Sessions active: " + sessions); } } @@ -848,8 +846,8 @@ private void getCpuData(@NotNull ManagementServerHostStatsEntry newEntry) { java.lang.management.OperatingSystemMXBean bean = ManagementFactory.getOperatingSystemMXBean(); newEntry.setAvailableProcessors(bean.getAvailableProcessors()); newEntry.setLoadAverage(bean.getSystemLoadAverage()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format( + if (logger.isTraceEnabled()) { + logger.trace(String.format( "Metrics processors - %d , loadavg - %f ", newEntry.getAvailableProcessors(), newEntry.getLoadAverage())); @@ -866,8 +864,8 @@ private void getCpuData(@NotNull ManagementServerHostStatsEntry newEntry) { if (newEntry.getSystemMemoryUsed() <= 0) { newEntry.setSystemMemoryUsed(mxBean.getCommittedVirtualMemorySize()); } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("data from 'OperatingSystemMXBean': total mem: %d, free mem: %d, used mem: %d", + if (logger.isTraceEnabled()) { + logger.trace(String.format("data from 'OperatingSystemMXBean': total mem: %d, free mem: %d, used mem: %d", newEntry.getSystemMemoryTotal(), newEntry.getSystemMemoryFree(), 
newEntry.getSystemMemoryUsed())); @@ -883,8 +881,8 @@ private void getRuntimeData(@NotNull ManagementServerHostStatsEntry newEntry) { newEntry.setJvmName(mxBean.getName()); newEntry.setJvmVendor(mxBean.getVmVendor()); newEntry.setJvmVersion(mxBean.getVmVersion()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format( + if (logger.isTraceEnabled()) { + logger.trace(String.format( "Metrics uptime - %d , starttime - %d", newEntry.getJvmUptime(), newEntry.getJvmStartTime())); @@ -897,8 +895,8 @@ private void getJvmDimensions(@NotNull ManagementServerHostStatsEntry newEntry) newEntry.setFreeJvmMemoryBytes(runtime.freeMemory()); newEntry.setMaxJvmMemoryBytes(runtime.maxMemory()); //long maxMem = runtime.maxMemory(); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format( + if (logger.isTraceEnabled()) { + logger.trace(String.format( "Metrics proc - %d , maxMem - %d , totalMemory - %d , freeMemory - %f ", newEntry.getAvailableProcessors(), newEntry.getMaxJvmMemoryBytes(), @@ -922,17 +920,17 @@ private void getProcFileSystemData(@NotNull ManagementServerHostStatsEntry newEn if (newEntry.getSystemMemoryTotal() == 0) { String mem = Script.runSimpleBashScript("cat /proc/meminfo | grep MemTotal | cut -f 2 -d ':' | tr -d 'a-zA-z '").trim(); newEntry.setSystemMemoryTotal(Long.parseLong(mem) * ByteScaleUtils.KiB); - LOGGER.info(String.format("system memory from /proc: %d", newEntry.getSystemMemoryTotal())); + logger.info(String.format("system memory from /proc: %d", newEntry.getSystemMemoryTotal())); } if (newEntry.getSystemMemoryFree() == 0) { String free = Script.runSimpleBashScript("cat /proc/meminfo | grep MemFree | cut -f 2 -d ':' | tr -d 'a-zA-z '").trim(); newEntry.setSystemMemoryFree(Long.parseLong(free) * ByteScaleUtils.KiB); - LOGGER.info(String.format("free memory from /proc: %d", newEntry.getSystemMemoryFree())); + logger.info(String.format("free memory from /proc: %d", newEntry.getSystemMemoryFree())); } if (newEntry.getSystemMemoryUsed() <= 0) { 
String used = Script.runSimpleBashScript(String.format("ps -o rss= %d", newEntry.getPid())); newEntry.setSystemMemoryUsed(Long.parseLong(used)); - LOGGER.info(String.format("used memory from /proc: %d", newEntry.getSystemMemoryUsed())); + logger.info(String.format("used memory from /proc: %d", newEntry.getSystemMemoryUsed())); } try { String bootTime = Script.runSimpleBashScript("uptime -s"); @@ -940,7 +938,7 @@ private void getProcFileSystemData(@NotNull ManagementServerHostStatsEntry newEn Date date = formatter.parse(bootTime); newEntry.setSystemBootTime(date); } catch (ParseException e) { - LOGGER.error("can not retrieve system uptime"); + logger.error("can not retrieve system uptime"); } String maxuse = Script.runSimpleBashScript(String.format("ps -o vsz= %d", newEntry.getPid())); newEntry.setSystemMemoryVirtualSize(Long.parseLong(maxuse) * 1024); @@ -949,8 +947,8 @@ private void getProcFileSystemData(@NotNull ManagementServerHostStatsEntry newEn newEntry.setSystemLoadAverages(getCpuLoads()); newEntry.setSystemCyclesUsage(getSystemCpuUsage()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace( + if (logger.isTraceEnabled()) { + logger.trace( String.format("cpu\ncapacities: %f\n loads: %s ; %s ; %s\n stats: %d ; %d ; %d", newEntry.getSystemTotalCpuCycles(), newEntry.getSystemLoadAverages()[0], newEntry.getSystemLoadAverages()[1], newEntry.getSystemLoadAverages()[2], @@ -999,8 +997,8 @@ private void getFileSystemData(@NotNull ManagementServerHostStatsEntry newEntry) logInfoBuilder.append(fileName).append(" using: ").append(du).append('\n').append(df); } newEntry.setLogInfo(logInfoBuilder.toString()); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("log stats:\n" + newEntry.getLogInfo()); + if (logger.isTraceEnabled()) { + logger.trace("log stats:\n" + newEntry.getLogInfo()); } } @@ -1009,8 +1007,8 @@ private void gatherAllMetrics(ManagementServerHostStatsEntry metricsEntry) { for (String metricName : METRIC_REGISTRY.getGauges().keySet()) { Object value = 
getMetric(metricName); metricDetails.put(metricName, value); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Metrics collection '%s'=%s", metricName, value)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Metrics collection '%s'=%s", metricName, value)); } // gather what we need from this list extractDetailToField(metricsEntry, metricName, value); @@ -1054,7 +1052,7 @@ private void extractDetailToField(ManagementServerHostStatsEntry metricsEntry, S case "threadsnew.count": case "threadstimed_waiting.count": default: - LOGGER.debug(String.format("not storing detail %s, %s", metricName, value)); + logger.debug(String.format("not storing detail %s, %s", metricName, value)); /* * 'buffers.direct.capacity'=8192 type=Long * 'buffers.direct.count'=1 type=Long @@ -1122,7 +1120,7 @@ protected Point createInfluxDbPoint(Object metricsObject) { protected boolean isUsageLocal() { boolean local = false; String usageInstall = Script.runSimpleBashScript("systemctl status cloudstack-usage | grep \" Loaded:\""); - LOGGER.debug(String.format("usage install: %s", usageInstall)); + logger.debug(String.format("usage install: %s", usageInstall)); if (StringUtils.isNotBlank(usageInstall)) { local = usageInstall.contains("enabled"); @@ -1152,8 +1150,8 @@ protected Properties getDbProperties() { protected class ManagementServerStatusAdministrator implements ClusterManager.StatusAdministrator, ClusterManagerListener { @Override public String newStatus(ClusterServicePdu pdu) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("StatusUpdate from %s, json: %s", pdu.getSourcePeer(), pdu.getJsonPackage())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("StatusUpdate from %s, json: %s", pdu.getSourcePeer(), pdu.getJsonPackage())); } ManagementServerHostStatsEntry hostStatsEntry = null; @@ -1161,9 +1159,9 @@ public String newStatus(ClusterServicePdu pdu) { hostStatsEntry = gson.fromJson(pdu.getJsonPackage(),new TypeToken(){}.getType()); 
managementServerHostStats.put(hostStatsEntry.getManagementServerHostUuid(), hostStatsEntry); } catch (JsonParseException e) { - LOGGER.error("Exception in decoding of other MS hosts status from : " + pdu.getSourcePeer()); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Exception in decoding of other MS hosts status: ", e); + logger.error("Exception in decoding of other MS hosts status from : " + pdu.getSourcePeer()); + if (logger.isDebugEnabled()) { + logger.debug("Exception in decoding of other MS hosts status: ", e); } } return null; @@ -1178,14 +1176,14 @@ public void onManagementNodeJoined(List nodeList public void onManagementNodeLeft(List nodeList, long selfNodeId) { // remove the status for those ones for (ManagementServerHost node : nodeList) { - LOGGER.info(String.format("node %s (%s) at %s (%od) is reported to have left the cluster, invalidating status.",node.getName(), node.getUuid(), node.getServiceIP(), node.getMsid())); + logger.info(String.format("node %s (%s) at %s (%od) is reported to have left the cluster, invalidating status.",node.getName(), node.getUuid(), node.getServiceIP(), node.getMsid())); managementServerHostStats.remove(node.getUuid()); } } @Override public void onManagementNodeIsolated() { - LOGGER.error(String.format("This management server is reported to be isolated (msid %d", mgmtSrvrId)); + logger.error(String.format("This management server is reported to be isolated (msid %d", mgmtSrvrId)); // not sure if anything should be done now. 
} } @@ -1197,7 +1195,7 @@ protected void runInContext() { SearchCriteria sc = createSearchCriteriaForHostTypeRoutingStateUpAndNotInMaintenance(); List hosts = _hostDao.search(sc, null); - LOGGER.debug(String.format("VmStatsCollector is running to process VMs across %d UP hosts", hosts.size())); + logger.debug(String.format("VmStatsCollector is running to process VMs across %d UP hosts", hosts.size())); Map metrics = new HashMap<>(); for (HostVO host : hosts) { @@ -1234,13 +1232,13 @@ protected void runInContext() { metrics.clear(); } } catch (Exception e) { - LOGGER.debug("Failed to get VM stats for host with ID: " + host.getId()); + logger.debug("Failed to get VM stats for host with ID: " + host.getId()); continue; } } } catch (Throwable t) { - LOGGER.error("Error trying to retrieve VM stats", t); + logger.error("Error trying to retrieve VM stats", t); } } @@ -1352,7 +1350,7 @@ protected void runInContext() { //msHost in UP state with min id should run the job ManagementServerHostVO msHost = managementServerHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", true, 0L, 1L)); if (msHost == null || (msHost.getMsid() != mgmtSrvrId)) { - LOGGER.debug("Skipping aggregate disk stats update"); + logger.debug("Skipping aggregate disk stats update"); scanLock.unlock(); return; } @@ -1372,17 +1370,17 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _vmDiskStatsDao.update(stat.getId(), stat); } } - LOGGER.debug("Successfully updated aggregate vm disk stats"); + logger.debug("Successfully updated aggregate vm disk stats"); } }); } catch (Exception e) { - LOGGER.debug("Failed to update aggregate disk stats", e); + logger.debug("Failed to update aggregate disk stats", e); } finally { scanLock.unlock(); } } } catch (Exception e) { - LOGGER.debug("Exception while trying to acquire disk stats lock", e); + logger.debug("Exception while trying to acquire disk stats lock", e); } finally { scanLock.releaseRef(); } @@ -1390,8 +1388,8 @@ public 
void doInTransactionWithoutResult(TransactionStatus status) { } private void logLessLatestStatDiscrepancy(String prefix, String hostName, String vmName, long reported, long stored, boolean toHumanReadable) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("%s that's less than the last one. Assuming something went wrong and persisting it. Host: %s . VM: %s Reported: %s Stored: %s", + if (logger.isDebugEnabled()) { + logger.debug(String.format("%s that's less than the last one. Assuming something went wrong and persisting it. Host: %s . VM: %s Reported: %s Stored: %s", prefix, hostName, vmName, toHumanReadable ? toHumanReadableSize(reported) : reported, toHumanReadable ? toHumanReadableSize(stored) : stored)); } } @@ -1404,11 +1402,11 @@ protected void runInContext() { ManagementServerHostVO msHost = managementServerHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", true, 0L, 1L)); boolean persistVolumeStats = vmDiskStatsRetentionEnabled.value(); if (msHost == null || (msHost.getMsid() != mgmtSrvrId)) { - LOGGER.debug("Skipping collect vm disk stats from hosts"); + logger.debug("Skipping collect vm disk stats from hosts"); return; } // collect the vm disk statistics(total) from hypervisor. added by weizhou, 2013.03. - LOGGER.debug("VmDiskStatsTask is running..."); + logger.debug("VmDiskStatsTask is running..."); SearchCriteria sc = createSearchCriteriaForHostTypeRoutingStateUpAndNotInMaintenance(); sc.addAnd("hypervisorType", SearchCriteria.Op.IN, HypervisorType.KVM, HypervisorType.VMware); @@ -1449,18 +1447,18 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } if (areAllDiskStatsZero(vmDiskStatEntry)) { - LOGGER.debug("IO/bytes read and write are all 0. Not updating vm_disk_statistics"); + logger.debug("IO/bytes read and write are all 0. 
Not updating vm_disk_statistics"); continue; } if (vmDiskStat_lock == null) { - LOGGER.warn("unable to find vm disk stats from host for account: " + vm.getAccountId() + " with vmId: " + vm.getId() + logger.warn("unable to find vm disk stats from host for account: " + vm.getAccountId() + " with vmId: " + vm.getId() + " and volumeId:" + volume.getId()); continue; } if (isCurrentVmDiskStatsDifferentFromPrevious(previousVmDiskStats, vmDiskStat_lock)) { - LOGGER.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmDiskStatEntry.getVmName() + " Read(Bytes): " + toHumanReadableSize(vmDiskStatEntry.getBytesRead()) + " write(Bytes): " + toHumanReadableSize(vmDiskStatEntry.getBytesWrite()) + " Read(IO): " + toHumanReadableSize(vmDiskStatEntry.getIORead()) + " write(IO): " + toHumanReadableSize(vmDiskStatEntry.getIOWrite())); continue; @@ -1501,7 +1499,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - LOGGER.warn(String.format("Error while collecting vm disk stats from host %s : ", host.getName()), e); + logger.warn(String.format("Error while collecting vm disk stats from host %s : ", host.getName()), e); } } } @@ -1514,11 +1512,11 @@ protected void runInContext() { //msHost in UP state with min id should run the job ManagementServerHostVO msHost = managementServerHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", true, 0L, 1L)); if (msHost == null || (msHost.getMsid() != mgmtSrvrId)) { - LOGGER.debug("Skipping collect vm network stats from hosts"); + logger.debug("Skipping collect vm network stats from hosts"); return; } // collect the vm network statistics(total) from hypervisor - LOGGER.debug("VmNetworkStatsTask is running..."); + logger.debug("VmNetworkStatsTask is 
running..."); SearchCriteria sc = createSearchCriteriaForHostTypeRoutingStateUpAndNotInMaintenance(); List hosts = _hostDao.search(sc, null); @@ -1540,10 +1538,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { continue; UserVmVO userVm = _userVmDao.findById(vmId); if (userVm == null) { - LOGGER.debug("Cannot find uservm with id: " + vmId + " , continue"); + logger.debug("Cannot find uservm with id: " + vmId + " , continue"); continue; } - LOGGER.debug("Now we are updating the user_statistics table for VM: " + userVm.getInstanceName() + logger.debug("Now we are updating the user_statistics table for VM: " + userVm.getInstanceName() + " after collecting vm network statistics from host: " + host.getName()); for (VmNetworkStats vmNetworkStat : vmNetworkStats) { VmNetworkStatsEntry vmNetworkStatEntry = (VmNetworkStatsEntry)vmNetworkStat; @@ -1564,19 +1562,19 @@ public void doInTransactionWithoutResult(TransactionStatus status) { nic.getIPv4Address(), vmId, "UserVm"); if ((vmNetworkStatEntry.getBytesSent() == 0) && (vmNetworkStatEntry.getBytesReceived() == 0)) { - LOGGER.debug("bytes sent and received are all 0. Not updating user_statistics"); + logger.debug("bytes sent and received are all 0. Not updating user_statistics"); continue; } if (vmNetworkStat_lock == null) { - LOGGER.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and nicId:" + nic.getId()); continue; } if (previousvmNetworkStats != null && ((previousvmNetworkStats.getCurrentBytesSent() != vmNetworkStat_lock.getCurrentBytesSent()) || (previousvmNetworkStats.getCurrentBytesReceived() != vmNetworkStat_lock.getCurrentBytesReceived()))) { - LOGGER.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " + "Ignoring current answer. 
Host: " + logger.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmNetworkStatEntry.getVmName() + " Sent(Bytes): " + vmNetworkStatEntry.getBytesSent() + " Received(Bytes): " + vmNetworkStatEntry.getBytesReceived()); continue; @@ -1606,7 +1604,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - LOGGER.warn(String.format("Error while collecting vm network stats from host %s : ", host.getName()), e); + logger.warn(String.format("Error while collecting vm network stats from host %s : ", host.getName()), e); } } } @@ -1623,7 +1621,7 @@ protected void runInContext() { for (VolumeVO volume : volumes) { if (!List.of(ImageFormat.QCOW2, ImageFormat.VHD, ImageFormat.OVA, ImageFormat.RAW).contains(volume.getFormat()) && !List.of(Storage.StoragePoolType.PowerFlex, Storage.StoragePoolType.FiberChannel).contains(pool.getPoolType())) { - LOGGER.warn("Volume stats not implemented for this format type " + volume.getFormat()); + logger.warn("Volume stats not implemented for this format type " + volume.getFormat()); break; } } @@ -1649,12 +1647,12 @@ protected void runInContext() { } } } catch (Exception e) { - LOGGER.warn("Failed to get volume stats for cluster with ID: " + pool.getClusterId(), e); + logger.warn("Failed to get volume stats for cluster with ID: " + pool.getClusterId(), e); continue; } } } catch (Throwable t) { - LOGGER.error("Error trying to retrieve volume stats", t); + logger.error("Error trying to retrieve volume stats", t); } } } @@ -1670,8 +1668,8 @@ class StorageCollector extends ManagedContextRunnable { @Override protected void runInContext() { try { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("StorageCollector is running..."); + if (logger.isDebugEnabled()) { + logger.debug("StorageCollector is running..."); } List stores = _dataStoreMgr.listImageStores(); @@ -1685,14 +1683,14 @@ protected void 
runInContext() { GetStorageStatsCommand command = new GetStorageStatsCommand(store.getTO(), nfsVersion); EndPoint ssAhost = _epSelector.select(store); if (ssAhost == null) { - LOGGER.debug("There is no secondary storage VM for secondary storage host " + store.getName()); + logger.debug("There is no secondary storage VM for secondary storage host " + store.getName()); continue; } long storeId = store.getId(); Answer answer = ssAhost.sendMessage(command); if (answer != null && answer.getResult()) { storageStats.put(storeId, (StorageStats)answer); - LOGGER.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); + logger.trace("HostId: " + storeId + " Used: " + toHumanReadableSize(((StorageStats)answer).getByteUsed()) + " Total Available: " + toHumanReadableSize(((StorageStats)answer).getCapacityBytes())); } } _storageStats = storageStats; @@ -1718,7 +1716,7 @@ protected void runInContext() { pool.setCapacityBytes(((StorageStats)answer).getCapacityBytes()); poolNeedsUpdating = true; } else { - LOGGER.warn("Not setting capacity bytes, received " + ((StorageStats)answer).getCapacityBytes() + " capacity for pool ID " + poolId); + logger.warn("Not setting capacity bytes, received " + ((StorageStats)answer).getCapacityBytes() + " capacity for pool ID " + poolId); } } if (pool.getUsedBytes() != ((StorageStats)answer).getByteUsed() && (pool.getStorageProviderName().equalsIgnoreCase(DataStoreProvider.DEFAULT_PRIMARY) || _storageManager.canPoolProvideStorageStats(pool))) { @@ -1731,14 +1729,14 @@ protected void runInContext() { } } } catch (StorageUnavailableException e) { - LOGGER.info("Unable to reach " + pool, e); + logger.info("Unable to reach " + pool, e); } catch (Exception e) { - LOGGER.warn("Unable to get stats for " + pool, e); + logger.warn("Unable to get stats for " + pool, e); } } _storagePoolStats = storagePoolStats; } catch 
(Throwable t) { - LOGGER.error("Error trying to retrieve storage stats", t); + logger.error("Error trying to retrieve storage stats", t); } } } @@ -1747,20 +1745,20 @@ class AutoScaleMonitor extends ManagedContextRunnable { @Override protected void runInContext() { try { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("AutoScaling Monitor is running..."); + if (logger.isDebugEnabled()) { + logger.debug("AutoScaling Monitor is running..."); } //msHost in UP state with min id should run the job ManagementServerHostVO msHost = managementServerHostDao.findOneInUpState(new Filter(ManagementServerHostVO.class, "id", true, 0L, 1L)); if (msHost == null || (msHost.getMsid() != mgmtSrvrId)) { - LOGGER.debug("Skipping AutoScaling Monitor"); + logger.debug("Skipping AutoScaling Monitor"); return; } _asManager.checkAllAutoScaleVmGroups(); } catch (Throwable t) { - LOGGER.error("Error trying to monitor autoscaling", t); + logger.error("Error trying to monitor autoscaling", t); } } @@ -1787,7 +1785,7 @@ protected void sendMetricsToInfluxdb(Map metrics) { Collection metricsObjects = metrics.values(); List points = new ArrayList<>(); - LOGGER.debug(String.format("Sending stats to %s host %s:%s", externalStatsType, externalStatsHost, externalStatsPort)); + logger.debug(String.format("Sending stats to %s host %s:%s", externalStatsType, externalStatsHost, externalStatsPort)); for (Object metricsObject : metricsObjects) { Point vmPoint = createInfluxDbPoint(metricsObject); @@ -1814,7 +1812,7 @@ public boolean imageStoreHasEnoughCapacity(DataStore imageStore) { StorageStats imageStoreStats = _storageStats.get(imageStoreId); if (imageStoreStats == null) { - LOGGER.debug(String.format("Stats for image store [%s] not found.", imageStoreId)); + logger.debug(String.format("Stats for image store [%s] not found.", imageStoreId)); return false; } @@ -1824,13 +1822,13 @@ public boolean imageStoreHasEnoughCapacity(DataStore imageStore) { String readableTotalCapacity = 
FileUtils.byteCountToDisplaySize((long) totalCapacity); String readableUsedCapacity = FileUtils.byteCountToDisplaySize((long) usedCapacity); - LOGGER.debug(String.format("Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); + logger.debug(String.format("Verifying image storage [%s]. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); if (usedCapacity / totalCapacity <= threshold) { return true; } - LOGGER.warn(String.format("Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); + logger.warn(String.format("Image storage [%s] has not enough capacity. Capacity: total=[%s], used=[%s], threshold=[%s%%].", imageStoreId, readableTotalCapacity, readableUsedCapacity, threshold * 100)); return false; } @@ -1857,12 +1855,12 @@ public boolean imageStoreHasEnoughCapacity(DataStore imageStore, Double storeCap * Sends VMs metrics to the configured graphite host. 
*/ protected void sendVmMetricsToGraphiteHost(Map metrics, HostVO host) { - LOGGER.debug(String.format("Sending VmStats of host %s to %s host %s:%s", host.getId(), externalStatsType, externalStatsHost, externalStatsPort)); + logger.debug(String.format("Sending VmStats of host %s to %s host %s:%s", host.getId(), externalStatsType, externalStatsHost, externalStatsPort)); try { GraphiteClient g = new GraphiteClient(externalStatsHost, externalStatsPort); g.sendMetrics(metrics); } catch (GraphiteException e) { - LOGGER.debug("Failed sending VmStats to Graphite host " + externalStatsHost + ":" + externalStatsPort + ": " + e.getMessage()); + logger.debug("Failed sending VmStats to Graphite host " + externalStatsHost + ":" + externalStatsPort + ": " + e.getMessage()); } } @@ -1900,7 +1898,7 @@ protected void persistVirtualMachineStats(VmStatsEntry statsForCurrentIteration, statsForCurrentIteration.getDiskWriteKBs(), statsForCurrentIteration.getDiskReadIOs(), statsForCurrentIteration.getDiskWriteIOs(), statsForCurrentIteration.getEntityType()); VmStatsVO vmStatsVO = new VmStatsVO(statsForCurrentIteration.getVmId(), msId, timestamp, gson.toJson(vmStats)); - LOGGER.trace(String.format("Recording VM stats: [%s].", vmStatsVO.toString())); + logger.trace(String.format("Recording VM stats: [%s].", vmStatsVO.toString())); vmStatsDao.persist(vmStatsVO); } @@ -1931,8 +1929,8 @@ private String getVmDiskStatsEntryAsString(VmDiskStatsEntry statsForCurrentItera */ protected void persistVolumeStats(long volumeId, VmDiskStatsEntry statsForCurrentIteration, Hypervisor.HypervisorType hypervisorType, Date timestamp) { VolumeStatsVO volumeStatsVO = new VolumeStatsVO(volumeId, msId, timestamp, getVmDiskStatsEntryAsString(statsForCurrentIteration, hypervisorType)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Recording volume stats: [%s].", volumeStatsVO)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Recording volume stats: [%s].", volumeStatsVO)); } 
volumeStatsDao.persist(volumeStatsVO); } @@ -1944,11 +1942,11 @@ protected void persistVolumeStats(long volumeId, VmDiskStatsEntry statsForCurren protected void cleanUpVirtualMachineStats() { Integer maxRetentionTime = vmStatsMaxRetentionTime.value(); if (maxRetentionTime <= 0) { - LOGGER.debug(String.format("Skipping VM stats cleanup. The [%s] parameter [%s] is set to 0 or less than 0.", + logger.debug(String.format("Skipping VM stats cleanup. The [%s] parameter [%s] is set to 0 or less than 0.", vmStatsMaxRetentionTime.scope(), vmStatsMaxRetentionTime.toString())); return; } - LOGGER.trace("Removing older VM stats records."); + logger.trace("Removing older VM stats records."); Date now = new Date(); Date limit = DateUtils.addMinutes(now, -maxRetentionTime); vmStatsDao.removeAllByTimestampLessThan(limit); @@ -1961,13 +1959,13 @@ protected void cleanUpVirtualMachineStats() { protected void cleanUpVolumeStats() { Integer maxRetentionTime = vmDiskStatsMaxRetentionTime.value(); if (maxRetentionTime <= 0) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Skipping Volume stats cleanup. The [%s] parameter [%s] is set to 0 or less than 0.", + if (logger.isDebugEnabled()) { + logger.debug(String.format("Skipping Volume stats cleanup. 
The [%s] parameter [%s] is set to 0 or less than 0.", vmDiskStatsMaxRetentionTime.scope(), vmDiskStatsMaxRetentionTime.toString())); } return; } - LOGGER.trace("Removing older Volume stats records."); + logger.trace("Removing older Volume stats records."); Date now = new Date(); Date limit = DateUtils.addMinutes(now, -maxRetentionTime); volumeStatsDao.removeAllByTimestampLessThan(limit); diff --git a/server/src/main/java/com/cloud/servlet/CloudStartupServlet.java b/server/src/main/java/com/cloud/servlet/CloudStartupServlet.java index 5840f3d8c847..03933a8ad0e2 100644 --- a/server/src/main/java/com/cloud/servlet/CloudStartupServlet.java +++ b/server/src/main/java/com/cloud/servlet/CloudStartupServlet.java @@ -23,7 +23,6 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; -import org.apache.log4j.Logger; import org.springframework.web.context.support.SpringBeanAutowiringSupport; import com.cloud.utils.LogUtils; @@ -32,7 +31,6 @@ import com.cloud.utils.db.TransactionLegacy; public class CloudStartupServlet extends HttpServlet { - public static final Logger s_logger = Logger.getLogger(CloudStartupServlet.class.getName()); static final long serialVersionUID = SerialVersionUID.CloudStartupServlet; Timer _timer = new Timer(); diff --git a/server/src/main/java/com/cloud/servlet/ConsoleProxyPasswordBasedEncryptor.java b/server/src/main/java/com/cloud/servlet/ConsoleProxyPasswordBasedEncryptor.java index 8f469e400240..91ccb71d27c8 100644 --- a/server/src/main/java/com/cloud/servlet/ConsoleProxyPasswordBasedEncryptor.java +++ b/server/src/main/java/com/cloud/servlet/ConsoleProxyPasswordBasedEncryptor.java @@ -17,7 +17,8 @@ package com.cloud.servlet; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.google.gson.Gson; import com.google.gson.GsonBuilder; @@ -27,7 +28,7 @@ // To maintain independency of console proxy project, 
we duplicate this class from console proxy project public class ConsoleProxyPasswordBasedEncryptor { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyPasswordBasedEncryptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private Gson gson; diff --git a/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java b/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java index 83c359a96f94..ad884a33406e 100644 --- a/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/main/java/com/cloud/servlet/ConsoleProxyServlet.java @@ -37,7 +37,8 @@ import org.apache.cloudstack.framework.security.keys.KeysManager; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import org.springframework.web.context.support.SpringBeanAutowiringSupport; @@ -67,7 +68,7 @@ @Component("consoleServlet") public class ConsoleProxyServlet extends HttpServlet { private static final long serialVersionUID = -5515382620323808168L; - public static final Logger s_logger = Logger.getLogger(ConsoleProxyServlet.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(ConsoleProxyServlet.class); private static final int DEFAULT_THUMBNAIL_WIDTH = 144; private static final int DEFAULT_THUMBNAIL_HEIGHT = 110; @@ -112,7 +113,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { } if (_keysMgr.getHashKey() == null) { - s_logger.debug("Console/thumbnail access denied. Ticket service is not ready yet"); + LOGGER.debug("Console/thumbnail access denied. 
Ticket service is not ready yet"); sendResponse(resp, "Service is not ready"); return; } @@ -131,7 +132,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { account = (String)params.get("account")[0]; accountObj = (Account)params.get("accountobj")[0]; } else { - s_logger.debug("Invalid web session or API key in request, reject console/thumbnail access"); + LOGGER.debug("Invalid web session or API key in request, reject console/thumbnail access"); sendResponse(resp, "Access denied. Invalid web session or API key in request"); return; } @@ -149,7 +150,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { // Do a sanity check here to make sure the user hasn't already been deleted if ((userId == null) || (account == null) || (accountObj == null) || !verifyUser(Long.valueOf(userId))) { - s_logger.debug("Invalid user/account, reject console/thumbnail access"); + LOGGER.debug("Invalid user/account, reject console/thumbnail access"); sendResponse(resp, "Access denied. 
Invalid or inconsistent account is found"); return; } @@ -158,9 +159,9 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { if (cmd == null || !isValidCmd(cmd)) { if (cmd != null) { cmd = cmd.replaceAll(SANITIZATION_REGEX, "_"); - s_logger.debug(String.format("invalid console servlet command [%s].", cmd)); + LOGGER.debug(String.format("invalid console servlet command [%s].", cmd)); } else { - s_logger.debug("Null console servlet command."); + LOGGER.debug("Null console servlet command."); } sendResponse(resp, ""); @@ -172,9 +173,9 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { if (vm == null) { if (vmIdString != null) { vmIdString = vmIdString.replaceAll(SANITIZATION_REGEX, "_"); - s_logger.info(String.format("invalid console servlet command vm parameter[%s].", vmIdString)); + LOGGER.info(String.format("invalid console servlet command vm parameter[%s].", vmIdString)); } else { - s_logger.info("Null console servlet command VM parameter."); + LOGGER.info("Null console servlet command VM parameter."); } sendResponse(resp, ""); @@ -194,7 +195,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { handleAuthRequest(req, resp, vmId); } } catch (Exception e) { - s_logger.error("Unexepected exception in ConsoleProxyServlet", e); + LOGGER.error("Unexepected exception in ConsoleProxyServlet", e); sendResponse(resp, "Server Internal Error"); } } @@ -202,20 +203,20 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) { private void handleThumbnailRequest(HttpServletRequest req, HttpServletResponse resp, long vmId) { VirtualMachine vm = _vmMgr.findById(vmId); if (vm == null) { - s_logger.warn("VM " + vmId + " does not exist, sending blank response for thumbnail request"); + LOGGER.warn("VM " + vmId + " does not exist, sending blank response for thumbnail request"); sendResponse(resp, ""); return; } if (vm.getHostId() == null) { - s_logger.warn("VM " + vmId + " lost host info, sending 
blank response for thumbnail request"); + LOGGER.warn("VM " + vmId + " lost host info, sending blank response for thumbnail request"); sendResponse(resp, ""); return; } HostVO host = _ms.getHostBy(vm.getHostId()); if (host == null) { - s_logger.warn("VM " + vmId + "'s host does not exist, sending blank response for thumbnail request"); + LOGGER.warn("VM " + vmId + "'s host does not exist, sending blank response for thumbnail request"); sendResponse(resp, ""); return; } @@ -233,20 +234,20 @@ private void handleThumbnailRequest(HttpServletRequest req, HttpServletResponse try { w = Integer.parseInt(value); } catch (NumberFormatException e) { - s_logger.info("[ignored] not a number: " + value); + LOGGER.info("[ignored] not a number: " + value); } value = req.getParameter("h"); try { h = Integer.parseInt(value); } catch (NumberFormatException e) { - s_logger.info("[ignored] not a number: " + value); + LOGGER.info("[ignored] not a number: " + value); } try { resp.sendRedirect(composeThumbnailUrl(rootUrl, vm, host, w, h)); } catch (IOException e) { - s_logger.info("Client may already close the connection", e); + LOGGER.info("Client may already close the connection", e); } } @@ -256,20 +257,20 @@ private void handleAuthRequest(HttpServletRequest req, HttpServletResponse resp, // the data is now being sent through private network, but this is apparently not enough VirtualMachine vm = _vmMgr.findById(vmId); if (vm == null) { - s_logger.warn("VM " + vmId + " does not exist, sending failed response for authentication request from console proxy"); + LOGGER.warn("VM " + vmId + " does not exist, sending failed response for authentication request from console proxy"); sendResponse(resp, "failed"); return; } if (vm.getHostId() == null) { - s_logger.warn("VM " + vmId + " lost host info, failed response for authentication request from console proxy"); + LOGGER.warn("VM " + vmId + " lost host info, failed response for authentication request from console proxy"); sendResponse(resp, 
"failed"); return; } HostVO host = _ms.getHostBy(vm.getHostId()); if (host == null) { - s_logger.warn("VM " + vmId + "'s host does not exist, sending failed response for authentication request from console proxy"); + LOGGER.warn("VM " + vmId + "'s host does not exist, sending failed response for authentication request from console proxy"); sendResponse(resp, "failed"); return; } @@ -278,9 +279,9 @@ private void handleAuthRequest(HttpServletRequest req, HttpServletResponse resp, if (sid == null || !sid.equals(vm.getVncPassword())) { if(sid != null) { sid = sid.replaceAll(SANITIZATION_REGEX, "_"); - s_logger.warn(String.format("sid [%s] in url does not match stored sid.", sid)); + LOGGER.warn(String.format("sid [%s] in url does not match stored sid.", sid)); } else { - s_logger.warn("Null sid in URL."); + LOGGER.warn("Null sid in URL."); } sendResponse(resp, "failed"); @@ -296,7 +297,7 @@ static public Ternary parseHostInfo(String hostInfo) { String tunnelUrl = null; String tunnelSession = null; - s_logger.info("Parse host info returned from executing GetVNCPortCommand. host info: " + hostInfo); + LOGGER.info("Parse host info returned from executing GetVNCPortCommand. 
host info: " + hostInfo); if (hostInfo != null) { if (hostInfo.startsWith("consoleurl")) { @@ -371,8 +372,8 @@ private String composeThumbnailUrl(String rootUrl, VirtualMachine vm, HostVO hos sb.append("/ajaximg?token=" + encryptor.encryptObject(ConsoleProxyClientParam.class, param)); sb.append("&w=").append(w).append("&h=").append(h).append("&key=0"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Compose thumbnail url: " + sb.toString()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Compose thumbnail url: " + sb.toString()); } return sb.toString(); } @@ -400,7 +401,7 @@ public static String genAccessTicket(String host, String port, String sid, Strin return Base64.encodeBase64String(encryptedBytes); } catch (Exception e) { - s_logger.error("Unexpected exception ", e); + LOGGER.error("Unexpected exception ", e); } return ""; } @@ -410,7 +411,7 @@ private void sendResponse(HttpServletResponse resp, String content) { resp.setContentType("text/html"); resp.getWriter().print(content); } catch (IOException e) { - s_logger.info("Client may already close the connection", e); + LOGGER.info("Client may already close the connection", e); } } @@ -418,7 +419,7 @@ private boolean checkSessionPermision(HttpServletRequest req, long vmId, Account VirtualMachine vm = _vmMgr.findById(vmId); if (vm == null) { - s_logger.debug("Console/thumbnail access denied. VM " + vmId + " does not exist in system any more"); + LOGGER.debug("Console/thumbnail access denied. VM " + vmId + " does not exist in system any more"); return false; } @@ -432,14 +433,14 @@ private boolean checkSessionPermision(HttpServletRequest req, long vmId, Account _accountMgr.checkAccess(accountObj, null, true, vm); } catch (PermissionDeniedException ex) { if (_accountMgr.isNormalUser(accountObj.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM access is denied. 
VM owner account " + vm.getAccountId() + " does not match the account id in session " + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("VM access is denied. VM owner account " + vm.getAccountId() + " does not match the account id in session " + accountObj.getId() + " and caller is a normal user"); } } else if (_accountMgr.isDomainAdmin(accountObj.getId()) || accountObj.getType() == Account.Type.READ_ONLY_ADMIN) { - if(s_logger.isDebugEnabled()) { - s_logger.debug("VM access is denied. VM owner account " + vm.getAccountId() + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("VM access is denied. VM owner account " + vm.getAccountId() + " does not match the account id in session " + accountObj.getId() + " and the domain-admin caller does not manage the target domain"); } } @@ -453,7 +454,7 @@ private boolean checkSessionPermision(HttpServletRequest req, long vmId, Account return false; default: - s_logger.warn("Unrecoginized virtual machine type, deny access by default. type: " + vm.getType()); + LOGGER.warn("Unrecoginized virtual machine type, deny access by default. 
type: " + vm.getType()); return false; } @@ -478,7 +479,7 @@ public boolean verifyUser(Long userId) { if ((user == null) || (user.getRemoved() != null) || !user.getState().equals(Account.State.ENABLED) || (account == null) || !account.getState().equals(Account.State.ENABLED)) { - s_logger.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); + LOGGER.warn("Deleted/Disabled/Locked user with id=" + userId + " attempting to access public API"); return false; } return true; @@ -524,8 +525,8 @@ private boolean verifyRequest(Map requestParameters) { // if api/secret key are passed to the parameters if ((signature == null) || (apiKey == null)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("expired session, missing signature, or missing apiKey -- ignoring request...sig: " + signature + ", apiKey: " + apiKey); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("expired session, missing signature, or missing apiKey -- ignoring request...sig: " + signature + ", apiKey: " + apiKey); } return false; // no signature, bad request } @@ -536,7 +537,7 @@ private boolean verifyRequest(Map requestParameters) { // verify there is a user with this api key Pair userAcctPair = _accountMgr.findUserByApiKey(apiKey); if (userAcctPair == null) { - s_logger.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); + LOGGER.debug("apiKey does not map to a valid user -- ignoring request, apiKey: " + apiKey); return false; } @@ -544,7 +545,7 @@ private boolean verifyRequest(Map requestParameters) { Account account = userAcctPair.second(); if (!user.getState().equals(Account.State.ENABLED) || !account.getState().equals(Account.State.ENABLED)) { - s_logger.debug("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: " + user.getState() + + LOGGER.debug("disabled or locked user accessing the api, userid = " + user.getId() + "; name = " + user.getUsername() + "; state: 
" + user.getState() + "; accountState: " + account.getState()); return false; } @@ -552,7 +553,7 @@ private boolean verifyRequest(Map requestParameters) { // verify secret key exists secretKey = user.getSecretKey(); if (secretKey == null) { - s_logger.debug("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); + LOGGER.debug("User does not have a secret key associated with the account -- ignoring request, username: " + user.getUsername()); return false; } @@ -566,7 +567,7 @@ private boolean verifyRequest(Map requestParameters) { String computedSignature = Base64.encodeBase64String(encryptedBytes); boolean equalSig = ConstantTimeComparator.compareStrings(signature, computedSignature); if (!equalSig) { - s_logger.debug("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); + LOGGER.debug("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); } if (equalSig) { @@ -576,7 +577,7 @@ private boolean verifyRequest(Map requestParameters) { } return equalSig; } catch (Exception ex) { - s_logger.error("unable to verify request signature", ex); + LOGGER.error("unable to verify request signature", ex); } return false; } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index a92b75e1e1cf..2a6494cffcdb 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -38,7 +38,6 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.commons.lang3.EnumUtils; -import org.apache.log4j.Logger; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; @@ -48,7 +47,6 @@ public class ImageStoreServiceImpl extends ManagerBase implements ImageStoreService 
{ - private static final Logger s_logger = Logger.getLogger(ImageStoreServiceImpl.class); @Inject ImageStoreDao imageStoreDao; @Inject @@ -112,7 +110,7 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { } if (destImgStoreIds.contains(srcImgStoreId)) { - s_logger.debug("One of the destination stores is the same as the source image store ... Ignoring it..."); + logger.debug("One of the destination stores is the same as the source image store ... Ignoring it..."); destImgStoreIds.remove(srcImgStoreId); } @@ -121,21 +119,21 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { for (Long id : destImgStoreIds) { ImageStoreVO store = imageStoreDao.findById(id); if (store == null) { - s_logger.warn("Secondary storage with id: " + id + "is not found. Skipping it..."); + logger.warn("Secondary storage with id: " + id + "is not found. Skipping it..."); continue; } if (store.isReadonly()) { - s_logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it... "); + logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it... "); continue; } if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { - s_logger.warn("Destination image store : " + store.getName() + " not NFS based. Store not suitable for migration!"); + logger.warn("Destination image store : " + store.getName() + " not NFS based. Store not suitable for migration!"); continue; } if (srcStoreDcId != null && store.getDataCenterId() != null && !srcStoreDcId.equals(store.getDataCenterId())) { - s_logger.warn("Source and destination stores are not in the same zone. Skipping destination store: " + store.getName()); + logger.warn("Source and destination stores are not in the same zone. 
Skipping destination store: " + store.getName()); continue; } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java index 7916f4afe3d2..c356a62c6279 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreUploadMonitorImpl.java @@ -47,7 +47,6 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.image.deployasis.DeployAsIsHelper; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.Listener; @@ -87,7 +86,6 @@ @Component public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageStoreUploadMonitor, Listener, Configurable { - static final Logger s_logger = Logger.getLogger(ImageStoreUploadMonitorImpl.class); @Inject private VolumeDao _volumeDao; @@ -221,12 +219,12 @@ protected void runInContext() { DataStore dataStore = storeMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); continue; } VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId()); if (volume == null) { - s_logger.warn("Volume with id " + volumeDataStore.getVolumeId() + " not found"); + logger.warn("Volume with id " + volumeDataStore.getVolumeId() + " not found"); continue; } Host host = _hostDao.findById(ep.getId()); @@ -237,11 +235,11 @@ protected void runInContext() { try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - s_logger.warn("Unable to get upload status for volume " + volume.getUuid() + ". 
Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for volume " + volume.getUuid() + ". Error details: " + e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - s_logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for volume " + volumeDataStore.getVolumeId()); continue; } handleVolumeStatusResponse((UploadStatusAnswer)answer, volume, volumeDataStore); @@ -251,9 +249,9 @@ protected void runInContext() { handleVolumeStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), volume, volumeDataStore); } } catch (Throwable th) { - s_logger.warn("Exception while checking status for uploaded volume " + volumeDataStore.getExtractUrl() + ". Error details: " + th.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Exception details: ", th); + logger.warn("Exception while checking status for uploaded volume " + volumeDataStore.getExtractUrl() + ". 
Error details: " + th.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("Exception details: ", th); } } } @@ -265,12 +263,12 @@ protected void runInContext() { DataStore dataStore = storeMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); + logger.warn("There is no secondary storage VM for image store " + dataStore.getName()); continue; } VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); if (template == null) { - s_logger.warn("Template with id " + templateDataStore.getTemplateId() + " not found"); + logger.warn("Template with id " + templateDataStore.getTemplateId() + " not found"); continue; } Host host = _hostDao.findById(ep.getId()); @@ -281,11 +279,11 @@ protected void runInContext() { try { answer = ep.sendMessage(cmd); } catch (CloudRuntimeException e) { - s_logger.warn("Unable to get upload status for template " + template.getUuid() + ". Error details: " + e.getMessage()); + logger.warn("Unable to get upload status for template " + template.getUuid() + ". 
Error details: " + e.getMessage()); answer = new UploadStatusAnswer(cmd, UploadStatus.UNKNOWN, e.getMessage()); } if (answer == null || !(answer instanceof UploadStatusAnswer)) { - s_logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId()); + logger.warn("No or invalid answer corresponding to UploadStatusCommand for template " + templateDataStore.getTemplateId()); continue; } handleTemplateStatusResponse((UploadStatusAnswer)answer, template, templateDataStore); @@ -295,9 +293,9 @@ protected void runInContext() { handleTemplateStatusResponse(new UploadStatusAnswer(cmd, UploadStatus.ERROR, error), template, templateDataStore); } } catch (Throwable th) { - s_logger.warn("Exception while checking status for uploaded template " + templateDataStore.getExtractUrl() + ". Error details: " + th.getMessage()); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Exception details: ", th); + logger.warn("Exception while checking status for uploaded template " + templateDataStore.getExtractUrl() + ". 
Error details: " + th.getMessage()); + if (logger.isTraceEnabled()) { + logger.trace("Exception details: ", th); } } } @@ -334,8 +332,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { null, null, tmpVolumeDataStore.getPhysicalSize(), tmpVolumeDataStore.getSize(), Volume.class.getName(), tmpVolume.getUuid()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully"); + if (logger.isDebugEnabled()) { + logger.debug("Volume " + tmpVolume.getUuid() + " uploaded successfully"); } break; case IN_PROGRESS: @@ -349,7 +347,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); msg = "Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"; - s_logger.error(msg); + logger.error(msg); sendAlert = true; } else { tmpVolumeDataStore.setDownloadPercent(answer.getDownloadPercent()); @@ -361,7 +359,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationFailed, null, _volumeDao); msg = "Volume " + tmpVolume.getUuid() + " failed to upload. 
Error details: " + answer.getDetails(); - s_logger.error(msg); + logger.error(msg); sendAlert = true; break; case UNKNOWN: @@ -371,7 +369,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpVolumeDataStore.setState(State.Failed); stateMachine.transitTo(tmpVolume, Event.OperationTimeout, null, _volumeDao); msg = "Volume " + tmpVolume.getUuid() + " failed to upload due to operation timed out"; - s_logger.error(msg); + logger.error(msg); sendAlert = true; } } @@ -379,7 +377,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } _volumeDataStoreDao.update(tmpVolumeDataStore.getId(), tmpVolumeDataStore); } catch (NoTransitionException e) { - s_logger.error("Unexpected error " + e.getMessage()); + logger.error("Unexpected error " + e.getMessage()); } finally { if (sendAlert) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED, tmpVolume.getDataCenterId(), null, msg, msg); @@ -414,10 +412,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { OVFInformationTO ovfInformationTO = answer.getOvfInformationTO(); if (template.isDeployAsIs() && ovfInformationTO != null) { - s_logger.debug("Received OVF information from the uploaded template"); + logger.debug("Received OVF information from the uploaded template"); boolean persistDeployAsIs = deployAsIsHelper.persistTemplateOVFInformationAndUpdateGuestOS(tmpTemplate.getId(), ovfInformationTO, tmpTemplateDataStore); if (!persistDeployAsIs) { - s_logger.info("Failed persisting deploy-as-is template details for template " + template.getName()); + logger.info("Failed persisting deploy-as-is template details for template " + template.getName()); break; } } @@ -431,7 +429,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); msg = "Multi-disk OVA template " + tmpTemplate.getUuid() + " 
failed to process data disks"; - s_logger.error(msg); + logger.error(msg); sendAlert = true; break; } @@ -448,8 +446,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { UsageEventUtils.publishUsageEvent(etype, tmpTemplate.getAccountId(), vo.getDataCenterId(), tmpTemplate.getId(), tmpTemplate.getName(), null, null, tmpTemplateDataStore.getPhysicalSize(), tmpTemplateDataStore.getSize(), VirtualMachineTemplate.class.getName(), tmpTemplate.getUuid()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully"); + if (logger.isDebugEnabled()) { + logger.debug("Template " + tmpTemplate.getUuid() + " uploaded successfully"); } break; case IN_PROGRESS: @@ -463,7 +461,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); msg = "Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"; - s_logger.error(msg); + logger.error(msg); sendAlert = true; } else { tmpTemplateDataStore.setDownloadPercent(answer.getDownloadPercent()); @@ -475,7 +473,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationFailed, null, _templateDao); msg = "Template " + tmpTemplate.getUuid() + " failed to upload. 
Error details: " + answer.getDetails(); - s_logger.error(msg); + logger.error(msg); sendAlert = true; break; case UNKNOWN: @@ -485,7 +483,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { tmpTemplateDataStore.setState(State.Failed); stateMachine.transitTo(tmpTemplate, VirtualMachineTemplate.Event.OperationTimeout, null, _templateDao); msg = "Template " + tmpTemplate.getUuid() + " failed to upload due to operation timed out"; - s_logger.error(msg); + logger.error(msg); sendAlert = true; } } @@ -493,7 +491,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } _templateDataStoreDao.update(tmpTemplateDataStore.getId(), tmpTemplateDataStore); } catch (NoTransitionException e) { - s_logger.error("Unexpected error " + e.getMessage()); + logger.error("Unexpected error " + e.getMessage()); } finally { if (sendAlert) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED, diff --git a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java index 8d083cb75ba2..bbd2a506e4cb 100644 --- a/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/OCFS2ManagerImpl.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -53,7 +52,6 @@ @Component public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, ResourceListener { - private static final Logger s_logger = Logger.getLogger(OCFS2ManagerImpl.class); @Inject ClusterDetailsDao _clusterDetailsDao; @@ -107,11 +105,11 @@ private boolean prepareNodes(String clusterName, List hosts) { for (HostVO h : hosts) { Answer ans = _agentMgr.easySend(h.getId(), cmd); if (ans == null) { - s_logger.debug("Host " + h.getId() + " is not in UP state, skip preparing 
OCFS2 node on it"); + logger.debug("Host " + h.getId() + " is not in UP state, skip preparing OCFS2 node on it"); continue; } if (!ans.getResult()) { - s_logger.warn("PrepareOCFS2NodesCommand failed on host " + h.getId() + " " + ans.getDetails()); + logger.warn("PrepareOCFS2NodesCommand failed on host " + h.getId() + " " + ans.getDetails()); return false; } } @@ -152,7 +150,7 @@ public boolean prepareNodes(Long clusterId) { sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); List hosts = sc.list(); if (hosts.isEmpty()) { - s_logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); + logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); return true; } @@ -200,10 +198,10 @@ public void processDeletHostEventAfter(Host host) { if (hasOcfs2) { try { if (!prepareNodes(host.getClusterId())) { - s_logger.warn(errMsg); + logger.warn(errMsg); } } catch (Exception e) { - s_logger.error(errMsg, e); + logger.error(errMsg, e); } } } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 49c166872a72..c20baf990b40 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -138,7 +138,6 @@ import org.apache.commons.lang.time.DateUtils; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -261,7 +260,6 @@ @Component public class StorageManagerImpl extends ManagerBase implements StorageManager, ClusterManagerListener, Configurable { - private static final Logger s_logger = Logger.getLogger(StorageManagerImpl.class); protected String _name; @Inject @@ -421,7 +419,7 @@ public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean // available for (VolumeVO vol : 
vols) { if (vol.getRemoved() != null) { - s_logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); + logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); // not ok to share return false; } @@ -601,11 +599,11 @@ public boolean configure(String name, Map params) { Map configs = _configDao.getConfiguration("management-server", params); _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); - s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); + logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true); - s_logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() + logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() + ", template cleanup enabled: " + TemplateCleanupEnabled.value()); String cleanupInterval = configs.get("extract.url.cleanup.interval"); @@ -669,7 +667,7 @@ public boolean start() { int initialDelay = generator.nextInt(StorageCleanupInterval.value()); _executor.scheduleWithFixedDelay(new StorageGarbageCollector(), initialDelay, StorageCleanupInterval.value(), TimeUnit.SECONDS); } else { - s_logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled."); + logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled."); } _executor.scheduleWithFixedDelay(new DownloadURLGarbageCollector(), _downloadUrlCleanupInterval, _downloadUrlCleanupInterval, TimeUnit.SECONDS); @@ -766,7 +764,7 @@ public DataStore 
createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con //the path can be different, but if they have the same uuid, assume they are the same storage pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, pInfo.getUuid()); if (pool != null) { - s_logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool"); + logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool"); } } @@ -800,7 +798,7 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con } } catch (Exception e) { - s_logger.warn("Unable to setup the local storage pool for " + host, e); + logger.warn("Unable to setup the local storage pool for " + host, e); throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e); } @@ -923,7 +921,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource lifeCycle.attachZone(store, zoneScope, hypervisorType); } } catch (Exception e) { - s_logger.debug("Failed to add data store: " + e.getMessage(), e); + logger.debug("Failed to add data store: " + e.getMessage(), e); try { // clean up the db, just absorb the exception thrown in deletion with error logged, so that user can get error for adding data store // not deleting data store. 
@@ -931,7 +929,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource lifeCycle.deleteDataStore(store); } } catch (Exception ex) { - s_logger.debug("Failed to clean up storage pool: " + ex.getMessage()); + logger.debug("Failed to clean up storage pool: " + ex.getMessage()); } throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -945,8 +943,8 @@ protected Map extractUriParamsAsMap(String url) { try { uriInfo = UriUtils.getUriInfo(url); } catch (CloudRuntimeException cre) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("URI validation for url: %s failed, returning empty uri params", url)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("URI validation for url: %s failed, returning empty uri params", url)); } return uriParams; } @@ -955,8 +953,8 @@ protected Map extractUriParamsAsMap(String url) { String storageHost = uriInfo.getStorageHost(); String storagePath = uriInfo.getStoragePath(); if (scheme == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Scheme for url: %s is not found, returning empty uri params", url)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Scheme for url: %s is not found, returning empty uri params", url)); } return uriParams; } @@ -994,7 +992,7 @@ protected Map extractUriParamsAsMap(String url) { try { hostPath = URLDecoder.decode(storagePath, "UTF-8"); } catch (UnsupportedEncodingException e) { - s_logger.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); + logger.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); } if (hostPath == null) { // if decoding fails, use getPath() anyway hostPath = storagePath; @@ -1063,7 +1061,7 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I String name = cmd.getName(); if(StringUtils.isNotBlank(name)) { - s_logger.debug("Updating Storage Pool name to: " + name); + logger.debug("Updating 
Storage Pool name to: " + name); pool.setName(name); _storagePoolDao.update(pool.getId(), pool); } @@ -1071,8 +1069,8 @@ public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) throws I final List storagePoolTags = cmd.getTags(); if (storagePoolTags != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating Storage Pool Tags to :" + storagePoolTags); + if (logger.isDebugEnabled()) { + logger.debug("Updating Storage Pool Tags to :" + storagePoolTags); } if (pool.getPoolType() == StoragePoolType.DatastoreCluster) { List childStoragePools = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); @@ -1167,7 +1165,7 @@ public void removeStoragePoolFromCluster(long hostId, String iScsiName, StorageP if (answer == null || !answer.getResult()) { String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } @@ -1181,11 +1179,11 @@ public boolean deletePool(DeletePoolCmd cmd) { StoragePoolVO sPool = _storagePoolDao.findById(id); if (sPool == null) { - s_logger.warn("Unable to find pool:" + id); + logger.warn("Unable to find pool:" + id); throw new InvalidParameterValueException("Unable to find pool by id " + id); } if (sPool.getStatus() != StoragePoolStatus.Maintenance) { - s_logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); + logger.warn("Unable to delete storage id: " + id + " due to it is not in Maintenance state"); throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id); } @@ -1246,9 +1244,9 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { try { future.get(); } catch (InterruptedException e) { - s_logger.debug("expunge volume failed:" + vol.getId(), e); + logger.debug("expunge volume failed:" + 
vol.getId(), e); } catch (ExecutionException e) { - s_logger.debug("expunge volume failed:" + vol.getId(), e); + logger.debug("expunge volume failed:" + vol.getId(), e); } } } @@ -1264,14 +1262,14 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool.getId()); if (lock == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + sPool.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + sPool.getId()); } return false; } _storagePoolDao.releaseFromLockTable(lock.getId()); - s_logger.trace("Released lock for storage pool " + sPool.getId()); + logger.trace("Released lock for storage pool " + sPool.getId()); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(sPool.getStorageProviderName()); DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle(); @@ -1283,7 +1281,7 @@ private boolean deleteDataStoreInternal(StoragePoolVO sPool, boolean forced) { public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); + logger.debug("Adding pool " + pool.getName() + " to host " + hostId); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); @@ -1294,7 +1292,7 @@ public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageU public void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool 
pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); assert (pool.isShared()) : "Now, did you actually read the name of this method?"; - s_logger.debug("Removing pool " + pool.getName() + " from host " + hostId); + logger.debug("Removing pool " + pool.getName() + " from host " + hostId); DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); HypervisorHostListener listener = hostListeners.get(provider.getName()); @@ -1314,7 +1312,7 @@ public void enableHost(long hostId) { } } catch (Exception ex) { - s_logger.error("hostEnabled(long) failed for storage provider " + provider.getName(), ex); + logger.error("hostEnabled(long) failed for storage provider " + provider.getName(), ex); } } } @@ -1340,14 +1338,14 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l // All this is for the inaccuracy of floats for big number multiplication. BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue(); - s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(storagePool.getCapacityBytes())); + logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); + logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(storagePool.getCapacityBytes())); } else { - s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString()); + logger.debug("Found storage pool " + storagePool.getName() + " of type " + 
storagePool.getPoolType().toString()); totalOverProvCapacity = storagePool.getCapacityBytes(); } - s_logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); + logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); CapacityState capacityState = CapacityState.Enabled; if (storagePool.getScope() == ScopeType.ZONE) { DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId()); @@ -1389,7 +1387,7 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l _capacityDao.update(capacity.getId(), capacity); } } - s_logger.debug("Successfully set Capacity - " + toHumanReadableSize(totalOverProvCapacity) + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " + logger.debug("Successfully set Capacity - " + toHumanReadableSize(totalOverProvCapacity) + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId()); } @@ -1430,9 +1428,9 @@ public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirs } return new Pair(hostId, answers.toArray(new Answer[answers.size()])); } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); + logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); } catch (OperationTimedoutException e) { - s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); + logger.debug("Unable to send storage pool command to " + pool + " via " + hostId, e); } } @@ -1468,10 +1466,10 @@ public void cleanupStorage(boolean recurring) { try { List unusedTemplatesInPool = 
_tmpltMgr.getUnusedTemplatesInPool(pool); - s_logger.debug(String.format("Storage pool garbage collector found [%s] templates to be cleaned up in storage pool [%s].", unusedTemplatesInPool.size(), pool.getName())); + logger.debug(String.format("Storage pool garbage collector found [%s] templates to be cleaned up in storage pool [%s].", unusedTemplatesInPool.size(), pool.getName())); for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) { if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - s_logger.debug(String.format("Storage pool garbage collector is skipping template [%s] clean up on pool [%s] " + + logger.debug(String.format("Storage pool garbage collector is skipping template [%s] clean up on pool [%s] " + "because it is not completely downloaded.", templatePoolVO.getTemplateId(), templatePoolVO.getPoolId())); continue; } @@ -1479,7 +1477,7 @@ public void cleanupStorage(boolean recurring) { if (!templatePoolVO.getMarkedForGC()) { templatePoolVO.setMarkedForGC(true); _vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO); - s_logger.debug(String.format("Storage pool garbage collector has marked template [%s] on pool [%s] " + + logger.debug(String.format("Storage pool garbage collector has marked template [%s] on pool [%s] " + "for garbage collection.", templatePoolVO.getTemplateId(), templatePoolVO.getPoolId())); continue; } @@ -1487,8 +1485,8 @@ public void cleanupStorage(boolean recurring) { _tmpltMgr.evictTemplateFromStoragePool(templatePoolVO); } } catch (Exception e) { - s_logger.error(String.format("Failed to clean up primary storage pool [%s] due to: [%s].", pool, e.getMessage())); - s_logger.debug(String.format("Failed to clean up primary storage pool [%s].", pool), e); + logger.error(String.format("Failed to clean up primary storage pool [%s] due to: [%s].", pool, e.getMessage())); + logger.debug(String.format("Failed to clean up primary storage pool [%s].", pool), e); } } } @@ 
-1499,32 +1497,32 @@ public void cleanupStorage(boolean recurring) { String snapshotUuid = null; SnapshotVO snapshot = null; final String storeRole = snapshotDataStoreVO.getRole().toString().toLowerCase(); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { snapshot = _snapshotDao.findById(snapshotDataStoreVO.getSnapshotId()); if (snapshot == null) { - s_logger.warn(String.format("Did not find snapshot [ID: %d] for which store reference is in destroying state; therefore, it cannot be destroyed.", snapshotDataStoreVO.getSnapshotId())); + logger.warn(String.format("Did not find snapshot [ID: %d] for which store reference is in destroying state; therefore, it cannot be destroyed.", snapshotDataStoreVO.getSnapshotId())); continue; } snapshotUuid = snapshot.getUuid(); } try { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Verifying if snapshot [%s] is in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Verifying if snapshot [%s] is in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); } SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotDataStoreVO.getSnapshotId(), snapshotDataStoreVO.getDataStoreId(), snapshotDataStoreVO.getRole()); if (snapshotInfo != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Snapshot [%s] in destroying state found in %s data store [%s]; therefore, it will be destroyed.", snapshotUuid, storeRole, snapshotInfo.getDataStore().getUuid())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Snapshot [%s] in destroying state found in %s data store [%s]; therefore, it will be destroyed.", snapshotUuid, storeRole, snapshotInfo.getDataStore().getUuid())); } _snapshotService.deleteSnapshot(snapshotInfo); - } else if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Did not find 
snapshot [%s] in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); + } else if (logger.isDebugEnabled()) { + logger.debug(String.format("Did not find snapshot [%s] in destroying state in %s data store ID: %d.", snapshotUuid, storeRole, snapshotDataStoreVO.getDataStoreId())); } } catch (Exception e) { - s_logger.error(String.format("Failed to delete snapshot [%s] from storage due to: [%s].", snapshotDataStoreVO.getSnapshotId(), e.getMessage())); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Failed to delete snapshot [%s] from storage.", snapshotUuid), e); + logger.error(String.format("Failed to delete snapshot [%s] from storage due to: [%s].", snapshotDataStoreVO.getSnapshotId(), e.getMessage())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Failed to delete snapshot [%s] from storage.", snapshotUuid), e); } } } @@ -1535,13 +1533,13 @@ public void cleanupStorage(boolean recurring) { if (Type.ROOT.equals(vol.getVolumeType())) { VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(vol.getInstanceId()); if (vmInstanceVO != null && vmInstanceVO.getState() == State.Destroyed) { - s_logger.debug(String.format("ROOT volume [%s] will not be expunged because the VM is [%s], therefore this volume will be expunged with the VM" + logger.debug(String.format("ROOT volume [%s] will not be expunged because the VM is [%s], therefore this volume will be expunged with the VM" + " cleanup job.", vol.getUuid(), vmInstanceVO.getState())); continue; } } if (isVolumeSuspectedDestroyDuplicateOfVmVolume(vol)) { - s_logger.warn(String.format("Skipping cleaning up %s as it could be a duplicate for another volume on same pool", vol)); + logger.warn(String.format("Skipping cleaning up %s as it could be a duplicate for another volume on same pool", vol)); continue; } try { @@ -1549,8 +1547,8 @@ public void cleanupStorage(boolean recurring) { // system, but not necessary. 
handleManagedStorage(vol); } catch (Exception e) { - s_logger.error(String.format("Unable to destroy host-side clustered file system [%s] due to: [%s].", vol.getUuid(), e.getMessage())); - s_logger.debug(String.format("Unable to destroy host-side clustered file system [%s].", vol.getUuid()), e); + logger.error(String.format("Unable to destroy host-side clustered file system [%s] due to: [%s].", vol.getUuid(), e.getMessage())); + logger.debug(String.format("Unable to destroy host-side clustered file system [%s].", vol.getUuid()), e); } try { @@ -1559,11 +1557,11 @@ public void cleanupStorage(boolean recurring) { volService.ensureVolumeIsExpungeReady(vol.getId()); volService.expungeVolumeAsync(volumeInfo); } else { - s_logger.debug(String.format("Volume [%s] is already destroyed.", vol.getUuid())); + logger.debug(String.format("Volume [%s] is already destroyed.", vol.getUuid())); } } catch (Exception e) { - s_logger.error(String.format("Unable to destroy volume [%s] due to: [%s].", vol.getUuid(), e.getMessage())); - s_logger.debug(String.format("Unable to destroy volume [%s].", vol.getUuid()), e); + logger.error(String.format("Unable to destroy volume [%s] due to: [%s].", vol.getUuid(), e.getMessage())); + logger.debug(String.format("Unable to destroy volume [%s].", vol.getUuid()), e); } } @@ -1577,8 +1575,8 @@ public void cleanupStorage(boolean recurring) { } _snapshotDao.expunge(snapshotVO.getId()); } catch (Exception e) { - s_logger.error(String.format("Unable to destroy snapshot [%s] due to: [%s].", snapshotVO.getUuid(), e.getMessage())); - s_logger.debug(String.format("Unable to destroy snapshot [%s].", snapshotVO.getUuid()), e); + logger.error(String.format("Unable to destroy snapshot [%s] due to: [%s].", snapshotVO.getUuid(), e.getMessage())); + logger.debug(String.format("Unable to destroy snapshot [%s].", snapshotVO.getUuid()), e); } } @@ -1587,14 +1585,14 @@ public void cleanupStorage(boolean recurring) { for (VolumeDataStoreVO volumeDataStore : 
volumeDataStores) { VolumeVO volume = volumeDao.findById(volumeDataStore.getVolumeId()); if (volume == null) { - s_logger.warn(String.format("Uploaded volume [%s] not found, so cannot be destroyed.", volumeDataStore.getVolumeId())); + logger.warn(String.format("Uploaded volume [%s] not found, so cannot be destroyed.", volumeDataStore.getVolumeId())); continue; } try { DataStore dataStore = _dataStoreMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn(String.format("There is no secondary storage VM for image store [%s], cannot destroy uploaded volume [%s].", dataStore.getName(), volume.getUuid())); + logger.warn(String.format("There is no secondary storage VM for image store [%s], cannot destroy uploaded volume [%s].", dataStore.getName(), volume.getUuid())); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1606,18 +1604,18 @@ public void cleanupStorage(boolean recurring) { // expunge volume from secondary if volume is on image store VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image); if (volOnSecondary != null) { - s_logger.info(String.format("Expunging volume [%s] uploaded using HTTP POST from secondary data store.", volume.getUuid())); + logger.info(String.format("Expunging volume [%s] uploaded using HTTP POST from secondary data store.", volume.getUuid())); AsyncCallFuture future = volService.expungeVolumeAsync(volOnSecondary); VolumeApiResult result = future.get(); if (!result.isSuccess()) { - s_logger.warn(String.format("Failed to expunge volume [%s] from the image store [%s] due to: [%s].", volume.getUuid(), dataStore.getName(), result.getResult())); + logger.warn(String.format("Failed to expunge volume [%s] from the image store [%s] due to: [%s].", volume.getUuid(), dataStore.getName(), result.getResult())); } } } } } catch (Throwable th) { - s_logger.error(String.format("Unable to 
destroy uploaded volume [%s] due to: [%s].", volume.getUuid(), th.getMessage())); - s_logger.debug(String.format("Unable to destroy uploaded volume [%s].", volume.getUuid()), th); + logger.error(String.format("Unable to destroy uploaded volume [%s] due to: [%s].", volume.getUuid(), th.getMessage())); + logger.debug(String.format("Unable to destroy uploaded volume [%s].", volume.getUuid()), th); } } @@ -1626,14 +1624,14 @@ public void cleanupStorage(boolean recurring) { for (TemplateDataStoreVO templateDataStore : templateDataStores) { VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId()); if (template == null) { - s_logger.warn(String.format("Uploaded template [%s] not found, so cannot be destroyed.", templateDataStore.getTemplateId())); + logger.warn(String.format("Uploaded template [%s] not found, so cannot be destroyed.", templateDataStore.getTemplateId())); continue; } try { DataStore dataStore = _dataStoreMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image); EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl()); if (ep == null) { - s_logger.warn(String.format("Cannot destroy uploaded template [%s] as there is no secondary storage VM for image store [%s].", template.getUuid(), dataStore.getName())); + logger.warn(String.format("Cannot destroy uploaded template [%s] as there is no secondary storage VM for image store [%s].", template.getUuid(), dataStore.getName())); continue; } Host host = _hostDao.findById(ep.getId()); @@ -1642,7 +1640,7 @@ public void cleanupStorage(boolean recurring) { AsyncCallFuture future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore)); TemplateApiResult result = future.get(); if (!result.isSuccess()) { - s_logger.warn(String.format("Failed to delete template [%s] from image store [%s] due to: [%s]", template.getUuid(), dataStore.getName(), result.getResult())); + logger.warn(String.format("Failed to delete template [%s] from 
image store [%s] due to: [%s]", template.getUuid(), dataStore.getName(), result.getResult())); continue; } // remove from template_zone_ref @@ -1666,8 +1664,8 @@ public void cleanupStorage(boolean recurring) { } } } catch (Throwable th) { - s_logger.error(String.format("Unable to destroy uploaded template [%s] due to: [%s].", template.getUuid(), th.getMessage())); - s_logger.debug(String.format("Unable to destroy uploaded template [%s].", template.getUuid()), th); + logger.error(String.format("Unable to destroy uploaded template [%s] due to: [%s].", template.getUuid(), th.getMessage())); + logger.debug(String.format("Unable to destroy uploaded template [%s].", template.getUuid()), th); } } cleanupInactiveTemplates(); @@ -1698,7 +1696,7 @@ protected boolean isVolumeSuspectedDestroyDuplicateOfVmVolume(VolumeVO gcVolume) List vmUsableVolumes = volumeDao.findUsableVolumesForInstance(vmId); for (VolumeVO vol : vmUsableVolumes) { if (gcVolume.getPoolId().equals(vol.getPoolId()) && gcVolume.getPath().equals(vol.getPath())) { - s_logger.debug(String.format("%s meant for garbage collection could a possible duplicate for %s", gcVolume, vol)); + logger.debug(String.format("%s meant for garbage collection could a possible duplicate for %s", gcVolume, vol)); return true; } } @@ -1753,7 +1751,7 @@ private void handleManagedStorage(Volume volume) { if (answer != null && answer.getResult()) { volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); } else { - s_logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid()); + logger.warn("Unable to remove host-side clustered file system for the following volume: " + volume.getUuid()); } } } @@ -1777,7 +1775,7 @@ List findAllVolumeIdInSnapshotTable(Long storeId) { } return list; } catch (Exception e) { - s_logger.debug("failed to get all volumes who has snapshots in secondary storage " + storeId + " due to " + e.getMessage()); + logger.debug("failed to get all volumes who 
has snapshots in secondary storage " + storeId + " due to " + e.getMessage()); return null; } @@ -1798,7 +1796,7 @@ List findAllSnapshotForVolume(Long volumeId) { } return list; } catch (Exception e) { - s_logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); + logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); return null; } } @@ -1815,15 +1813,15 @@ public void cleanupSecondaryStorage(boolean recurring) { try { long storeId = store.getId(); List destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId); - s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName()); for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); + if (logger.isDebugEnabled()) { + logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); } _templateStoreDao.remove(destroyedTemplateStoreVO.getId()); } } catch (Exception e) { - s_logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e); + logger.warn("problem cleaning up templates in template_store_ref for store: " + store.getName(), e); } } @@ -1831,17 +1829,17 @@ public void cleanupSecondaryStorage(boolean recurring) { for (DataStore store : imageStores) { try { List destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId()); - s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); + logger.debug("Secondary storage 
garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) { // check if this snapshot has child SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store); if (snap.getChild() != null) { - s_logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + " , because it has child"); + logger.debug("Skip snapshot on store: " + destroyedSnapshotStoreVO + " , because it has child"); continue; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO); + if (logger.isDebugEnabled()) { + logger.debug("Deleting snapshot store DB entry: " + destroyedSnapshotStoreVO); } List imageStoreRefs = _snapshotStoreDao.listBySnapshot(destroyedSnapshotStoreVO.getSnapshotId(), DataStoreRole.Image); @@ -1850,8 +1848,8 @@ public void cleanupSecondaryStorage(boolean recurring) { } SnapshotDataStoreVO snapshotOnPrimary = _snapshotStoreDao.findDestroyedReferenceBySnapshot(destroyedSnapshotStoreVO.getSnapshotId(), DataStoreRole.Primary); if (snapshotOnPrimary != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting snapshot on primary store reference DB entry: " + snapshotOnPrimary); + if (logger.isDebugEnabled()) { + logger.debug("Deleting snapshot on primary store reference DB entry: " + snapshotOnPrimary); } _snapshotStoreDao.remove(snapshotOnPrimary.getId()); } @@ -1859,7 +1857,7 @@ public void cleanupSecondaryStorage(boolean recurring) { } } catch (Exception e2) { - s_logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up snapshots in snapshot_store_ref for store: " + store.getName(), e2); } } @@ -1869,20 +1867,20 @@ public void cleanupSecondaryStorage(boolean recurring) { try { List destroyedStoreVOs = 
_volumeStoreDao.listDestroyed(store.getId()); destroyedStoreVOs.addAll(_volumeDataStoreDao.listByVolumeState(Volume.State.Expunged)); - s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); + logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); + if (logger.isDebugEnabled()) { + logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); } _volumeStoreDao.remove(destroyedStoreVO.getId()); } } catch (Exception e2) { - s_logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2); + logger.warn("problem cleaning up volumes in volume_store_ref for store: " + store.getName(), e2); } } } catch (Exception e3) { - s_logger.warn("problem cleaning up secondary storage DB entries. ", e3); + logger.warn("problem cleaning up secondary storage DB entries. 
", e3); } } @@ -1906,7 +1904,7 @@ public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStor if (primaryStorage == null) { String msg = "Unable to obtain lock on the storage pool record in preparePrimaryStorageForMaintenance()"; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -1951,8 +1949,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { try { lifeCycle.maintain(childStore); } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Exception on maintenance preparation of one of the child datastores in datastore cluster %d with error %s", primaryStorageId, e)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Exception on maintenance preparation of one of the child datastores in datastore cluster %d with error %s", primaryStorageId, e)); } // Set to ErrorInMaintenance state of all child storage pools and datastore cluster for (StoragePoolVO childDatastore : childDatastores) { @@ -1976,7 +1974,7 @@ public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStor if (primaryStorage == null) { String msg = "Unable to obtain lock on the storage pool in cancelPrimaryStorageForMaintenance()"; - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -2011,7 +2009,7 @@ public StoragePool syncStoragePool(SyncStoragePoolCmd cmd) { if (pool == null) { String msg = String.format("Unable to obtain lock on the storage pool record while syncing storage pool [%s] with management server", pool.getUuid()); - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } @@ -2144,7 +2142,7 @@ public void syncDatastoreClusterStoragePool(long datastoreClusterPoolId, List set = new LinkedHashSet<>(storageTags); storageTags.clear(); storageTags.addAll(set); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updating Storage Pool Tags to :" + storageTags); + if 
(logger.isDebugEnabled()) { + logger.debug("Updating Storage Pool Tags to :" + storageTags); } _storagePoolTagsDao.persist(storageTags); } @@ -2276,7 +2274,7 @@ private void handleRemoveChildStoragePoolFromDatastoreCluster(Set childD details.put(DiskTO.PROTOCOL_TYPE, Storage.StoragePoolType.DatastoreCluster.toString()); disk.setDetails(details); - s_logger.debug(String.format("Attempting to process SyncVolumePathCommand for the volume %d on the host %d with state %s", volumeId, hostId, hostVO.getResourceState())); + logger.debug(String.format("Attempting to process SyncVolumePathCommand for the volume %d on the host %d with state %s", volumeId, hostId, hostVO.getResourceState())); SyncVolumePathCommand cmd = new SyncVolumePathCommand(disk); final Answer answer = _agentMgr.easySend(hostId, cmd); // validate answer @@ -2297,7 +2295,7 @@ private void handleRemoveChildStoragePoolFromDatastoreCluster(Set childD if (storagePoolVO != null) { volumeVO.setPoolId(storagePoolVO.getId()); } else { - s_logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); + logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); } } @@ -2343,12 +2341,12 @@ public StorageGarbageCollector() { @Override protected void runInContext() { try { - s_logger.trace("Storage Garbage Collection Thread is running."); + logger.trace("Storage Garbage Collection Thread is running."); cleanupStorage(true); } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } } } @@ -2361,7 +2359,7 @@ public void onManagementNodeJoined(List nodeList public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost vo : nodeList) { if (vo.getMsid() == _serverId) { - s_logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid()); 
+ logger.info("Cleaning up storage maintenance jobs associated with Management server: " + vo.getMsid()); List poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); if (poolIds.size() > 0) { for (Long poolId : poolIds) { @@ -2590,7 +2588,7 @@ public Host updateSecondaryStorage(long secStorageId, String newUrl) { throw new InvalidParameterValueException("can not change old scheme:" + oldUri.getScheme() + " to " + uri.getScheme()); } } catch (URISyntaxException e) { - s_logger.debug("Failed to get uri from " + oldUrl); + logger.debug("Failed to get uri from " + oldUrl); } secHost.setStorageUrl(newUrl); @@ -2632,13 +2630,13 @@ private boolean checkUsagedSpace(StoragePool pool) { long usedSize = getUsedSize(pool); double usedPercentage = ((double)usedSize / (double)totalSize); double storageUsedThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(pool.getDataCenterId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + pool.getUsedBytes() + + if (logger.isDebugEnabled()) { + logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + pool.getUsedBytes() + ", usedPct: " + usedPercentage + ", disable threshold: " + storageUsedThreshold); } if (usedPercentage >= storageUsedThreshold) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + + if (logger.isDebugEnabled()) { + logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + storageUsedThreshold); } return false; @@ -2668,14 +2666,14 @@ private long getUsedSize(StoragePool pool) { @Override public boolean storagePoolHasEnoughIops(List> requestedVolumes, StoragePool pool) { if (requestedVolumes == 
null || requestedVolumes.isEmpty() || pool == null) { - s_logger.debug(String.format("Cannot check if storage [%s] has enough IOPS to allocate volumes [%s].", pool, requestedVolumes)); + logger.debug(String.format("Cannot check if storage [%s] has enough IOPS to allocate volumes [%s].", pool, requestedVolumes)); return false; } // Only IOPS-guaranteed primary storage like SolidFire is using/setting IOPS. // This check returns true for storage that does not specify IOPS. if (pool.getCapacityIops() == null) { - s_logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); + logger.info("Storage pool " + pool.getName() + " (" + pool.getId() + ") does not supply IOPS capacity, assuming enough capacity"); return true; } @@ -2701,7 +2699,7 @@ public boolean storagePoolHasEnoughIops(List> requeste long futureIops = currentIops + requestedIops; boolean hasEnoughIops = futureIops <= pool.getCapacityIops(); String hasCapacity = hasEnoughIops ? 
"has" : "does not have"; - s_logger.debug(String.format("Pool [%s] %s enough IOPS to allocate volumes [%s].", pool, hasCapacity, requestedVolumes)); + logger.debug(String.format("Pool [%s] %s enough IOPS to allocate volumes [%s].", pool, hasCapacity, requestedVolumes)); return hasEnoughIops; } @@ -2713,18 +2711,18 @@ public boolean storagePoolHasEnoughSpace(List> volumeD @Override public boolean storagePoolHasEnoughSpace(List> volumeDiskProfilesList, StoragePool pool, Long clusterId) { if (CollectionUtils.isEmpty(volumeDiskProfilesList)) { - s_logger.debug(String.format("Cannot check if pool [%s] has enough space to allocate volumes because the volumes list is empty.", pool)); + logger.debug(String.format("Cannot check if pool [%s] has enough space to allocate volumes because the volumes list is empty.", pool)); return false; } if (!checkUsagedSpace(pool)) { - s_logger.debug(String.format("Cannot allocate pool [%s] because there is not enough space in this pool.", pool)); + logger.debug(String.format("Cannot allocate pool [%s] because there is not enough space in this pool.", pool)); return false; } // allocated space includes templates - if (s_logger.isDebugEnabled()) { - s_logger.debug("Destination pool id: " + pool.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Destination pool id: " + pool.getId()); } // allocated space includes templates final StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); @@ -2757,8 +2755,8 @@ public boolean storagePoolHasEnoughSpace(List> volumeD } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Pool ID for the volume with ID " + volumeVO.getId() + " is " + volumeVO.getPoolId()); + if (logger.isDebugEnabled()) { + logger.debug("Pool ID for the volume with ID " + volumeVO.getId() + " is " + volumeVO.getPoolId()); } // A ready-state volume is already allocated in a pool, so the asking size is zero for it. 
@@ -2778,8 +2776,8 @@ public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long current if (!checkUsagedSpace(pool)) { return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Destination pool id: " + pool.getId()); + if (logger.isDebugEnabled()) { + logger.debug("Destination pool id: " + pool.getId()); } long totalAskingSize = newSize - currentSize; @@ -2827,10 +2825,10 @@ public boolean isStoragePoolCompliantWithStoragePolicy(List(volume, answer)); } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostIds.get(0), e); + logger.debug("Unable to send storage pool command to " + pool + " via " + hostIds.get(0), e); throw new StorageUnavailableException("Unable to send command to the pool ", pool.getId()); } catch (OperationTimedoutException e) { - s_logger.debug("Failed to process storage pool command to " + pool + " via " + hostIds.get(0), e); + logger.debug("Failed to process storage pool command to " + pool + " via " + hostIds.get(0), e); throw new StorageUnavailableException("Failed to process storage command to the pool ", pool.getId()); } } @@ -2838,7 +2836,7 @@ public boolean isStoragePoolCompliantWithStoragePolicy(List answer : answers) { if (!answer.second().getResult()) { - s_logger.debug(String.format("Storage pool %s is not compliance with storage policy for volume %s", pool.getUuid(), answer.first().getName())); + logger.debug(String.format("Storage pool %s is not compliance with storage policy for volume %s", pool.getUuid(), answer.first().getName())); return false; } } @@ -2856,28 +2854,28 @@ private boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTempla totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue(); - s_logger.debug("Found storage pool " + pool.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); - s_logger.debug("Total 
over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(pool.getCapacityBytes())); + logger.debug("Found storage pool " + pool.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); + logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + toHumanReadableSize(pool.getCapacityBytes())); } else { totalOverProvCapacity = pool.getCapacityBytes(); - s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString()); + logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString()); } - s_logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); + logger.debug("Total capacity of the pool " + poolVO.getName() + " with ID " + pool.getId() + " is " + toHumanReadableSize(totalOverProvCapacity)); double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool: " + pool.getId() + " for storage allocation , maxSize : " + toHumanReadableSize(totalOverProvCapacity) + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) + if (logger.isDebugEnabled()) { + logger.debug("Checking pool: " + pool.getId() + " for storage allocation , maxSize : " + toHumanReadableSize(totalOverProvCapacity) + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) + ", askingSize : " + toHumanReadableSize(totalAskingSize) + ", allocated disable threshold: " + storageAllocatedThreshold); } double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity); if (usedPercentage > storageAllocatedThreshold) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + 
" for storage allocation since its allocated percentage: " + usedPercentage + if (logger.isDebugEnabled()) { + logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation since its allocated percentage: " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold + ", skipping this pool"); } @@ -2885,8 +2883,8 @@ private boolean checkPoolforSpace(StoragePool pool, long allocatedSizeWithTempla } if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation, not enough storage, maxSize : " + toHumanReadableSize(totalOverProvCapacity) + if (logger.isDebugEnabled()) { + logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for storage allocation, not enough storage, maxSize : " + toHumanReadableSize(totalOverProvCapacity) + ", totalAllocatedSize : " + toHumanReadableSize(allocatedSizeWithTemplate) + ", askingSize : " + toHumanReadableSize(totalAskingSize)); } @@ -2981,34 +2979,34 @@ private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { @Override public boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volume) { if (pool == null || volume == null) { - s_logger.debug(String.format("Cannot check if storage pool [%s] is compatible with volume [%s].", pool, volume)); + logger.debug(String.format("Cannot check if storage pool [%s] is compatible with volume [%s].", pool, volume)); return false; } if (volume.getPoolId() == null) { - s_logger.debug(String.format("Volume [%s] is not allocated to any pool. Cannot check compatibility with pool [%s].", volume, pool)); + logger.debug(String.format("Volume [%s] is not allocated to any pool. 
Cannot check compatibility with pool [%s].", volume, pool)); return true; } StoragePool volumePool = _storagePoolDao.findById(volume.getPoolId()); if (volumePool == null) { - s_logger.debug(String.format("Pool [%s] used by volume [%s] does not exist. Cannot check compatibility.", pool, volume)); + logger.debug(String.format("Pool [%s] used by volume [%s] does not exist. Cannot check compatibility.", pool, volume)); return true; } if (volume.getState() == Volume.State.Ready) { if (volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex && pool.getPoolType() != Storage.StoragePoolType.PowerFlex) { - s_logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); + logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); return false; } else if (volumePool.getPoolType() != Storage.StoragePoolType.PowerFlex && pool.getPoolType() == Storage.StoragePoolType.PowerFlex) { - s_logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); + logger.debug(String.format("Pool [%s] with type [%s] does not match volume [%s] pool type [%s].", pool, pool.getPoolType(), volume, volumePool.getPoolType())); return false; } } else { - s_logger.debug(String.format("Cannot check compatibility of pool [%s] because volume [%s] is not in [%s] state.", pool, volume, Volume.State.Ready)); + logger.debug(String.format("Cannot check compatibility of pool [%s] because volume [%s] is not in [%s] state.", pool, volume, Volume.State.Ready)); return false; } - s_logger.debug(String.format("Pool [%s] is compatible with volume [%s].", pool, volume)); + logger.debug(String.format("Pool [%s] is compatible with volume [%s].", pool, volume)); return true; } @@ -3133,8 +3131,8 @@ public ImageStore 
discoverImageStore(String name, String url, String providerNam try { store = lifeCycle.initialize(params); } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to add data store: " + e.getMessage(), e); + if (logger.isDebugEnabled()) { + logger.debug("Failed to add data store: " + e.getMessage(), e); } throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -3200,7 +3198,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { continue; } } catch (Exception e) { - s_logger.error("Failed to validated if template is seeded", e); + logger.error("Failed to validated if template is seeded", e); } } } @@ -3212,11 +3210,11 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } } catch (CloudRuntimeException e) { SystemVmTemplateRegistration.unmountStore(filePath); - s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); + logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); } } } catch (Exception e) { - s_logger.error("Failed to register systemVM template(s)"); + logger.error("Failed to register systemVM template(s)"); } finally { SystemVmTemplateRegistration.unmountStore(filePath); txn.close(); @@ -3336,7 +3334,7 @@ public void updateStorageCapabilities(Long poolId, boolean failOnChecks) { } } else { if (answer != null && !answer.getResult()) { - s_logger.error("Failed to update storage pool capabilities: " + answer.getDetails()); + logger.error("Failed to update storage pool capabilities: " + answer.getDetails()); if (failOnChecks) { throw new CloudRuntimeException(answer.getDetails()); } @@ -3486,7 +3484,7 @@ public ImageStore createSecondaryStagingStore(CreateSecondaryStagingStoreCmd cmd try { store = lifeCycle.initialize(params); } catch (Exception e) { - s_logger.debug("Failed to add data store: " + e.getMessage(), e); + 
logger.debug("Failed to add data store: " + e.getMessage(), e); throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } @@ -3545,12 +3543,12 @@ public DownloadURLGarbageCollector() { @Override public void run() { try { - s_logger.trace("Download URL Garbage Collection Thread is running."); + logger.trace("Download URL Garbage Collection Thread is running."); cleanupDownloadUrls(); } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } } } @@ -3572,7 +3570,7 @@ public void cleanupDownloadUrls() { continue; } expiredVolumeIds.add(volumeId); - s_logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId); + logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -3581,7 +3579,7 @@ public void cleanupDownloadUrls() { // Now expunge it from DB since this entry was created only for download purpose _volumeStoreDao.expunge(volumeOnImageStore.getId()); } catch (Throwable th) { - s_logger.warn("Caught exception while deleting download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); + logger.warn("Caught exception while deleting download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); } } for (Long volumeId : expiredVolumeIds) { @@ -3604,7 +3602,7 @@ public void cleanupDownloadUrls() { continue; } - s_logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); + logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); // Remove it from 
image store ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image); @@ -3615,7 +3613,7 @@ public void cleanupDownloadUrls() { templateOnImageStore.setExtractUrlCreated(null); _templateStoreDao.update(templateOnImageStore.getId(), templateOnImageStore); } catch (Throwable th) { - s_logger.warn("caught exception while deleting download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId(), th); + logger.warn("caught exception while deleting download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId(), th); } } @@ -3627,7 +3625,7 @@ public void cleanupDownloadUrls() { secStore.deleteExtractUrl(imageStoreObjectDownloadVO.getPath(), imageStoreObjectDownloadVO.getDownloadUrl(), null); _imageStoreObjectDownloadDao.expunge(imageStoreObjectDownloadVO.getId()); } catch (Throwable th) { - s_logger.warn("caught exception while deleting download url " + imageStoreObjectDownloadVO.getDownloadUrl() + " for object id " + imageStoreObjectDownloadVO.getId(), th); + logger.warn("caught exception while deleting download url " + imageStoreObjectDownloadVO.getDownloadUrl() + " for object id " + imageStoreObjectDownloadVO.getId(), th); } } } @@ -3809,8 +3807,8 @@ public ObjectStore discoverObjectStore(String name, String url, String providerN try { store = lifeCycle.initialize(params); } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to add object store: " + e.getMessage(), e); + if (logger.isDebugEnabled()) { + logger.debug("Failed to add object store: " + e.getMessage(), e); } throw new CloudRuntimeException("Failed to add object store: " + e.getMessage(), e); } @@ -3842,7 +3840,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _objectStoreDao.remove(storeId); } }); - s_logger.debug("Successfully deleted object store with Id: "+storeId); + 
logger.debug("Successfully deleted object store with Id: "+storeId); return true; } @@ -3883,7 +3881,7 @@ public ObjectStore updateObjectStore(Long id, UpdateObjectStoragePoolCmd cmd) { objectStoreVO.setName(cmd.getName()); } _objectStoreDao.update(id, objectStoreVO); - s_logger.debug("Successfully updated object store with Id: "+id); + logger.debug("Successfully updated object store with Id: "+id); return objectStoreVO; } } diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index 7b5ebc455c6d..6a8e3f0ff517 100644 --- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -29,7 +29,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -65,7 +66,7 @@ @Component public class StoragePoolAutomationImpl implements StoragePoolAutomation { - private static final Logger s_logger = Logger.getLogger(StoragePoolAutomationImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected VirtualMachineManager vmMgr; @Inject @@ -158,15 +159,15 @@ public boolean maintain(DataStore store) { ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool); final Answer answer = agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false failed due to " + ((answer == null) ? 
"answer null" : answer.getDetails())); + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool false failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false succeeded"); + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool false succeeded"); } if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - s_logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); + logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); } } @@ -198,8 +199,8 @@ public boolean maintain(DataStore store) { StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), pool.getId(), false, false, server.getId()); _storagePoolWorkDao.persist(work); } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work record already exists, re-using by re-setting values"); + if (logger.isDebugEnabled()) { + logger.debug("Work record already exists, re-using by re-setting values"); } StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId()); work.setStartedAfterMaintenance(false); @@ -284,7 +285,7 @@ public boolean maintain(DataStore store) { } } } catch (Exception e) { - s_logger.error("Exception in enabling primary storage maintenance:", e); + logger.error("Exception in enabling primary storage maintenance:", e); pool.setStatus(StoragePoolStatus.ErrorInMaintenance); primaryDataStoreDao.update(pool.getId(), pool); throw new CloudRuntimeException(e.getMessage()); @@ -323,15 +324,15 @@ public boolean cancelMaintain(DataStore store) { ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool); final Answer answer = 
agentMgr.easySend(host.getId(), msPoolCmd); if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add succeeded"); + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool add succeeded"); } if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - s_logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); + logger.debug(String.format("Started synchronising datastore cluster storage pool %s with vCenter", pool.getUuid())); storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); } } @@ -399,7 +400,7 @@ public boolean cancelMaintain(DataStore store) { } } } catch (Exception e) { - s_logger.debug("Failed start vm", e); + logger.debug("Failed start vm", e); throw new CloudRuntimeException(e.toString()); } } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 2a0821c5c0a7..8597028f6c8d 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -113,7 +113,6 @@ import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.joda.time.DateTime; @@ -236,7 +235,6 @@ import com.google.gson.JsonParseException; public class VolumeApiServiceImpl extends 
ManagerBase implements VolumeApiService, VmWorkJobHandler, Configurable { - private final static Logger s_logger = Logger.getLogger(VolumeApiServiceImpl.class); public static final String VM_WORK_JOB_HANDLER = VolumeApiServiceImpl.class.getSimpleName(); @Inject @@ -433,7 +431,7 @@ private String sanitizeFormat(String format) { ImageFormat.valueOf(uppercase); } catch (IllegalArgumentException e) { String msg = "Image format: " + format + " is incorrect. Supported formats are " + EnumUtils.listValues(ImageFormat.values()); - s_logger.error("ImageFormat IllegalArgumentException: " + e.getMessage(), e); + logger.error("ImageFormat IllegalArgumentException: " + e.getMessage(), e); throw new IllegalArgumentException(msg); } return uppercase; @@ -548,7 +546,7 @@ private boolean validateVolume(Account caller, long ownerId, Long zoneId, String } UriUtils.validateUrl(format, url); if (VolumeUrlCheck.value()) { // global setting that can be set when their MS does not have internet access - s_logger.debug("Checking url: " + url); + logger.debug("Checking url: " + url); DirectDownloadHelper.checkUrlExistence(url); } // Check that the resource limit for secondary storage won't be exceeded @@ -1024,8 +1022,8 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { message.append(cmd.getVirtualMachineId()); message.append(" due to error: "); message.append(ex.getMessage()); - if (s_logger.isDebugEnabled()) { - s_logger.debug(message, ex); + if (logger.isDebugEnabled()) { + logger.debug(message, ex); } throw new CloudRuntimeException(message.toString()); } @@ -1039,7 +1037,7 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e); } finally { if (!created) { - s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); + logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume 
failed to create on the backend"); _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, cmd.getDisplayVolume()); _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, cmd.getDisplayVolume(), new Long(volume.getSize())); } @@ -1291,7 +1289,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. */ // We need to publish this event to usage_volume table if (volume.getState() == Volume.State.Allocated) { - s_logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS."); + logger.debug("Volume is in the allocated state, but has never been created. Simply updating database with new size and IOPS."); volume.setSize(newSize); volume.setMinIops(newMinIops); @@ -1481,7 +1479,7 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.warn("Failed to resize the volume " + volume); + logger.warn("Failed to resize the volume " + volume); String details = ""; if (result.getResult() != null && !result.getResult().isEmpty()) { details = result.getResult(); @@ -1563,7 +1561,7 @@ private boolean deleteVolumeFromStorage(VolumeVO volume, Account caller) throws cleanVolumesCache(volume); return true; } catch (InterruptedException | ExecutionException e) { - s_logger.warn("Failed to expunge volume: " + volume.getUuid(), e); + logger.warn("Failed to expunge volume: " + volume.getUuid(), e); return false; } } @@ -1646,19 +1644,19 @@ protected void expungeVolumesInSecondaryStorageIfNeeded(VolumeVO volume) throws private void expungeVolumesInPrimaryOrSecondary(VolumeVO volume, DataStoreRole role) throws InterruptedException, ExecutionException { if (!canAccessVolumeStore(volume, role)) { - 
s_logger.debug(String.format("Cannot access the storage pool with role: %s " + + logger.debug(String.format("Cannot access the storage pool with role: %s " + "for the volume: %s, skipping expunge from storage", role.name(), volume.getName())); return; } VolumeInfo volOnStorage = volFactory.getVolume(volume.getId(), role); if (volOnStorage != null) { - s_logger.info("Expunging volume " + volume.getId() + " from " + role + " data store"); + logger.info("Expunging volume " + volume.getId() + " from " + role + " data store"); AsyncCallFuture future = volService.expungeVolumeAsync(volOnStorage); VolumeApiResult result = future.get(); if (result.isFailed()) { String msg = "Failed to expunge the volume " + volume + " in " + role + " data store"; - s_logger.warn(msg); + logger.warn(msg); String details = ""; if (result.getResult() != null && !result.getResult().isEmpty()) { details = msg + " : " + result.getResult(); @@ -1690,7 +1688,7 @@ protected void cleanVolumesCache(VolumeVO volume) { return; } for (VolumeInfo volOnCache : cacheVols) { - s_logger.info("Delete volume from image cache store: " + volOnCache.getDataStore().getName()); + logger.info("Delete volume from image cache store: " + volOnCache.getDataStore().getName()); volOnCache.delete(); } } @@ -1731,7 +1729,7 @@ public Volume destroyVolume(long volumeId, Account caller, boolean expunge, bool stateTransitTo(volume, Volume.Event.DestroyRequested); stateTransitTo(volume, Volume.Event.OperationSucceeded); } catch (NoTransitionException e) { - s_logger.debug("Failed to destroy volume" + volume.getId(), e); + logger.debug("Failed to destroy volume" + volume.getId(), e); return null; } _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplay()); @@ -1739,7 +1737,7 @@ public Volume destroyVolume(long volumeId, Account caller, boolean expunge, bool return volume; } if (!deleteVolumeFromStorage(volume, caller)) { - s_logger.warn("Failed to expunge volume: " + volumeId); + 
logger.warn("Failed to expunge volume: " + volumeId); return null; } removeVolume(volume.getId()); @@ -1781,7 +1779,7 @@ public Volume recoverVolume(long volumeId) { try { _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(volume.getAccountId()), ResourceType.primary_storage, volume.isDisplayVolume(), volume.getSize()); } catch (ResourceAllocationException e) { - s_logger.error("primary storage resource limit check failed", e); + logger.error("primary storage resource limit check failed", e); throw new InvalidParameterValueException(e.getMessage()); } @@ -1789,7 +1787,7 @@ public Volume recoverVolume(long volumeId) { _volsDao.detachVolume(volume.getId()); stateTransitTo(volume, Volume.Event.RecoverRequested); } catch (NoTransitionException e) { - s_logger.debug("Failed to recover volume" + volume.getId(), e); + logger.debug("Failed to recover volume" + volume.getId(), e); throw new CloudRuntimeException("Failed to recover volume" + volume.getId(), e); } _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplay()); @@ -1814,7 +1812,7 @@ public void publishVolumeCreationUsageEvent(Volume volume) { .publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), offeringId, volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplay()); - s_logger.debug(String.format("Volume [%s] has been successfully recovered, thus a new usage event %s has been published.", volume.getUuid(), EventTypes.EVENT_VOLUME_CREATE)); + logger.debug(String.format("Volume [%s] has been successfully recovered, thus a new usage event %s has been published.", volume.getUuid(), EventTypes.EVENT_VOLUME_CREATE)); } @@ -1864,7 +1862,7 @@ private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDisk /* If this volume has never been beyond allocated state, short circuit everything and simply update the database. 
*/ // We need to publish this event to usage_volume table if (volume.getState() == Volume.State.Allocated) { - s_logger.debug(String.format("Volume %s is in the allocated state, but has never been created. Simply updating database with new size and IOPS.", volume.getUuid())); + logger.debug(String.format("Volume %s is in the allocated state, but has never been created. Simply updating database with new size and IOPS.", volume.getUuid())); volume.setSize(newSize); volume.setMinIops(newMinIops); @@ -1929,7 +1927,7 @@ private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDisk volume = resizeVolumeInternal(volume, newDiskOffering, currentSize, newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk); } catch (Exception e) { if (volumeMigrateRequired) { - s_logger.warn(String.format("Volume change offering operation succeeded for volume ID: %s but volume resize operation failed, so please try resize volume operation separately", volume.getUuid())); + logger.warn(String.format("Volume change offering operation succeeded for volume ID: %s but volume resize operation failed, so please try resize volume operation separately", volume.getUuid())); } else { throw new CloudRuntimeException(String.format("Volume change offering operation failed for volume ID: %s due to resize volume operation failed", volume.getUuid())); } @@ -1960,7 +1958,7 @@ private VolumeVO resizeVolumeInternal(VolumeVO volume, DiskOfferingVO newDiskOff if (userVm != null) { if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState() != VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware) { - s_logger.error(" For ROOT volume resize VM should be in Power Off state."); + logger.error(" For ROOT volume resize VM should be in Power Off state."); throw new InvalidParameterValueException("VM current state is : " + userVm.getPowerState() + ". 
But VM should be in " + VirtualMachine.PowerState.PowerOff + " state."); } // serialize VM operation @@ -2042,7 +2040,7 @@ private void validateVolumeReadyStateAndHypervisorChecks(VolumeVO volume, long c UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); if (userVm != null) { if (volume.getVolumeType().equals(Volume.Type.ROOT) && userVm.getPowerState() != VirtualMachine.PowerState.PowerOff && hypervisorType == HypervisorType.VMware) { - s_logger.error(" For ROOT volume resize VM should be in Power Off state."); + logger.error(" For ROOT volume resize VM should be in Power Off state."); throw new InvalidParameterValueException("VM current state is : " + userVm.getPowerState() + ". But VM should be in " + VirtualMachine.PowerState.PowerOff + " state."); } } @@ -2173,7 +2171,7 @@ private void validateVolumeResizeWithSize(VolumeVO volume, long currentSize, Lon if (currentSize > newSize) { if (volume != null && ImageFormat.QCOW2.equals(volume.getFormat()) && !Volume.State.Allocated.equals(volume.getState())) { String message = "Unable to shrink volumes of type QCOW2"; - s_logger.warn(message); + logger.warn(message); throw new InvalidParameterValueException(message); } } @@ -2223,10 +2221,10 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device } } } - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { String msg = "attaching volume %s/%s to a VM (%s/%s) with an existing volume %s/%s on primary storage %s"; if (existingVolumeOfVm != null) { - s_logger.trace(String.format(msg, + logger.trace(String.format(msg, volumeToAttach.getName(), volumeToAttach.getUuid(), vm.getName(), vm.getUuid(), existingVolumeOfVm.getName(), existingVolumeOfVm.getUuid(), @@ -2243,8 +2241,8 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device StoragePoolVO destPrimaryStorage = null; if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) { destPrimaryStorage = 
_storagePoolDao.findById(existingVolumeOfVm.getPoolId()); - if (s_logger.isTraceEnabled() && destPrimaryStorage != null) { - s_logger.trace(String.format("decided on target storage: %s/%s", destPrimaryStorage.getName(), destPrimaryStorage.getUuid())); + if (logger.isTraceEnabled() && destPrimaryStorage != null) { + logger.trace(String.format("decided on target storage: %s/%s", destPrimaryStorage.getName(), destPrimaryStorage.getUuid())); } } @@ -2257,7 +2255,7 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device } newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, rootDiskHyperType, destPrimaryStorage); } catch (NoTransitionException e) { - s_logger.debug("Failed to create volume on primary storage", e); + logger.debug("Failed to create volume on primary storage", e); throw new CloudRuntimeException("Failed to create volume on primary storage", e); } } @@ -2265,9 +2263,9 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device // reload the volume from db newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId()); boolean moveVolumeNeeded = needMoveVolume(existingVolumeOfVm, newVolumeOnPrimaryStorage); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("is this a new volume: %s == %s ?", volumeToAttach, newVolumeOnPrimaryStorage)); - s_logger.trace(String.format("is it needed to move the volume: %b?", moveVolumeNeeded)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("is this a new volume: %s == %s ?", volumeToAttach, newVolumeOnPrimaryStorage)); + logger.trace(String.format("is it needed to move the volume: %b?", moveVolumeNeeded)); } if (moveVolumeNeeded) { @@ -2282,10 +2280,10 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), 
vmRootVolumePool.getClusterId(), volumeToAttachHyperType); } catch (ConcurrentOperationException e) { - s_logger.debug("move volume failed", e); + logger.debug("move volume failed", e); throw new CloudRuntimeException("move volume failed", e); } catch (StorageUnavailableException e) { - s_logger.debug("move volume failed", e); + logger.debug("move volume failed", e); throw new CloudRuntimeException("move volume failed", e); } } @@ -2322,8 +2320,8 @@ public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId) { HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId()); StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId()); - if (s_logger.isTraceEnabled() && volumeToAttachStoragePool != null) { - s_logger.trace(String.format("volume to attach (%s/%s) has a primary storage assigned to begin with (%s/%s)", + if (logger.isTraceEnabled() && volumeToAttachStoragePool != null) { + logger.trace(String.format("volume to attach (%s/%s) has a primary storage assigned to begin with (%s/%s)", volumeToAttach.getName(), volumeToAttach.getUuid(), volumeToAttachStoragePool.getName(), volumeToAttachStoragePool.getUuid())); } @@ -2333,8 +2331,8 @@ public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId) { AsyncJob job = asyncExecutionContext.getJob(); - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Trying to attach volume [%s/%s] to VM instance [%s/%s], update async job-%s progress status", + if (logger.isInfoEnabled()) { + logger.info(String.format("Trying to attach volume [%s/%s] to VM instance [%s/%s], update async job-%s progress status", volumeToAttach.getName(), volumeToAttach.getUuid(), vm.getName(), @@ -2414,7 +2412,7 @@ private void checkRightsToAttach(Account caller, VolumeInfo volumeToAttach, User try { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize()); } catch (ResourceAllocationException e) { - 
s_logger.error("primary storage resource limit check failed", e); + logger.error("primary storage resource limit check failed", e); throw new InvalidParameterValueException(e.getMessage()); } } @@ -2555,9 +2553,9 @@ protected String createVolumeInfoFromVolumes(List vmVolumes) { return GsonHelper.getGson().toJson(list.toArray(), Backup.VolumeInfo[].class); } catch (Exception e) { if (CollectionUtils.isEmpty(vmVolumes) || vmVolumes.get(0).getInstanceId() == null) { - s_logger.error(String.format("Failed to create VolumeInfo of VM [id: null] volumes due to: [%s].", e.getMessage()), e); + logger.error(String.format("Failed to create VolumeInfo of VM [id: null] volumes due to: [%s].", e.getMessage()), e); } else { - s_logger.error(String.format("Failed to create VolumeInfo of VM [id: %s] volumes due to: [%s].", vmVolumes.get(0).getInstanceId(), e.getMessage()), e); + logger.error(String.format("Failed to create VolumeInfo of VM [id: %s] volumes due to: [%s].", vmVolumes.get(0).getInstanceId(), e.getMessage()), e); } throw e; } @@ -2745,8 +2743,8 @@ public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { if (asyncExecutionContext != null) { AsyncJob job = asyncExecutionContext.getJob(); - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Trying to attach volume %s to VM instance %s, update async job-%s progress status", + if (logger.isInfoEnabled()) { + logger.info(String.format("Trying to attach volume %s to VM instance %s, update async job-%s progress status", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"), ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "name", "uuid"), job.getId())); @@ -2885,7 +2883,7 @@ private Volume orchestrateDetachVolumeFromVM(long vmId, long volumeId) { volumeVO.setPoolId(storagePoolVO.getId()); _volsDao.update(volumeVO.getId(), volumeVO); } else { - s_logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); 
+ logger.warn(String.format("Unable to find datastore %s while updating the new datastore of the volume %d", datastoreName, volumeId)); } } @@ -2944,7 +2942,7 @@ public void updateMissingRootDiskController(final VMInstanceVO vm, final String _userVmMgr.persistDeviceBusInfo(userVmVo, rootDiskController); } } catch (JsonParseException e) { - s_logger.debug("Error parsing chain info json: " + e.getMessage()); + logger.debug("Error parsing chain info json: " + e.getMessage()); } } @@ -2979,11 +2977,11 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { if (answer == null) { String msg = "Unable to get an answer to the modify targets command"; - s_logger.warn(msg); + logger.warn(msg); } else if (!answer.getResult()) { String msg = "Unable to modify target on the following host: " + hostId; - s_logger.warn(msg); + logger.warn(msg); } } @@ -3291,11 +3289,11 @@ protected void validateConditionsToReplaceDiskOfferingOfVolume(VolumeVO volume, if (volume.getSize() != newDiskOffering.getDiskSize()) { DiskOfferingVO oldDiskOffering = this._diskOfferingDao.findById(volume.getDiskOfferingId()); - s_logger.warn(String.format( + logger.warn(String.format( "You are migrating a volume [id=%s] and changing the disk offering[from id=%s to id=%s] to reflect this migration. 
However, the sizes of the volume and the new disk offering are different.", volume.getUuid(), oldDiskOffering.getUuid(), newDiskOffering.getUuid())); } - s_logger.info(String.format("Changing disk offering to [uuid=%s] while migrating volume [uuid=%s, name=%s].", newDiskOffering.getUuid(), volume.getUuid(), volume.getName())); + logger.info(String.format("Changing disk offering to [uuid=%s] while migrating volume [uuid=%s, name=%s].", newDiskOffering.getUuid(), volume.getUuid(), volume.getName())); } /** @@ -3340,14 +3338,14 @@ public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String Pair, Boolean> storagePoolTags = getStoragePoolTags(destPool); if ((storagePoolTags == null || !storagePoolTags.second()) && org.apache.commons.lang.StringUtils.isBlank(diskOfferingTags)) { if (storagePoolTags == null) { - s_logger.debug(String.format("Destination storage pool [%s] does not have any tags, and so does the disk offering. Therefore, they are compatible", destPool.getUuid())); + logger.debug(String.format("Destination storage pool [%s] does not have any tags, and so does the disk offering. Therefore, they are compatible", destPool.getUuid())); } else { - s_logger.debug("Destination storage pool has tags [%s], and the disk offering has no tags. Therefore, they are compatible."); + logger.debug("Destination storage pool has tags [%s], and the disk offering has no tags. Therefore, they are compatible."); } return true; } if (storagePoolTags == null || CollectionUtils.isEmpty(storagePoolTags.first())) { - s_logger.debug(String.format("Destination storage pool [%s] has no tags, while disk offering has tags [%s]. Therefore, they are not compatible", destPool.getUuid(), + logger.debug(String.format("Destination storage pool [%s] has no tags, while disk offering has tags [%s]. 
Therefore, they are not compatible", destPool.getUuid(), diskOfferingTags)); return false; } @@ -3360,7 +3358,7 @@ public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String } else { result = CollectionUtils.isSubCollection(Arrays.asList(newDiskOfferingTagsAsStringArray), storageTagsList); } - s_logger.debug(String.format("Destination storage pool [%s] accepts tags [%s]? %s", destPool.getUuid(), diskOfferingTags, result)); + logger.debug(String.format("Destination storage pool [%s] accepts tags [%s]? %s", destPool.getUuid(), diskOfferingTags, result)); return result; } @@ -3402,10 +3400,10 @@ private Volume orchestrateMigrateVolume(VolumeVO volume, StoragePool destPool, b _volsDao.updateDiskOffering(newVol.getId(), newDiskOffering.getId()); } } catch (StorageUnavailableException e) { - s_logger.debug("Failed to migrate volume", e); + logger.debug("Failed to migrate volume", e); throw new CloudRuntimeException(e.getMessage()); } catch (Exception e) { - s_logger.debug("Failed to migrate volume", e); + logger.debug("Failed to migrate volume", e); throw new CloudRuntimeException(e.getMessage()); } return newVol; @@ -3420,15 +3418,15 @@ protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) throws S try { VolumeApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("migrate volume failed:" + result.getResult()); + logger.debug("migrate volume failed:" + result.getResult()); throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId()); } return result.getVolume(); } catch (InterruptedException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); throw new CloudRuntimeException(e.getMessage()); } catch (ExecutionException e) { - s_logger.debug("migrate volume failed", e); + logger.debug("migrate volume failed", e); throw new CloudRuntimeException(e.getMessage()); } } @@ -3558,7 +3556,7 @@ private Snapshot 
orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Lon boolean isSnapshotOnStorPoolOnly = volume.getStoragePoolType() == StoragePoolType.StorPool && BooleanUtils.toBoolean(_configDao.getValue("sp.bypass.secondary.storage")); if (volume.getEncryptFormat() != null && volume.getAttachedVM() != null && volume.getAttachedVM().getState() != State.Stopped && !isSnapshotOnStorPoolOnly) { - s_logger.debug(String.format("Refusing to take snapshot of encrypted volume (%s) on running VM (%s)", volume, volume.getAttachedVM())); + logger.debug(String.format("Refusing to take snapshot of encrypted volume (%s) on running VM (%s)", volume, volume.getAttachedVM())); throw new UnsupportedOperationException("Volume snapshots for encrypted volumes are not supported if VM is running"); } @@ -3762,7 +3760,7 @@ public String extractVolume(ExtractVolumeCmd cmd) { // Extract activity only for detached volumes or for volumes whose // instance is stopped if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { - s_logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); + logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); PermissionDeniedException ex = new PermissionDeniedException("Invalid state of the volume with specified ID. 
It should be either detached or the VM should be in stopped state."); ex.addProxyObject(volume.getUuid(), "volumeId"); throw ex; @@ -3942,7 +3940,7 @@ protected void validateVolume(String volumeUuid, VolumeVO volume) { if (volume.getInstanceId() != null) { VMInstanceVO vmInstanceVo = _vmInstanceDao.findById(volume.getInstanceId()); String msg = String.format("Volume [%s] is attached to [%s], so it cannot be moved to a different account.", volumeToString, vmInstanceVo); - s_logger.error(msg); + logger.error(msg); throw new PermissionDeniedException(msg); } @@ -3998,7 +3996,7 @@ private Optional setExtractVolumeSearchCriteria(SearchCriteria attachVolumeToVmThroughJobQueue(final Long vmId, final Lo _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); AsyncJobVO jobVo = _jobMgr.getAsyncJob(workJob.getId()); - s_logger.debug("New job " + workJob.getId() + ", result field: " + jobVo.getResult()); + logger.debug("New job " + workJob.getId() + ", result field: " + jobVo.getResult()); AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); diff --git a/server/src/main/java/com/cloud/storage/download/DownloadActiveState.java b/server/src/main/java/com/cloud/storage/download/DownloadActiveState.java index 9a22eb859a2a..989e0b55f94e 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadActiveState.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadActiveState.java @@ -16,12 +16,12 @@ // under the License. 
package com.cloud.storage.download; -import org.apache.log4j.Level; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import org.apache.logging.log4j.Level; public abstract class DownloadActiveState extends DownloadState { @@ -31,8 +31,8 @@ public DownloadActiveState(DownloadListener dl) { @Override public String handleAnswer(DownloadAnswer answer) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("handleAnswer, answer status=" + answer.getDownloadStatus() + ", curr state=" + getName()); + if (logger.isTraceEnabled()) { + logger.trace("handleAnswer, answer status=" + answer.getDownloadStatus() + ", curr state=" + getName()); } switch (answer.getDownloadStatus()) { case DOWNLOAD_IN_PROGRESS: @@ -72,7 +72,7 @@ public void onExit() { @Override public String handleTimeout(long updateMs) { - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { getDownloadListener().log("handleTimeout, updateMs=" + updateMs + ", curr state= " + getName(), Level.TRACE); } String newState = getName(); diff --git a/server/src/main/java/com/cloud/storage/download/DownloadErrorState.java b/server/src/main/java/com/cloud/storage/download/DownloadErrorState.java index 1591a3417357..a0834456a2d0 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadErrorState.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadErrorState.java @@ -16,12 +16,12 @@ // under the License. 
package com.cloud.storage.download; -import org.apache.log4j.Level; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import org.apache.logging.log4j.Level; public class DownloadErrorState extends DownloadInactiveState { diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index 7cd2e2a790a6..bd0c0eff1bce 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -24,8 +24,9 @@ import javax.inject.Inject; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -95,7 +96,7 @@ protected void runInContext() { } } - public static final Logger s_logger = Logger.getLogger(DownloadListener.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); public static final int SMALL_DELAY = 100; public static final long STATUS_POLL_INTERVAL = 10000L; @@ -174,7 +175,7 @@ private DownloadState getState(String stateName) { public void sendCommand(RequestType reqType) { if (getJobId() != null) { - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { log("Sending progress command ", Level.TRACE); } try { @@ -186,7 +187,7 @@ public void sendCommand(RequestType reqType) { } _ssAgent.sendMessageAsync(dcmd, new UploadListener.Callback(_ssAgent.getId(), this)); } catch (Exception e) { - s_logger.debug("Send command failed", e); + logger.debug("Send command failed", e); setDisconnected(); } } @@ -202,11 +203,11 @@ public void 
setDisconnected() { } public void logDisconnect() { - s_logger.warn("Unable to monitor download progress of " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); + logger.warn("Unable to monitor download progress of " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); } public void log(String message, Level level) { - s_logger.log(level, message + ", " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); + logger.log(level, message + ", " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); } public DownloadListener(DownloadMonitorImpl monitor) { @@ -304,7 +305,7 @@ else if (cmd instanceof StartupSecondaryStorageCommand) { _imageSrv.handleTemplateSync(store); } }catch (Exception e){ - s_logger.error("Caught exception while doing template/volume sync ", e); + logger.error("Caught exception while doing template/volume sync ", e); } } } @@ -357,7 +358,7 @@ public void scheduleTimeoutTask(long delay) { _timeoutTask = new TimeoutTask(this); _timer.schedule(_timeoutTask, delay); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { log("Scheduling timeout at " + delay + " ms", Level.DEBUG); } } diff --git a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java index 90782dd934b9..d21257516e29 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadMonitorImpl.java @@ -45,7 +45,6 @@ import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -64,7 +63,6 @@ @Component public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor 
{ - static final Logger LOGGER = Logger.getLogger(DownloadMonitorImpl.class); @Inject private TemplateDataStoreDao _vmTemplateStoreDao; @@ -91,7 +89,7 @@ public boolean configure(String name, Map params) { String cert = configs.get("secstorage.ssl.cert.domain"); if (!"realhostip.com".equalsIgnoreCase(cert)) { - LOGGER.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); + logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); } _copyAuthPasswd = configs.get("secstorage.copy.password"); @@ -155,7 +153,7 @@ private void initiateTemplateDownload(DataObject template, AsyncCompletionCallba EndPoint ep = _epSelector.select(template); if (ep == null) { String errMsg = "There is no secondary storage VM for downloading template to image store " + store.getName(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } DownloadListener dl = new DownloadListener(ep, store, template, _timer, this, dcmd, callback); @@ -166,14 +164,14 @@ private void initiateTemplateDownload(DataObject template, AsyncCompletionCallba // DownloadListener to use // new ObjectInDataStore.State transition. TODO: fix this later // to be able to remove downloadState from template_store_ref. 
- LOGGER.info("found existing download job"); + logger.info("found existing download job"); dl.setCurrState(vmTemplateStore.getDownloadState()); } try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - LOGGER.warn("Unable to start /resume download of template " + template.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of template " + template.getId() + " to " + store.getName(), e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -212,7 +210,7 @@ private void initiateSnapshotDownload(DataObject snapshot, AsyncCompletionCallba EndPoint ep = _epSelector.select(snapshot); if (ep == null) { String errMsg = "There is no secondary storage VM for downloading snapshot to image store " + store.getName(); - LOGGER.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } DownloadListener dl = new DownloadListener(ep, store, snapshot, _timer, this, dcmd, callback); @@ -223,14 +221,14 @@ private void initiateSnapshotDownload(DataObject snapshot, AsyncCompletionCallba // DownloadListener to use // new ObjectInDataStore.State transition. TODO: fix this later // to be able to remove downloadState from template_store_ref. 
- LOGGER.info("found existing download job"); + logger.info("found existing download job"); dl.setCurrState(snapshotStore.getDownloadState()); } try { ep.sendMessageAsync(dcmd, new UploadListener.Callback(ep.getId(), dl)); } catch (Exception e) { - LOGGER.warn("Unable to start /resume download of snapshot " + snapshot.getId() + " to " + store.getName(), e); + logger.warn("Unable to start /resume download of snapshot " + snapshot.getId() + " to " + store.getName(), e); dl.setDisconnected(); dl.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -246,12 +244,12 @@ public void downloadTemplateToStorage(DataObject template, AsyncCompletionCallba if (template.getUri() != null) { initiateTemplateDownload(template, callback); } else { - LOGGER.info("Template url is null, cannot download"); + logger.info("Template url is null, cannot download"); DownloadAnswer ans = new DownloadAnswer("Template url is null", Status.UNKNOWN); callback.complete(ans); } } else { - LOGGER.info("Template download is already in progress or already downloaded"); + logger.info("Template download is already in progress or already downloaded"); DownloadAnswer ans = new DownloadAnswer("Template download is already in progress or already downloaded", Status.UNKNOWN); callback.complete(ans); @@ -294,7 +292,7 @@ public void downloadVolumeToStorage(DataObject volume, AsyncCompletionCallback storagePoolHosts = _storageManager.findStoragePoolsConnectedToHost(host.getId()); if (storagePoolHosts == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("No pools to disconnect for host: " + host.getId()); + if (logger.isTraceEnabled()) { + logger.trace("No pools to disconnect for host: " + host.getId()); } return true; } @@ -181,7 +182,7 @@ public synchronized boolean processDisconnect(long agentId, Status state) { try { _storageManager.disconnectHostFromSharedPool(host.getId(), pool.getId()); } catch (Exception e) { - s_logger.error("Unable to disconnect host " + host.getId() + " from storage pool id 
" + pool.getId() + " due to " + e.toString()); + logger.error("Unable to disconnect host " + host.getId() + " from storage pool id " + pool.getId() + " due to " + e.toString()); disconnectResult = false; } } @@ -204,7 +205,7 @@ public void processHostAboutToBeRemoved(long hostId) { } } catch (Exception ex) { - s_logger.error("hostAboutToBeRemoved(long) failed for storage provider " + provider.getName(), ex); + logger.error("hostAboutToBeRemoved(long) failed for storage provider " + provider.getName(), ex); } } } @@ -226,7 +227,7 @@ public void processHostRemoved(long hostId, long clusterId) { } } catch (Exception ex) { - s_logger.error("hostRemoved(long, long) failed for storage provider " + provider.getName(), ex); + logger.error("hostRemoved(long, long) failed for storage provider " + provider.getName(), ex); } } } diff --git a/server/src/main/java/com/cloud/storage/listener/StorageSyncListener.java b/server/src/main/java/com/cloud/storage/listener/StorageSyncListener.java index eeef434367ee..24e6b7972df6 100644 --- a/server/src/main/java/com/cloud/storage/listener/StorageSyncListener.java +++ b/server/src/main/java/com/cloud/storage/listener/StorageSyncListener.java @@ -16,7 +16,8 @@ // under the License. 
package com.cloud.storage.listener; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -28,7 +29,7 @@ import com.cloud.host.Status; public class StorageSyncListener implements Listener { - private static final Logger s_logger = Logger.getLogger(StorageSyncListener.class); + protected Logger logger = LogManager.getLogger(getClass()); public StorageSyncListener() { } @@ -42,9 +43,9 @@ public boolean isRecurring() { public boolean processAnswers(long agentId, long seq, Answer[] answers) { for (Answer answer : answers) { if (answer.getResult() == false) { - s_logger.warn("Unable to execute sync command: " + answer.toString()); + logger.warn("Unable to execute sync command: " + answer.toString()); } else { - s_logger.debug("Sync command executed: " + answer.toString()); + logger.debug("Sync command executed: " + answer.toString()); } } return true; @@ -60,7 +61,7 @@ public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) @Override public boolean processDisconnect(long agentId, Status state) { - s_logger.debug("Disconnecting"); + logger.debug("Disconnecting"); return true; } diff --git a/server/src/main/java/com/cloud/storage/listener/VolumeStateListener.java b/server/src/main/java/com/cloud/storage/listener/VolumeStateListener.java index d2a4dc93b2f3..9b9913b7a5d3 100644 --- a/server/src/main/java/com/cloud/storage/listener/VolumeStateListener.java +++ b/server/src/main/java/com/cloud/storage/listener/VolumeStateListener.java @@ -28,7 +28,8 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -50,7 +51,7 @@ public class VolumeStateListener implements StateListener protected ConfigurationDao _configDao; protected VMInstanceDao _vmInstanceDao; - private static final Logger s_logger = Logger.getLogger(VolumeStateListener.class); + protected Logger logger = LogManager.getLogger(getClass()); public VolumeStateListener(ConfigurationDao configDao, VMInstanceDao vmInstanceDao) { this._configDao = configDao; @@ -122,7 +123,7 @@ private void pubishOnEventBus(String event, String status, Volume vo, State oldS try { s_eventBus.publish(eventMsg); } catch (EventBusException e) { - s_logger.warn("Failed to state change event on the event bus."); + logger.warn("Failed to state change event on the event bus."); } } diff --git a/server/src/main/java/com/cloud/storage/resource/DummySecondaryStorageResource.java b/server/src/main/java/com/cloud/storage/resource/DummySecondaryStorageResource.java index ce01a69aa0a4..f841c9733ee1 100644 --- a/server/src/main/java/com/cloud/storage/resource/DummySecondaryStorageResource.java +++ b/server/src/main/java/com/cloud/storage/resource/DummySecondaryStorageResource.java @@ -24,7 +24,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand; @@ -54,7 +53,6 @@ import com.cloud.storage.template.TemplateProp; public class DummySecondaryStorageResource extends ServerResourceBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(DummySecondaryStorageResource.class); String _dc; String _pod; diff --git a/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageListener.java b/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageListener.java index b78a548729cc..f0acd0afc510 100644 --- 
a/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageListener.java +++ b/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageListener.java @@ -16,7 +16,8 @@ // under the License. package com.cloud.storage.secondary; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -31,7 +32,7 @@ import com.cloud.storage.Storage; public class SecondaryStorageListener implements Listener { - private final static Logger s_logger = Logger.getLogger(SecondaryStorageListener.class); + protected Logger logger = LogManager.getLogger(getClass()); SecondaryStorageVmManager _ssVmMgr = null; @@ -78,8 +79,8 @@ public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) return; } } else if (cmd instanceof StartupSecondaryStorageCommand) { - if (s_logger.isInfoEnabled()) { - s_logger.info("Received a host startup notification " + cmd); + if (logger.isInfoEnabled()) { + logger.info("Received a host startup notification " + cmd); } _ssVmMgr.onAgentConnect(agent.getDataCenterId(), cmd); _ssVmMgr.generateSetupCommand(agent.getId()); diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 940860dd04d7..c20dc9e05c2a 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -78,7 +78,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -174,7 +173,6 @@ @Component public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implements 
SnapshotManager, SnapshotApiService, Configurable { - private static final Logger s_logger = Logger.getLogger(SnapshotManagerImpl.class); @Inject VMTemplateDao _templateDao; @Inject @@ -303,25 +301,25 @@ public Answer sendToPool(Volume vol, Command cmd) { if (result.second().getResult()) { return result.second(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("The result for " + cmd.getClass().getName() + " is " + result.second().getDetails() + " through " + result.first()); + if (logger.isDebugEnabled()) { + logger.debug("The result for " + cmd.getClass().getName() + " is " + result.second().getDetails() + " through " + result.first()); } hostIdsToAvoid.add(result.first()); } catch (StorageUnavailableException e1) { - s_logger.warn("Storage unavailable ", e1); + logger.warn("Storage unavailable ", e1); return null; } try { Thread.sleep(_pauseInterval * 1000); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while retry cmd."); + logger.debug("[ignored] interrupted while retry cmd."); } - s_logger.debug("Retrying..."); + logger.debug("Retrying..."); } - s_logger.warn("After " + _totalRetries + " retries, the command " + cmd.getClass().getName() + " did not succeed."); + logger.warn("After " + _totalRetries + " retries, the command " + cmd.getClass().getName() + " did not succeed."); return null; } @@ -381,7 +379,7 @@ public Snapshot revertSnapshot(Long snapshotId) { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT); if (snapshotStrategy == null) { - s_logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); + logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); String errorMsg = String.format("Revert snapshot command failed for snapshot with id %d, because this command is supported only for KVM hypervisor", snapshotId); throw new CloudRuntimeException(errorMsg); } @@ -447,7 +445,7 @@ 
public Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, Ac SnapshotInfo snapshot = snapshotFactory.getSnapshotOnPrimaryStore(snapshotId); if (snapshot == null) { - s_logger.debug("Failed to create snapshot"); + logger.debug("Failed to create snapshot"); throw new CloudRuntimeException("Failed to create snapshot"); } try { @@ -461,7 +459,7 @@ public Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, Ac _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); } catch (Exception e) { - s_logger.debug("Failed to create snapshot", e); + logger.debug("Failed to create snapshot", e); throw new CloudRuntimeException("Failed to create snapshot", e); } @@ -553,7 +551,7 @@ public Snapshot backupSnapshotFromVmSnapshot(Long snapshotId, Long vmId, Long vo snapshotInfo = snapshotStrategy.backupSnapshot(snapshotInfo); } catch (Exception e) { - s_logger.debug("Failed to backup snapshot from vm snapshot", e); + logger.debug("Failed to backup snapshot from vm snapshot", e); _resourceLimitMgr.decrementResourceCount(snapshotOwnerId, ResourceType.snapshot); _resourceLimitMgr.decrementResourceCount(snapshotOwnerId, ResourceType.secondary_storage, new Long(volume.getSize())); throw new CloudRuntimeException("Failed to backup snapshot from vm snapshot", e); @@ -637,7 +635,7 @@ private void postCreateRecurringSnapshotForPolicy(long userId, long volumeId, lo SnapshotVO oldestSnapshot = snaps.get(0); long oldSnapId = oldestSnapshot.getId(); if (policy != null) { - s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". Deleting oldest snapshot: " + oldSnapId); + logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". 
Deleting oldest snapshot: " + oldSnapId); } if (deleteSnapshot(oldSnapId, null)) { //log Snapshot delete event @@ -702,7 +700,7 @@ public boolean deleteSnapshot(long snapshotId, Long zoneId) { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, zoneId, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - s_logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); + logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshotId + "'"); return false; } @@ -736,7 +734,7 @@ public boolean deleteSnapshot(long snapshotId, Long zoneId) { return result; } catch (Exception e) { - s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); + logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); } @@ -887,18 +885,18 @@ public boolean deleteSnapshotDirsForAccount(long accountId) { Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); } if ((answer != null) && answer.getResult()) { - s_logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId); + logger.debug("Deleted all snapshots for volume: " + volumeId + " under account: " + accountId); } else { success = false; if (answer != null) { - s_logger.warn("Failed to delete all snapshot for volume " + volumeId + " on secondary storage " + ssHost.getUri()); - s_logger.error(answer.getDetails()); + logger.warn("Failed to delete all snapshot for volume " + volumeId + " on secondary storage " + ssHost.getUri()); + logger.error(answer.getDetails()); } } } @@ -908,7 +906,7 @@ public boolean deleteSnapshotDirsForAccount(long accountId) { for 
(SnapshotVO snapshot : snapshots) { SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE); if (snapshotStrategy == null) { - s_logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshot.getId() + "'"); + logger.error("Unable to find snapshot strategy to handle snapshot with id '" + snapshot.getId() + "'"); continue; } List snapshotStoreRefs = _snapshotStoreDao.listReadyBySnapshot(snapshot.getId(), DataStoreRole.Image); @@ -1011,7 +1009,7 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy TimeZone timeZone = TimeZone.getTimeZone(cmdTimezone); String timezoneId = timeZone.getID(); if (!timezoneId.equals(cmdTimezone)) { - s_logger.warn(String.format("Using timezone [%s] for running the snapshot policy [%s] for volume %s, as an equivalent of [%s].", timezoneId, intervalType, volumeDescription, + logger.warn(String.format("Using timezone [%s] for running the snapshot policy [%s] for volume %s, as an equivalent of [%s].", timezoneId, intervalType, volumeDescription, cmdTimezone)); } @@ -1070,7 +1068,7 @@ protected SnapshotPolicyVO persistSnapshotPolicy(VolumeVO volume, String schedul throw new CloudRuntimeException(String.format("Unable to acquire lock for creating snapshot policy [%s] for %s.", intervalType, volumeDescription)); } - s_logger.debug(String.format("Acquired lock for creating snapshot policy [%s] for volume %s.", intervalType, volumeDescription)); + logger.debug(String.format("Acquired lock for creating snapshot policy [%s] for volume %s.", intervalType, volumeDescription)); try { SnapshotPolicyVO policy = _snapshotPolicyDao.findOneByVolumeInterval(volumeId, intervalType); @@ -1101,7 +1099,7 @@ protected SnapshotPolicyVO createSnapshotPolicy(long volumeId, String schedule, snapshotPolicyDetailsDao.saveDetails(details); } _snapSchedMgr.scheduleNextSnapshotJob(policy); - s_logger.debug(String.format("Created snapshot policy %s.", new 
ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE).setExcludeFieldNames("id", "uuid", "active"))); + logger.debug(String.format("Created snapshot policy %s.", new ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE).setExcludeFieldNames("id", "uuid", "active"))); return policy; } @@ -1126,7 +1124,7 @@ protected void updateSnapshotPolicy(SnapshotPolicyVO policy, String schedule, St _snapSchedMgr.scheduleOrCancelNextSnapshotJobOnDisplayChange(policy, previousDisplay); taggedResourceService.deleteTags(Collections.singletonList(policy.getUuid()), ResourceObjectType.SnapshotPolicy, null); - s_logger.debug(String.format("Updated snapshot policy %s to %s.", previousPolicy, new ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE) + logger.debug(String.format("Updated snapshot policy %s to %s.", previousPolicy, new ReflectionToStringBuilder(policy, ToStringStyle.JSON_STYLE) .setExcludeFieldNames("id", "uuid"))); } @@ -1141,7 +1139,7 @@ public void copySnapshotPoliciesBetweenVolumes(VolumeVO srcVolume, VolumeVO dest IntervalType[] intervalTypes = IntervalType.values(); List policies = listPoliciesforVolume(srcVolume.getId()); - s_logger.debug(String.format("Copying snapshot policies %s from volume %s to volume %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(policies, + logger.debug(String.format("Copying snapshot policies %s from volume %s to volume %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(policies, "id", "uuid"), srcVolume.getVolumeDescription(), destVolume.getVolumeDescription())); for (SnapshotPolicyVO policy : policies) { @@ -1284,7 +1282,7 @@ private boolean hostSupportsSnapsthotForVolume(HostVO host, VolumeInfo volume, b if (vm.getState() != VirtualMachine.State.Stopped && vm.getState() != VirtualMachine.State.Destroyed) { boolean snapshotEnabled = Boolean.parseBoolean(_configDao.getValue("kvm.snapshot.enabled")); if (!snapshotEnabled && !isFromVmSnapshot) { - s_logger.debug("Snapshot is not supported on host 
" + host + " for the volume " + volume + " attached to the vm " + vm); + logger.debug("Snapshot is not supported on host " + host + " for the volume " + volume + " attached to the vm " + vm); return false; } } @@ -1387,7 +1385,7 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc if (backupSnapToSecondary) { backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds()); } else { - s_logger.debug("skipping backup of snapshot [uuid=" + snapshot.getUuid() + "] to secondary due to configuration"); + logger.debug("skipping backup of snapshot [uuid=" + snapshot.getUuid() + "] to secondary due to configuration"); snapshotOnPrimary.markBackedUp(); } @@ -1412,18 +1410,18 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc copyNewSnapshotToZones(snapshotId, snapshot.getDataCenterId(), payload.getZoneIds()); } } catch (Exception e) { - s_logger.debug("post process snapshot failed", e); + logger.debug("post process snapshot failed", e); } } catch (CloudRuntimeException cre) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to create snapshot" + cre.getLocalizedMessage()); + if (logger.isDebugEnabled()) { + logger.debug("Failed to create snapshot" + cre.getLocalizedMessage()); } _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize())); throw cre; } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to create snapshot", e); + if (logger.isDebugEnabled()) { + logger.debug("Failed to create snapshot", e); } _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); _resourceLimitMgr.decrementResourceCount(snapshotOwner.getId(), ResourceType.secondary_storage, new Long(volume.getSize())); @@ -1460,7 +1458,7 @@ public BackupSnapshotTask(SnapshotInfo 
snap, int maxRetries, SnapshotStrategy st @Override protected void runInContext() { try { - s_logger.debug("Value of attempts is " + (snapshotBackupRetries - attempts)); + logger.debug("Value of attempts is " + (snapshotBackupRetries - attempts)); SnapshotInfo backupedSnapshot = snapshotStrategy.backupSnapshot(snapshot); @@ -1470,10 +1468,10 @@ protected void runInContext() { } } catch (final Exception e) { if (attempts >= 0) { - s_logger.debug("Backing up of snapshot failed, for snapshot with ID " + snapshot.getSnapshotId() + ", left with " + attempts + " more attempts"); + logger.debug("Backing up of snapshot failed, for snapshot with ID " + snapshot.getSnapshotId() + ", left with " + attempts + " more attempts"); backupSnapshotExecutor.schedule(new BackupSnapshotTask(snapshot, --attempts, snapshotStrategy, zoneIds), snapshotBackupRetryInterval, TimeUnit.SECONDS); } else { - s_logger.debug("Done with " + snapshotBackupRetries + " attempts in backing up of snapshot with ID " + snapshot.getSnapshotId()); + logger.debug("Done with " + snapshotBackupRetries + " attempts in backing up of snapshot with ID " + snapshot.getSnapshotId()); snapshotSrv.cleanupOnSnapshotBackupFailure(snapshot); } } @@ -1509,7 +1507,7 @@ public boolean configure(String name, Map params) throws Configu snapshotBackupRetries = BackupRetryAttempts.value(); snapshotBackupRetryInterval = BackupRetryInterval.value(); backupSnapshotExecutor = Executors.newScheduledThreadPool(10, new NamedThreadFactory("BackupSnapshotTask")); - s_logger.info("Snapshot Manager is configured."); + logger.info("Snapshot Manager is configured."); return true; } @@ -1521,10 +1519,10 @@ public boolean start() { for (SnapshotVO snapshotVO : snapshots) { try { if (!deleteSnapshot(snapshotVO.getId(), null)) { - s_logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); + logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); } } catch (Exception e) 
{ - s_logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); + logger.debug("Failed to delete snapshot in destroying state with id " + snapshotVO.getUuid()); } } return true; @@ -1572,7 +1570,7 @@ public boolean deleteSnapshotPolicies(DeleteSnapshotPoliciesCmd cmd) { for (Long pId : policyIds) { if (!deletePolicy(pId)) { - s_logger.warn("Failed to delete snapshot policy with Id: " + policyId); + logger.warn("Failed to delete snapshot policy with Id: " + policyId); return false; } } @@ -1608,7 +1606,7 @@ public void cleanupSnapshotsByVolume(Long volumeId) { } } catch (CloudRuntimeException e) { String msg = "Cleanup of Snapshot with uuid " + info.getUuid() + " in primary storage is failed. Ignoring"; - s_logger.warn(msg); + logger.warn(msg); } } } @@ -1635,7 +1633,7 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, } catch (ResourceAllocationException e) { if (snapshotType != Type.MANUAL) { String msg = "Snapshot resource limit exceeded for account id : " + owner.getId() + ". Failed to create recurring snapshots"; - s_logger.warn(msg); + logger.warn(msg); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_UPDATE_RESOURCE_COUNT, 0L, 0L, msg, "Snapshot resource limit exceeded for account id : " + owner.getId() + ". 
Failed to create recurring snapshots; please use updateResourceLimit to increase the limit"); } @@ -1705,7 +1703,7 @@ private boolean checkAndProcessSnapshotAlreadyExistInStore(long snapshotId, Data } if (dstSnapshotStore.getState() == ObjectInDataStoreStateMachine.State.Ready) { if (!dstSnapshotStore.isDisplay()) { - s_logger.debug(String.format("Snapshot ID: %d is in ready state on image store ID: %d, marking it displayable for view", snapshotId, dstSnapshotStore.getDataStoreId())); + logger.debug(String.format("Snapshot ID: %d is in ready state on image store ID: %d, marking it displayable for view", snapshotId, dstSnapshotStore.getDataStoreId())); dstSnapshotStore.setDisplay(true); _snapshotStoreDao.update(dstSnapshotStore.getId(), dstSnapshotStore); } @@ -1742,18 +1740,18 @@ private boolean copySnapshotToZone(SnapshotDataStoreVO snapshotDataStoreVO, Data copyUrl = result.getPath(); } } catch (InterruptedException | ExecutionException | ResourceUnavailableException ex) { - s_logger.error(String.format("Failed to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName()), ex); + logger.error(String.format("Failed to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName()), ex); } if (StringUtils.isEmpty(copyUrl)) { - s_logger.error(String.format("Unable to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName())); + logger.error(String.format("Unable to prepare URL for copy for snapshot ID: %d on store: %s", snapshotId, srcSecStore.getName())); return false; } - s_logger.debug(String.format("Copying snapshot ID: %d to destination zones using download URL: %s", snapshotId, copyUrl)); + logger.debug(String.format("Copying snapshot ID: %d to destination zones using download URL: %s", snapshotId, copyUrl)); try { AsyncCallFuture future = snapshotSrv.copySnapshot(snapshotOnSecondary, copyUrl, dstSecStore); SnapshotResult result = future.get(); if (result.isFailed()) { 
- s_logger.debug(String.format("Copy snapshot ID: %d failed for image store %s: %s", snapshotId, dstSecStore.getName(), result.getResult())); + logger.debug(String.format("Copy snapshot ID: %d failed for image store %s: %s", snapshotId, dstSecStore.getName(), result.getResult())); return false; } snapshotZoneDao.addSnapshotToZone(snapshotId, dstZoneId); @@ -1765,7 +1763,7 @@ private boolean copySnapshotToZone(SnapshotDataStoreVO snapshotDataStoreVO, Data } return true; } catch (InterruptedException | ExecutionException | ResourceUnavailableException ex) { - s_logger.debug(String.format("Failed to copy snapshot ID: %d to image store: %s", snapshotId, dstSecStore.getName())); + logger.debug(String.format("Failed to copy snapshot ID: %d to image store: %s", snapshotId, dstSecStore.getName())); } return false; } @@ -1782,7 +1780,7 @@ private boolean copySnapshotChainToZone(SnapshotVO snapshotVO, DataStore srcSecS do { dstSecStore = getSnapshotZoneImageStore(currentSnap.getSnapshotId(), destZone.getId()); if (dstSecStore != null) { - s_logger.debug(String.format("Snapshot ID: %d is already present in secondary storage: %s" + + logger.debug(String.format("Snapshot ID: %d is already present in secondary storage: %s" + " in zone %s in ready state, don't need to copy any further", currentSnap.getSnapshotId(), dstSecStore.getName(), destZone)); if (snapshotId == currentSnap.getSnapshotId()) { @@ -1802,7 +1800,7 @@ private boolean copySnapshotChainToZone(SnapshotVO snapshotVO, DataStore srcSecS try { _resourceLimitMgr.checkResourceLimit(account, ResourceType.secondary_storage, size); } catch (ResourceAllocationException e) { - s_logger.error(String.format("Unable to allocate secondary storage resources for snapshot chain for %s with size: %d", snapshotVO, size), e); + logger.error(String.format("Unable to allocate secondary storage resources for snapshot chain for %s with size: %d", snapshotVO, size), e); return false; } Collections.reverse(snapshotChain); @@ -1817,10 
+1815,10 @@ private boolean copySnapshotChainToZone(SnapshotVO snapshotVO, DataStore srcSecS throw new StorageUnavailableException("Destination zone is not ready, no image store with free capacity", DataCenter.class, destZoneId); } } - s_logger.debug(String.format("Copying snapshot chain for snapshot ID: %d on secondary store: %s of zone ID: %d", snapshotId, dstSecStore.getName(), destZoneId)); + logger.debug(String.format("Copying snapshot chain for snapshot ID: %d on secondary store: %s of zone ID: %d", snapshotId, dstSecStore.getName(), destZoneId)); for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotChain) { if (!copySnapshotToZone(snapshotDataStoreVO, srcSecStore, destZone, dstSecStore, account)) { - s_logger.error(String.format("Failed to copy snapshot: %s to zone: %s due to failure to copy snapshot ID: %d from snapshot chain", + logger.error(String.format("Failed to copy snapshot: %s to zone: %s due to failure to copy snapshot ID: %d from snapshot chain", snapshotVO, destZone, snapshotDataStoreVO.getSnapshotId())); return false; } @@ -1878,7 +1876,7 @@ protected DataCenterVO getCheckedDestinationZoneForSnapshotCopy(long zoneId, boo throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + dstZone.getName()); } if (DataCenter.Type.Edge.equals(dstZone.getType())) { - s_logger.error(String.format("Edge zone %s specified for snapshot copy", dstZone)); + logger.error(String.format("Edge zone %s specified for snapshot copy", dstZone)); throw new InvalidParameterValueException(String.format("Snapshot copy is not supported by zone %s", dstZone.getName())); } return dstZone; @@ -1908,7 +1906,7 @@ public Snapshot copySnapshot(CopySnapshotCmd cmd) throws StorageUnavailableExcep List failedZones = copySnapshotToZones(snapshot, srcSecStore, new ArrayList<>(dataCenterVOs.values())); if (destZoneIds.size() > failedZones.size()){ if (!failedZones.isEmpty()) { - s_logger.error(String.format("There were failures when copying 
snapshot to zones: %s", + logger.error(String.format("There were failures when copying snapshot to zones: %s", StringUtils.joinWith(", ", failedZones.toArray()))); } return snapshot; @@ -1931,7 +1929,7 @@ protected void copyNewSnapshotToZones(long snapshotId, long zoneId, List d String completedEventLevel = EventVO.LEVEL_ERROR; String completedEventMsg = String.format("Copying snapshot ID: %s failed", snapshotVO.getUuid()); if (dataStore == null) { - s_logger.error(String.format("Unable to find an image store for zone ID: %d where snapshot %s is in Ready state", zoneId, snapshotVO)); + logger.error(String.format("Unable to find an image store for zone ID: %d where snapshot %s is in Ready state", zoneId, snapshotVO)); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), completedEventLevel, EventTypes.EVENT_SNAPSHOT_COPY, completedEventMsg, snapshotId, ApiCommandResourceType.Snapshot.toString(), startEventId); @@ -1945,11 +1943,11 @@ protected void copyNewSnapshotToZones(long snapshotId, long zoneId, List d try { failedZones = copySnapshotToZones(snapshotVO, dataStore, dataCenterVOs); if (CollectionUtils.isNotEmpty(failedZones)) { - s_logger.error(String.format("There were failures while copying snapshot %s to zones: %s", + logger.error(String.format("There were failures while copying snapshot %s to zones: %s", snapshotVO, StringUtils.joinWith(", ", failedZones.toArray()))); } } catch (ResourceAllocationException | StorageUnavailableException | CloudRuntimeException e) { - s_logger.error(String.format("Error while copying snapshot %s to zones: %s", snapshotVO, StringUtils.joinWith(",", destZoneIds.toArray()))); + logger.error(String.format("Error while copying snapshot %s to zones: %s", snapshotVO, StringUtils.joinWith(",", destZoneIds.toArray()))); } if (failedZones.size() < destZoneIds.size()) { final List failedZonesFinal = failedZones; diff --git 
a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 3c40911e0f3f..e4e1175713e8 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.framework.jobs.dao.AsyncJobDao; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiDispatcher; @@ -73,7 +72,6 @@ @Component public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotScheduler { - private static final Logger s_logger = Logger.getLogger(SnapshotSchedulerImpl.class); @Inject protected AsyncJobDao _asyncJobDao; @@ -129,7 +127,7 @@ private Date getNextScheduledTime(final long policyId, final Date currentTimesta nextTimestamp = DateUtil.getNextRunTime(type, schedule, timezone, currentTimestamp); final String currentTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, currentTimestamp); final String nextScheduledTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, nextTimestamp); - s_logger.debug("Current time is " + currentTime + ". NextScheduledTime of policyId " + policyId + " is " + nextScheduledTime); + logger.debug("Current time is " + currentTime + ". 
NextScheduledTime of policyId " + policyId + " is " + nextScheduledTime); } return nextTimestamp; } @@ -172,7 +170,7 @@ public void poll(final Date currentTimestamp) { deleteExpiredVMSnapshots(); } catch (Exception e) { - s_logger.warn("Error in expiring vm snapshots", e); + logger.warn("Error in expiring vm snapshots", e); } } @@ -253,8 +251,8 @@ protected void deleteExpiredVMSnapshots() { Date creationTime = vmSnapshot.getCreated(); long diffInHours = TimeUnit.MILLISECONDS.toHours(now.getTime() - creationTime.getTime()); if (diffInHours >= expiration_interval_hours) { - if (s_logger.isDebugEnabled()){ - s_logger.debug("Deleting expired VM snapshot id: " + vmSnapshot.getId()); + if (logger.isDebugEnabled()){ + logger.debug("Deleting expired VM snapshot id: " + vmSnapshot.getId()); } _vmSnaphostManager.deleteVMSnapshot(vmSnapshot.getId()); } @@ -264,10 +262,10 @@ protected void deleteExpiredVMSnapshots() { @DB protected void scheduleSnapshots() { String displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, _currentTimestamp); - s_logger.debug(String.format("Snapshot scheduler is being called at [%s].", displayTime)); + logger.debug(String.format("Snapshot scheduler is being called at [%s].", displayTime)); final List snapshotsToBeExecuted = _snapshotScheduleDao.getSchedulesToExecute(_currentTimestamp); - s_logger.debug(String.format("There are [%s] scheduled snapshots to be executed at [%s].", snapshotsToBeExecuted.size(), displayTime)); + logger.debug(String.format("There are [%s] scheduled snapshots to be executed at [%s].", snapshotsToBeExecuted.size(), displayTime)); for (final SnapshotScheduleVO snapshotToBeExecuted : snapshotsToBeExecuted) { SnapshotScheduleVO tmpSnapshotScheduleVO = null; @@ -286,7 +284,7 @@ protected void scheduleSnapshots() { ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, volume.getAccountId(), EventTypes.EVENT_SNAPSHOT_CREATE, "creating snapshot for volume Id:" + volume.getUuid(), volumeId, 
ApiCommandResourceType.Volume.toString(), true, 0); - s_logger.trace(String.format("Mapping parameters required to generate a CreateSnapshotCmd for snapshot [%s].", snapshotToBeExecuted.getUuid())); + logger.trace(String.format("Mapping parameters required to generate a CreateSnapshotCmd for snapshot [%s].", snapshotToBeExecuted.getUuid())); final Map params = new HashMap(); params.put(ApiConstants.VOLUME_ID, "" + volumeId); params.put(ApiConstants.POLICY_ID, "" + policyId); @@ -303,7 +301,7 @@ protected void scheduleSnapshots() { } } - s_logger.trace(String.format("Generating a CreateSnapshotCmd for snapshot [%s] with parameters: [%s].", snapshotToBeExecuted.getUuid(), params.toString())); + logger.trace(String.format("Generating a CreateSnapshotCmd for snapshot [%s] with parameters: [%s].", snapshotToBeExecuted.getUuid(), params.toString())); final CreateSnapshotCmd cmd = new CreateSnapshotCmd(); ComponentContext.inject(cmd); _dispatcher.dispatchCreateCmd(cmd, params); @@ -312,18 +310,18 @@ protected void scheduleSnapshots() { final Date scheduledTimestamp = snapshotToBeExecuted.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - s_logger.debug(String.format("Scheduling snapshot [%s] for volume [%s] at [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), displayTime)); + logger.debug(String.format("Scheduling snapshot [%s] for volume [%s] at [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), displayTime)); AsyncJobVO job = new AsyncJobVO("", User.UID_SYSTEM, volume.getAccountId(), CreateSnapshotCmd.class.getName(), ApiGsonHelper.getBuilder().create().toJson(params), cmd.getEntityId(), cmd.getApiResourceType() != null ? 
cmd.getApiResourceType().toString() : null, null); job.setDispatcher(_asyncDispatcher.getName()); final long jobId = _asyncMgr.submitAsyncJob(job); - s_logger.debug(String.format("Scheduled snapshot [%s] for volume [%s] as job [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), job.getUuid())); + logger.debug(String.format("Scheduled snapshot [%s] for volume [%s] as job [%s].", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), job.getUuid())); tmpSnapshotScheduleVO.setAsyncJobId(jobId); _snapshotScheduleDao.update(snapshotScheId, tmpSnapshotScheduleVO); } catch (final Exception e) { - s_logger.error(String.format("The scheduling of snapshot [%s] for volume [%s] failed due to [%s].", snapshotToBeExecuted.getUuid(), volumeId, e.toString()), e); + logger.error(String.format("The scheduling of snapshot [%s] for volume [%s] failed due to [%s].", snapshotToBeExecuted.getUuid(), volumeId, e.toString()), e); } finally { if (tmpSnapshotScheduleVO != null) { _snapshotScheduleDao.releaseFromLockTable(snapshotScheId); @@ -341,13 +339,13 @@ protected void scheduleSnapshots() { */ protected boolean canSnapshotBeScheduled(final SnapshotScheduleVO snapshotToBeScheduled, final VolumeVO volume) { if (volume.getRemoved() != null) { - s_logger.warn(String.format("Skipping snapshot [%s] for volume [%s] because it has been removed. Having a snapshot scheduled for a volume that has been " + logger.warn(String.format("Skipping snapshot [%s] for volume [%s] because it has been removed. 
Having a snapshot scheduled for a volume that has been " + "removed is an inconsistency; please, check your database.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); return false; } if (volume.getPoolId() == null) { - s_logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because it is not attached to any storage pool.", snapshotToBeScheduled.getUuid(), + logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because it is not attached to any storage pool.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); return false; } @@ -357,12 +355,12 @@ protected boolean canSnapshotBeScheduled(final SnapshotScheduleVO snapshotToBeSc } if (_snapshotPolicyDao.findById(snapshotToBeScheduled.getPolicyId()) == null) { - s_logger.debug(String.format("Snapshot's policy [%s] for volume [%s] has been removed; therefore, this snapshot will be removed from the snapshot scheduler.", + logger.debug(String.format("Snapshot's policy [%s] for volume [%s] has been removed; therefore, this snapshot will be removed from the snapshot scheduler.", snapshotToBeScheduled.getPolicyId(), volume.getVolumeDescription())); _snapshotScheduleDao.remove(snapshotToBeScheduled.getId()); } - s_logger.debug(String.format("Snapshot [%s] for volume [%s] can be executed.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); + logger.debug(String.format("Snapshot [%s] for volume [%s] can be executed.", snapshotToBeScheduled.getUuid(), volume.getVolumeDescription())); return true; } @@ -370,13 +368,13 @@ protected boolean isAccountRemovedOrDisabled(final SnapshotScheduleVO snapshotTo Account volAcct = _acctDao.findById(volume.getAccountId()); if (volAcct == null) { - s_logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] has been removed.", snapshotToBeExecuted.getUuid(), + logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] has been removed.", 
snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), volume.getAccountId())); return true; } if (volAcct.getState() == Account.State.DISABLED) { - s_logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] is disabled.", snapshotToBeExecuted.getUuid(), + logger.debug(String.format("Skipping snapshot [%s] for volume [%s] because its account [%s] is disabled.", snapshotToBeExecuted.getUuid(), volume.getVolumeDescription(), volAcct.getUuid())); return true; } @@ -467,7 +465,7 @@ public boolean removeSchedule(final Long volumeId, final Long policyId) { success = _snapshotScheduleDao.remove(schedule.getId()); } if (!success) { - s_logger.debug("Error while deleting Snapshot schedule with Id: " + schedule.getId()); + logger.debug("Error while deleting Snapshot schedule with Id: " + schedule.getId()); } return success; } @@ -490,7 +488,7 @@ public boolean configure(final String name, final Map params) th } _currentTimestamp = new Date(); - s_logger.info("Snapshot Scheduler is configured."); + logger.info("Snapshot Scheduler is configured."); return true; } @@ -518,7 +516,7 @@ protected void runInContext() { final Date currentTimestamp = new Date(); poll(currentTimestamp); } catch (final Throwable t) { - s_logger.warn("Catch throwable in snapshot scheduler ", t); + logger.warn("Catch throwable in snapshot scheduler ", t); } } }; diff --git a/server/src/main/java/com/cloud/storage/upload/UploadActiveState.java b/server/src/main/java/com/cloud/storage/upload/UploadActiveState.java index c5dcc4ed1590..18cc6be2eb1b 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadActiveState.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadActiveState.java @@ -16,11 +16,11 @@ // under the License. 
package com.cloud.storage.upload; -import org.apache.log4j.Level; import com.cloud.agent.api.storage.UploadAnswer; import com.cloud.agent.api.storage.UploadProgressCommand.RequestType; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import org.apache.logging.log4j.Level; public abstract class UploadActiveState extends UploadState { @@ -41,8 +41,8 @@ public String handleDisconnect() { @Override public String handleAnswer(UploadAnswer answer) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("handleAnswer, answer status=" + answer.getUploadStatus() + ", curr state=" + getName()); + if (logger.isDebugEnabled()) { + logger.debug("handleAnswer, answer status=" + answer.getUploadStatus() + ", curr state=" + getName()); } switch (answer.getUploadStatus()) { case UPLOAD_IN_PROGRESS: @@ -70,7 +70,7 @@ public String handleAnswer(UploadAnswer answer) { @Override public String handleTimeout(long updateMs) { - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { getUploadListener().log("handleTimeout, updateMs=" + updateMs + ", curr state= " + getName(), Level.TRACE); } String newState = getName(); diff --git a/server/src/main/java/com/cloud/storage/upload/UploadErrorState.java b/server/src/main/java/com/cloud/storage/upload/UploadErrorState.java index 6d4e80fa6283..577bcd9371d8 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadErrorState.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadErrorState.java @@ -16,11 +16,12 @@ // under the License. 
package com.cloud.storage.upload; -import org.apache.log4j.Level; + import com.cloud.agent.api.storage.UploadAnswer; import com.cloud.agent.api.storage.UploadProgressCommand.RequestType; import com.cloud.storage.Upload.Status; +import org.apache.logging.log4j.Level; public class UploadErrorState extends UploadInactiveState { diff --git a/server/src/main/java/com/cloud/storage/upload/UploadListener.java b/server/src/main/java/com/cloud/storage/upload/UploadListener.java index 1184be693002..9709f5f94774 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadListener.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadListener.java @@ -25,8 +25,6 @@ import javax.inject.Inject; import org.apache.cloudstack.api.BaseCmd; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd; import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; @@ -61,6 +59,9 @@ import com.cloud.storage.dao.UploadDao; import com.cloud.storage.upload.UploadState.UploadEvent; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class UploadListener implements Listener { @@ -93,7 +94,7 @@ protected void runInContext() { } } - public static final Logger s_logger = Logger.getLogger(UploadListener.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); public static final int SMALL_DELAY = 100; public static final long STATUS_POLL_INTERVAL = 10000L; @@ -348,7 +349,7 @@ public void setLastUpdated() { } public void log(String message, Level level) { - s_logger.log(level, message + ", " + type.toString() + " = " + typeName + " at host " + sserver.getName()); + logger.log(level, message + ", " + type.toString() + " = " + typeName + " at host " + sserver.getName()); } public void setDisconnected() { @@ -369,7 +370,7 @@ public void 
scheduleTimeoutTask(long delay) { timeoutTask = new TimeoutTask(this); timer.schedule(timeoutTask, delay); - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { log("Scheduling timeout at " + delay + " ms", Level.DEBUG); } } @@ -438,19 +439,19 @@ public synchronized void updateDatabase(UploadAnswer answer) { public void sendCommand(RequestType reqType) { if (getJobId() != null) { - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { log("Sending progress command ", Level.TRACE); } try { EndPoint ep = _epSelector.select(sserver); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return; } ep.sendMessageAsync(new UploadProgressCommand(getCommand(), getJobId(), reqType), new Callback(ep.getId(), this)); } catch (Exception e) { - s_logger.debug("Send command failed", e); + logger.debug("Send command failed", e); setDisconnected(); } } @@ -462,7 +463,7 @@ private UploadCommand getCommand() { } public void logDisconnect() { - s_logger.warn("Unable to monitor upload progress of " + typeName + " at host " + sserver.getName()); + logger.warn("Unable to monitor upload progress of " + typeName + " at host " + sserver.getName()); } public void scheduleImmediateStatusCheck(RequestType request) { diff --git a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java index 64ada6dc3096..76a724a428ee 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadMonitorImpl.java @@ -31,7 +31,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -82,7 +81,6 @@ @Component public class UploadMonitorImpl extends ManagerBase 
implements UploadMonitor { - static final Logger s_logger = Logger.getLogger(UploadMonitorImpl.class); @Inject private UploadDao _uploadDao; @@ -159,12 +157,12 @@ public void extractVolume(UploadVO uploadVolumeObj, DataStore secStore, VolumeVO EndPoint ep = _epSelector.select(secStore); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return; } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - s_logger.warn("Unable to start upload of volume " + volume.getName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of volume " + volume.getName() + " from " + secStore.getName() + " to " + url, e); ul.setDisconnected(); ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -178,7 +176,7 @@ public Long extractTemplate(VMTemplateVO template, String url, TemplateDataStore DataStore secStore = storeMgr.getImageStoreWithFreeCapacity(dataCenterId); if(secStore == null) { - s_logger.error("Unable to extract template, secondary storage to satisfy storage needs cannot be found!"); + logger.error("Unable to extract template, secondary storage to satisfy storage needs cannot be found!"); return null; } @@ -196,12 +194,12 @@ public Long extractTemplate(VMTemplateVO template, String url, TemplateDataStore EndPoint ep = _epSelector.select(secStore); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return null; } ep.sendMessageAsync(ucmd, new UploadListener.Callback(ep.getId(), ul)); } catch (Exception e) { - s_logger.warn("Unable to start upload of " + template.getUniqueName() + " from " + secStore.getName() + " to " + url, e); + logger.warn("Unable to start upload of " + template.getUniqueName() + " from " + secStore.getName() + " to " + url, e); ul.setDisconnected(); 
ul.scheduleStatusCheck(RequestType.GET_OR_RESTART); } @@ -222,7 +220,7 @@ public UploadVO createEntityDownloadURL(VMTemplateVO template, TemplateDataStore EndPoint ep = _epSelector.select(store); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); return null; } @@ -265,7 +263,7 @@ else if ((token != null) && (token.length == 5) && (token[2].startsWith(hostname Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { errorString = "Unable to create a link for " + type + " id:" + template.getId() + "," + (ans == null ? "" : ans.getDetails()); - s_logger.error(errorString); + logger.error(errorString); throw new CloudRuntimeException(errorString); } @@ -321,7 +319,7 @@ public void createVolumeDownloadURL(Long entityId, String path, Type type, Long Answer ans = ep.sendMessage(cmd); if (ans == null || !ans.getResult()) { errorString = "Unable to create a link for " + type + " id:" + entityId + "," + (ans == null ? 
"" : ans.getDetails()); - s_logger.warn(errorString); + logger.warn(errorString); throw new CloudRuntimeException(errorString); } @@ -330,7 +328,7 @@ public void createVolumeDownloadURL(Long entityId, String path, Type type, Long SecondaryStorageVmVO ssVm = ssVms.get(0); if (ssVm.getPublicIpAddress() == null) { errorString = "A running secondary storage vm has a null public ip?"; - s_logger.error(errorString); + logger.error(errorString); throw new CloudRuntimeException(errorString); } //Construct actual URL locally now that the symlink exists at SSVM @@ -380,7 +378,7 @@ public boolean configure(String name, Map params) throws Configu String cert = configs.get("secstorage.secure.copy.cert"); if ("realhostip.com".equalsIgnoreCase(cert)) { - s_logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); + logger.warn("Only realhostip.com ssl cert is supported, ignoring self-signed and other certs"); } _ssvmUrlDomain = configs.get("secstorage.ssl.cert.domain"); @@ -427,10 +425,10 @@ public void handleUploadSync(long sserverId) { HostVO storageHost = _serverDao.findById(sserverId); if (storageHost == null) { - s_logger.warn("Huh? Agent id " + sserverId + " does not correspond to a row in hosts table?"); + logger.warn("Huh? 
Agent id " + sserverId + " does not correspond to a row in hosts table?"); return; } - s_logger.debug("Handling upload sserverId " + sserverId); + logger.debug("Handling upload sserverId " + sserverId); List uploadsInProgress = new ArrayList(); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.UPLOAD_IN_PROGRESS)); uploadsInProgress.addAll(_uploadDao.listByHostAndUploadStatus(sserverId, UploadVO.Status.COPY_IN_PROGRESS)); @@ -468,7 +466,7 @@ protected void runInContext() { } } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } } } @@ -496,17 +494,17 @@ public void cleanupStorage() { new DeleteEntityDownloadURLCommand(path, extractJob.getType(), extractJob.getUploadUrl(), ((ImageStoreVO)secStore).getParent()); EndPoint ep = _epSelector.select(secStore); if (ep == null) { - s_logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host " + extractJob.getDataStoreId()); + logger.warn("UploadMonitor cleanup: There is no secondary storage VM for secondary storage host " + extractJob.getDataStoreId()); continue; //TODO: why continue? why not break? 
} - if (s_logger.isDebugEnabled()) { - s_logger.debug("UploadMonitor cleanup: Sending deletion of extract URL " + extractJob.getUploadUrl() + " to ssvm " + ep.getHostAddr()); + if (logger.isDebugEnabled()) { + logger.debug("UploadMonitor cleanup: Sending deletion of extract URL " + extractJob.getUploadUrl() + " to ssvm " + ep.getHostAddr()); } Answer ans = ep.sendMessage(cmd); if (ans != null && ans.getResult()) { _uploadDao.remove(extractJob.getId()); } else { - s_logger.warn("UploadMonitor cleanup: Unable to delete the link for " + extractJob.getType() + " id=" + extractJob.getTypeId() + " url=" + + logger.warn("UploadMonitor cleanup: Unable to delete the link for " + extractJob.getType() + " id=" + extractJob.getTypeId() + " url=" + extractJob.getUploadUrl() + " on ssvm " + ep.getHostAddr()); } } diff --git a/server/src/main/java/com/cloud/storage/upload/UploadState.java b/server/src/main/java/com/cloud/storage/upload/UploadState.java index ce91a3b8f3dc..0c7692b1e3c9 100644 --- a/server/src/main/java/com/cloud/storage/upload/UploadState.java +++ b/server/src/main/java/com/cloud/storage/upload/UploadState.java @@ -18,8 +18,9 @@ import java.util.Date; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.api.storage.UploadAnswer; @@ -29,7 +30,7 @@ public static enum UploadEvent { UPLOAD_ANSWER, ABANDON_UPLOAD, TIMEOUT_CHECK, DISCONNECT }; - protected static final Logger s_logger = Logger.getLogger(UploadState.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); private UploadListener ul; @@ -42,7 +43,7 @@ protected UploadListener getUploadListener() { } public String handleEvent(UploadEvent event, Object eventObj) { - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { getUploadListener().log("handleEvent, event type=" + event + ", curr state=" + getName(), 
Level.TRACE); } switch (event) { @@ -62,7 +63,7 @@ public String handleEvent(UploadEvent event, Object eventObj) { } public void onEntry(String prevState, UploadEvent event, Object evtObj) { - if (s_logger.isTraceEnabled()) { + if (logger.isTraceEnabled()) { getUploadListener().log("onEntry, event type=" + event + ", curr state=" + getName(), Level.TRACE); } if (event == UploadEvent.UPLOAD_ANSWER) { diff --git a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java index 60ded224a214..d9c98e2ef920 100644 --- a/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/tags/TaggedResourceManagerImpl.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import com.cloud.domain.PartOf; import com.cloud.event.ActionEvent; @@ -69,7 +68,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class TaggedResourceManagerImpl extends ManagerBase implements TaggedResourceService { - public static final Logger s_logger = Logger.getLogger(TaggedResourceManagerImpl.class); @Inject EntityManager _entityMgr; @@ -167,14 +165,14 @@ private Pair getAccountDomain(long resourceId, ResourceObjectType re protected void checkTagsDeletePermission(List tagsToDelete, Account caller) { for (ResourceTag resourceTag : tagsToDelete) { - if(s_logger.isDebugEnabled()) { - s_logger.debug("Resource Tag Id: " + resourceTag.getResourceId()); - s_logger.debug("Resource Tag AccountId: " + resourceTag.getAccountId()); + if(logger.isDebugEnabled()) { + logger.debug("Resource Tag Id: " + resourceTag.getResourceId()); + logger.debug("Resource Tag AccountId: " + resourceTag.getAccountId()); } if (caller.getAccountId() != resourceTag.getAccountId()) { Account owner = 
_accountMgr.getAccount(resourceTag.getAccountId()); - if(s_logger.isDebugEnabled()) { - s_logger.debug("Resource Owner: " + owner); + if(logger.isDebugEnabled()) { + logger.debug("Resource Owner: " + owner); } _accountMgr.checkAccess(caller, null, false, owner); } @@ -249,8 +247,8 @@ private List searchResourceTags(List resourceIds, @ActionEvent(eventType = EventTypes.EVENT_TAGS_DELETE, eventDescription = "deleting resource tags") public boolean deleteTags(List resourceIds, ResourceObjectType resourceType, Map tags) { Account caller = CallContext.current().getCallingAccount(); - if(s_logger.isDebugEnabled()) { - s_logger.debug("ResourceIds to Find " + String.join(", ", resourceIds)); + if(logger.isDebugEnabled()) { + logger.debug("ResourceIds to Find " + String.join(", ", resourceIds)); } List resourceTags = searchResourceTags(resourceIds, resourceType); final List tagsToDelete = new ArrayList<>(); @@ -291,7 +289,7 @@ public boolean deleteTags(List resourceIds, ResourceObjectType resourceT public void doInTransactionWithoutResult(TransactionStatus status) { for (ResourceTag tagToRemove : tagsToDelete) { _resourceTagDao.remove(tagToRemove.getId()); - s_logger.debug("Removed the tag '" + tagToRemove + "' for resources (" + + logger.debug("Removed the tag '" + tagToRemove + "' for resources (" + String.join(", ", resourceIds) + ")"); if (ResourceObjectType.UserVm.equals(resourceType)) { informStoragePoolForVmTags(tagToRemove.getResourceId(), tagToRemove.getKey(), tagToRemove.getValue()); @@ -321,7 +319,7 @@ private void informStoragePoolForVmTags(long vmId, String key, String value) { Long poolId = volume.getPoolId(); DataStore dataStore = retrieveDatastore(poolId); if (dataStore == null || !(dataStore.getDriver() instanceof PrimaryDataStoreDriver)) { - s_logger.info(String.format("No data store found for VM %d with pool ID %d.", vmId, poolId)); + logger.info(String.format("No data store found for VM %d with pool ID %d.", vmId, poolId)); continue; } 
PrimaryDataStoreDriver dataStoreDriver = (PrimaryDataStoreDriver) dataStore.getDriver(); diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index b886f0868f67..47daec0eeda1 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -67,7 +67,6 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.utils.security.DigestHelper; import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -109,7 +108,6 @@ import com.cloud.utils.exception.CloudRuntimeException; public class HypervisorTemplateAdapter extends TemplateAdapterBase { - protected final static Logger s_logger = Logger.getLogger(HypervisorTemplateAdapter.class); @Inject DownloadMonitor _downloadMonitor; @Inject @@ -181,7 +179,7 @@ private Long performDirectDownloadUrlValidation(final String format, final Hyper Integer connectRequestTimeout = DirectDownloadManager.DirectDownloadConnectionRequestTimeout.value(); Integer connectTimeout = DirectDownloadManager.DirectDownloadConnectTimeout.value(); CheckUrlCommand cmd = new CheckUrlCommand(format, url, connectTimeout, connectRequestTimeout, socketTimeout); - s_logger.debug("Performing URL " + url + " validation on host " + host.getId()); + logger.debug("Performing URL " + url + " validation on host " + host.getId()); Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { throw new CloudRuntimeException("URL: " + url + " validation failed on host id " + host.getId()); @@ -355,38 +353,38 @@ protected void validateSecondaryStorageAndCreateTemplate(List imageSt protected boolean isZoneAndImageStoreAvailable(DataStore imageStore, Long zoneId, Set zoneSet, boolean 
isTemplatePrivate) { if (zoneId == null) { - s_logger.warn(String.format("Zone ID is null, cannot allocate ISO/template in image store [%s].", imageStore)); + logger.warn(String.format("Zone ID is null, cannot allocate ISO/template in image store [%s].", imageStore)); return false; } DataCenterVO zone = _dcDao.findById(zoneId); if (zone == null) { - s_logger.warn(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", zoneId, imageStore.getId())); + logger.warn(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", zoneId, imageStore.getId())); return false; } if (Grouping.AllocationState.Disabled == zone.getAllocationState()) { - s_logger.info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, imageStore.getId())); + logger.info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, imageStore.getId())); return false; } if (!_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - s_logger.info(String.format("Image store doesn't have enough capacity. Skip downloading template to this image store [%s].", imageStore.getId())); + logger.info(String.format("Image store doesn't have enough capacity. 
Skip downloading template to this image store [%s].", imageStore.getId())); return false; } if (zoneSet == null) { - s_logger.info(String.format("Zone set is null; therefore, the ISO/template should be allocated in every secondary storage of zone [%s].", zone)); + logger.info(String.format("Zone set is null; therefore, the ISO/template should be allocated in every secondary storage of zone [%s].", zone)); return true; } if (isTemplatePrivate && zoneSet.contains(zoneId)) { - s_logger.info(String.format("The template is private and it is already allocated in a secondary storage in zone [%s]; therefore, image store [%s] will be skipped.", + logger.info(String.format("The template is private and it is already allocated in a secondary storage in zone [%s]; therefore, image store [%s] will be skipped.", zone, imageStore)); return false; } - s_logger.info(String.format("Private template will be allocated in image store [%s] in zone [%s].", imageStore, zone)); + logger.info(String.format("Private template will be allocated in image store [%s] in zone [%s].", imageStore, zone)); zoneSet.add(zoneId); return true; } @@ -458,7 +456,7 @@ private void postUploadAllocation(List imageStores, VMTemplateVO temp EndPoint ep = _epSelector.select(templateOnStore); if (ep == null) { String errMsg = "There is no secondary storage VM for downloading template to image store " + imageStore.getName(); - s_logger.warn(errMsg); + logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } @@ -523,7 +521,7 @@ protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher dataDiskTemplates = templateDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && dataDiskTemplates.size() > 0) { - s_logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. Delete Datadisk templates before deleting the template"); + logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. 
Delete Datadisk templates before deleting the template"); for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - s_logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName()); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(dataDiskTemplate.getId(), imageStore)); try { TemplateApiResult result = future.get(); dataDiskDeletetionResult = result.isSuccess(); if (!dataDiskDeletetionResult) { - s_logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: " + logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: " + result.getResult()); break; } @@ -641,20 +639,20 @@ public boolean delete(TemplateProfile profile) { // Decrement total secondary storage space used by the account _resourceLimitMgr.recalculateResourceCount(dataDiskTemplate.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal()); } catch (Exception e) { - s_logger.debug("Delete datadisk template failed", e); + logger.debug("Delete datadisk template failed", e); throw new CloudRuntimeException("Delete datadisk template failed", e); } } } // remove from template_zone_ref if (dataDiskDeletetionResult) { - s_logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName()); + logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName()); AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore)); try { TemplateApiResult result = future.get(); success = result.isSuccess(); if (!success) { - s_logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: 
" + result.getResult()); + logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult()); break; } @@ -666,11 +664,11 @@ public boolean delete(TemplateProfile profile) { } } } catch (InterruptedException|ExecutionException e) { - s_logger.debug("Delete template Failed", e); + logger.debug("Delete template Failed", e); throw new CloudRuntimeException("Delete template Failed", e); } } else { - s_logger.warn("Template: " + template.getId() + " won't be deleted from image store: " + imageStore.getName() + " because deletion of one of the Datadisk" + logger.warn("Template: " + template.getId() + " won't be deleted from image store: " + imageStore.getName() + " because deletion of one of the Datadisk" + " templates that belonged to the template failed"); } } @@ -685,7 +683,7 @@ public boolean delete(TemplateProfile profile) { // delete all cache entries for this template List cacheTmpls = imageFactory.listTemplateOnCache(template.getId()); for (TemplateInfo tmplOnCache : cacheTmpls) { - s_logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName()); + logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName()); tmplOnCache.delete(); } diff --git a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java index 74347d1c0570..195f336b38b8 100644 --- a/server/src/main/java/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/main/java/com/cloud/template/TemplateAdapterBase.java @@ -33,7 +33,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.BooleanUtils; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; @@ 
-84,7 +83,6 @@ import com.cloud.vm.dao.UserVmDao; public abstract class TemplateAdapterBase extends AdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(TemplateAdapterBase.class); protected @Inject DomainDao _domainDao; protected @Inject @@ -171,7 +169,7 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d requiresHVM = true; } if (deployAsIs) { - s_logger.info("Setting default guest OS for deploy-as-is template while the template registration is not completed"); + logger.info("Setting default guest OS for deploy-as-is template while the template registration is not completed"); guestOSId = getDefaultDeployAsIsGuestOsId(); } } @@ -214,7 +212,7 @@ public TemplateProfile prepare(boolean isIso, long userId, String name, String d try { imgfmt = ImageFormat.valueOf(format.toUpperCase()); } catch (IllegalArgumentException e) { - s_logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage()); + logger.debug("ImageFormat IllegalArgumentException: " + e.getMessage()); throw new IllegalArgumentException("Image format: " + format + " is incorrect. 
Supported formats are " + EnumUtils.listValues(ImageFormat.values())); } @@ -295,11 +293,11 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocatio if (cmd.isDeployAsIs()) { if (MapUtils.isNotEmpty(details)) { if (details.containsKey(VmDetailConstants.ROOT_DISK_CONTROLLER)) { - s_logger.info("Ignoring the rootDiskController detail provided, as we honour what is defined in the template"); + logger.info("Ignoring the rootDiskController detail provided, as we honour what is defined in the template"); details.remove(VmDetailConstants.ROOT_DISK_CONTROLLER); } if (details.containsKey(VmDetailConstants.NIC_ADAPTER)) { - s_logger.info("Ignoring the nicAdapter detail provided, as we honour what is defined in the template"); + logger.info("Ignoring the nicAdapter detail provided, as we honour what is defined in the template"); details.remove(VmDetailConstants.NIC_ADAPTER); } } @@ -350,7 +348,7 @@ private Long getDefaultDeployAsIsGuestOsId() { public TemplateProfile prepare(GetUploadParamsForTemplateCmd cmd) throws ResourceAllocationException { Long osTypeId = cmd.getOsTypeId(); if (osTypeId == null) { - s_logger.info("Setting the default guest OS for deploy-as-is templates while the template upload is not completed"); + logger.info("Setting the default guest OS for deploy-as-is templates while the template upload is not completed"); osTypeId = getDefaultDeployAsIsGuestOsId(); } UploadParams params = new TemplateUploadParams(CallContext.current().getCallingUserId(), cmd.getName(), diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 2ed420870208..1080ad7f0263 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -108,7 +108,6 @@ import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; import org.apache.commons.collections.CollectionUtils; import 
org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -220,7 +219,6 @@ import com.google.gson.GsonBuilder; public class TemplateManagerImpl extends ManagerBase implements TemplateManager, TemplateApiService, Configurable { - private final static Logger s_logger = Logger.getLogger(TemplateManagerImpl.class); @Inject private VMTemplateDao _tmpltDao; @@ -525,7 +523,7 @@ public VirtualMachineTemplate prepareTemplate(long templateId, long zoneId, Long if (pool.getStatus() == StoragePoolStatus.Up && pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(vmTemplate, pool); } else { - s_logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " + logger.warn("Skip loading template " + vmTemplate.getId() + " into primary storage " + pool.getId() + " as either the pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId + " or the pool is currently not available."); } } @@ -630,7 +628,7 @@ public void prepareIsoForVmProfile(VirtualMachineProfile profile, DeployDestinat template = prepareIso(vm.getIsoId(), vm.getDataCenterId(), dest.getHost().getId(), poolId); if (template == null){ - s_logger.error("Failed to prepare ISO on secondary or cache storage"); + logger.error("Failed to prepare ISO on secondary or cache storage"); throw new CloudRuntimeException("Failed to prepare ISO on secondary or cache storage"); } if (template.isBootable()) { @@ -657,10 +655,10 @@ public void prepareIsoForVmProfile(VirtualMachineProfile profile, DeployDestinat } private void prepareTemplateInOneStoragePool(final VMTemplateVO template, final StoragePoolVO pool) { - s_logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); if 
(pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { List childDataStores = _poolDao.listChildStoragePoolsInDatastoreCluster(pool.getId()); - s_logger.debug("Schedule to preload template " + template.getId() + " into child datastores of DataStore cluster: " + pool.getId()); + logger.debug("Schedule to preload template " + template.getId() + " into child datastores of DataStore cluster: " + pool.getId()); for (StoragePoolVO childDataStore : childDataStores) { prepareTemplateInOneStoragePoolInternal(template, childDataStore); } @@ -676,15 +674,15 @@ protected void runInContext() { try { reallyRun(); } catch (Throwable e) { - s_logger.warn("Unexpected exception ", e); + logger.warn("Unexpected exception ", e); } } private void reallyRun() { - s_logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); StoragePool pol = (StoragePool)_dataStoreMgr.getPrimaryDataStore(pool.getId()); prepareTemplateForCreate(template, pol); - s_logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); + logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); } }); } @@ -695,7 +693,7 @@ public void prepareTemplateInAllStoragePools(final VMTemplateVO template, long z if (pool.getDataCenterId() == zoneId) { prepareTemplateInOneStoragePool(template, pool); } else { - s_logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + + logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId); } } @@ -717,8 +715,8 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor 
_tmpltPoolDao.update(templateStoragePoolRef.getId(), templateStoragePoolRef); if (templateStoragePoolRef.getDownloadState() == Status.DOWNLOADED) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); + if (logger.isDebugEnabled()) { + logger.debug("Template " + templateId + " has already been downloaded to pool " + poolId); } return templateStoragePoolRef; @@ -727,7 +725,7 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor templateStoreRef = _tmplStoreDao.findByTemplateZoneDownloadStatus(templateId, pool.getDataCenterId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED); if (templateStoreRef == null) { - s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); + logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; } @@ -737,8 +735,8 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor } if (templateStoragePoolRef == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Downloading template " + templateId + " to pool " + poolId); + if (logger.isDebugEnabled()) { + logger.debug("Downloading template " + templateId + " to pool " + poolId); } DataStore srcSecStore = _dataStoreMgr.getDataStore(templateStoreRef.getDataStoreId(), DataStoreRole.Image); TemplateInfo srcTemplate = _tmplFactory.getTemplate(templateId, srcSecStore); @@ -747,13 +745,13 @@ public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, Stor try { TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("prepare template failed:" + result.getResult()); + logger.debug("prepare template failed:" + result.getResult()); return null; } return _tmpltPoolDao.findByPoolTemplate(poolId, templateId, null); } catch (Exception ex) { - s_logger.debug("failed to copy template from image store:" + srcSecStore.getName() + 
" to primary storage"); + logger.debug("failed to copy template from image store:" + srcSecStore.getName() + " to primary storage"); } } @@ -767,7 +765,7 @@ public String getChecksum(DataStore store, String templatePath, String algorithm Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; - s_logger.error(errMsg); + logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { answer = ep.sendMessage(cmd); @@ -786,7 +784,7 @@ public boolean resetTemplateDownloadStateOnPool(long templateStoragePoolRefId) { VMTemplateStoragePoolVO templateStoragePoolRef = _tmpltPoolDao.acquireInLockTable(templateStoragePoolRefId, 1200); if (templateStoragePoolRef == null) { - s_logger.warn("resetTemplateDownloadStateOnPool failed - unable to lock TemplateStorgePoolRef " + templateStoragePoolRefId); + logger.warn("resetTemplateDownloadStateOnPool failed - unable to lock TemplateStorgePoolRef " + templateStoragePoolRefId); return false; } @@ -844,7 +842,7 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D try { TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); + logger.debug("copy template failed for image store " + dstSecStore.getName() + ":" + result.getResult()); continue; // try next image store } @@ -859,26 +857,26 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D List dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId()); if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) { for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) { - s_logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". 
Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); + logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName()); TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore); AsyncCallFuture dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore); try { TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get(); if (dataDiskCopyResult.isFailed()) { - s_logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one"); continue; // Continue to copy next Datadisk template } _tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId); _resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize()); } catch (Exception ex) { - s_logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName() + " , will try copying the next one"); } } } } catch (Exception ex) { - s_logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); + logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one"); } } return true; @@ -933,7 +931,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn boolean success = false; if (template.getHypervisorType() == HypervisorType.BareMetal) { if (template.isCrossZones()) { - s_logger.debug("Template " + 
templateId + " is cross-zone, don't need to copy"); + logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); return template; } for (Long destZoneId: destZoneIds) { @@ -962,7 +960,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn for (Long destZoneId : destZoneIds) { DataStore dstSecStore = getImageStore(destZoneId, templateId); if (dstSecStore != null) { - s_logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + + logger.debug("There is template " + templateId + " in secondary storage " + dstSecStore.getName() + " in zone " + destZoneId + " , don't need to copy"); continue; } @@ -981,7 +979,7 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn if ((destZoneIds != null) && (destZoneIds.size() > failedZones.size())){ if (!failedZones.isEmpty()) { - s_logger.debug("There were failures when copying template to zones: " + + logger.debug("There were failures when copying template to zones: " + StringUtils.listToCsvTags(failedZones)); } return template; @@ -1004,7 +1002,7 @@ private boolean addTemplateToZone(VMTemplateVO template, long dstZoneId, long so _tmpltDao.addTemplateToZone(template, dstZoneId); return true; } catch (Exception ex) { - s_logger.debug("failed to copy template from Zone: " + sourceZone.getUuid() + " to Zone: " + dstZone.getUuid()); + logger.debug("failed to copy template from Zone: " + sourceZone.getUuid() + " to Zone: " + dstZone.getUuid()); } return false; } @@ -1055,7 +1053,7 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); if (templatePoolRef == null) { - s_logger.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId()); + logger.debug("Can't aquire the lock for template pool ref: " + templatePoolVO.getId()); return; } @@ -1064,8 +1062,8 @@ public void 
evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) TemplateInfo template = _tmplFactory.getTemplateOnPrimaryStorage(templatePoolRef.getTemplateId(), pool, templatePoolRef.getDeploymentOption()); try { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Evicting " + templatePoolVO); + if (logger.isDebugEnabled()) { + logger.debug("Evicting " + templatePoolVO); } if (pool.isManaged()) { @@ -1074,11 +1072,11 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) TemplateApiResult result = future.get(); if (result.isFailed()) { - s_logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); + logger.debug("Failed to delete template " + template.getId() + " from storage pool " + pool.getId()); } else { // Remove the templatePoolVO. if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); } } } else { @@ -1088,14 +1086,14 @@ public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) if (answer != null && answer.getResult()) { // Remove the templatePoolVO. if (_tmpltPoolDao.remove(templatePoolVO.getId())) { - s_logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); + logger.debug("Successfully evicted template " + template.getName() + " from storage pool " + pool.getName()); } } else { - s_logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); + logger.info("Will retry evict template " + template.getName() + " from storage pool " + pool.getName()); } } } catch (StorageUnavailableException | InterruptedException | ExecutionException e) { - s_logger.info("Storage is unavailable currently. 
Will retry evicte template " + template.getName() + " from storage pool " + pool.getName()); + logger.info("Storage is unavailable currently. Will retry evicte template " + template.getName() + " from storage pool " + pool.getName()); } finally { _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); } @@ -1134,7 +1132,7 @@ public boolean templateIsDeleteable(long templateId) { // always be copied to // primary storage before deploying VM. if (!userVmUsingIso.isEmpty()) { - s_logger.debug("ISO " + templateId + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); + logger.debug("ISO " + templateId + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); return false; } @@ -1246,7 +1244,7 @@ public TemplateInfo prepareIso(long isoId, long dcId, Long hostId, Long poolId) } if (tmplt == null || tmplt.getFormat() != ImageFormat.ISO) { - s_logger.warn("ISO: " + isoId + " does not exist in vm_template table"); + logger.warn("ISO: " + isoId + " does not exist in vm_template table"); return null; } @@ -1255,7 +1253,7 @@ public TemplateInfo prepareIso(long isoId, long dcId, Long hostId, Long poolId) Scope destScope = new ZoneScope(dcId); TemplateInfo cacheData = (TemplateInfo)cacheMgr.createCacheObject(tmplt, destScope); if (cacheData == null) { - s_logger.error("Failed in copy iso from S3 to cache storage"); + logger.error("Failed in copy iso from S3 to cache storage"); return null; } return cacheData; @@ -1276,14 +1274,14 @@ private boolean attachISOToVM(long vmId, long isoId, boolean attach, boolean for // prepare ISO ready to mount on hypervisor resource level TemplateInfo tmplt = prepareIso(isoId, vm.getDataCenterId(), vm.getHostId(), null); if (tmplt == null) { - s_logger.error("Failed to prepare ISO ready to mount on hypervisor resource level"); + logger.error("Failed to prepare ISO ready to mount on hypervisor resource level"); throw new CloudRuntimeException("Failed to prepare ISO ready to mount on 
hypervisor resource level"); } String vmName = vm.getInstanceName(); HostVO host = _hostDao.findById(vm.getHostId()); if (host == null) { - s_logger.warn("Host: " + vm.getHostId() + " does not exist"); + logger.warn("Host: " + vm.getHostId() + " does not exist"); return false; } @@ -1342,7 +1340,7 @@ public boolean deleteTemplate(DeleteTemplateCmd cmd) { } if(!cmd.isForced() && CollectionUtils.isNotEmpty(vmInstanceVOList)) { final String message = String.format("Unable to delete template with id: %1$s because VM instances: [%2$s] are using it.", templateId, Joiner.on(",").join(vmInstanceVOList)); - s_logger.warn(message); + logger.warn(message); throw new InvalidParameterValueException(message); } @@ -1499,7 +1497,7 @@ public boolean updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissions // If the template is removed throw an error. if (template.getRemoved() != null) { - s_logger.error("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); + logger.error("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); throw new InvalidParameterValueException("unable to update permissions for " + mediaType + " with id " + id + " as it is removed "); } @@ -1706,7 +1704,7 @@ public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) t if (result.isFailed()) { privateTemplate = null; - s_logger.debug("Failed to create template" + result.getResult()); + logger.debug("Failed to create template" + result.getResult()); throw new CloudRuntimeException("Failed to create template" + result.getResult()); } @@ -1726,10 +1724,10 @@ public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) t privateTemplate.getSourceTemplateId(), srcTmpltStore.getPhysicalSize(), privateTemplate.getSize()); _usageEventDao.persist(usageEvent); } catch (InterruptedException e) { - s_logger.debug("Failed to create template", e); + logger.debug("Failed to create template", e); throw new 
CloudRuntimeException("Failed to create template", e); } catch (ExecutionException e) { - s_logger.debug("Failed to create template", e); + logger.debug("Failed to create template", e); throw new CloudRuntimeException("Failed to create template", e); } @@ -1851,8 +1849,8 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t // created if (!_volumeMgr.volumeInactive(volume)) { String msg = "Unable to create private template for volume: " + volume.getName() + "; volume is attached to a non-stopped VM, please stop the VM first"; - if (s_logger.isInfoEnabled()) { - s_logger.info(msg); + if (logger.isInfoEnabled()) { + logger.info(msg); } throw new CloudRuntimeException(msg); } @@ -1931,8 +1929,8 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t } String templateTag = cmd.getTemplateTag(); if (templateTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding template tag: " + templateTag); + if (logger.isDebugEnabled()) { + logger.debug("Adding template tag: " + templateTag); } } privateTemplate = new VMTemplateVO(nextTemplateId, name, ImageFormat.RAW, isPublic, featured, isExtractable, @@ -1940,8 +1938,8 @@ public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account t passwordEnabledValue, guestOS.getId(), true, hyperType, templateTag, cmd.getDetails(), sshKeyEnabledValue, isDynamicScalingEnabled, false, false); if (sourceTemplateId != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("This template is getting created from other template, setting source template Id to: " + sourceTemplateId); + if (logger.isDebugEnabled()) { + logger.debug("This template is getting created from other template, setting source template Id to: " + sourceTemplateId); } } @@ -2328,7 +2326,7 @@ void validateDetails(VMTemplateVO template, Map details) { } catch (IllegalArgumentException e) { String msg = String.format("Invalid %s: %s specified. 
Valid values are: %s", ApiConstants.BOOT_MODE, bootMode, Arrays.toString(ApiConstants.BootMode.values())); - s_logger.error(msg); + logger.error(msg); throw new InvalidParameterValueException(msg); } } @@ -2366,7 +2364,7 @@ public List getTemplateDisksOnImageStore(Long templateId, DataStoreR TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, role); if (templateObject == null) { String msg = String.format("Could not find template %s downloaded on store with role %s", templateId, role.toString()); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } return _tmpltSvr.getTemplateDatadisksOnImageStore(templateObject, configurationId); diff --git a/server/src/main/java/com/cloud/test/DatabaseConfig.java b/server/src/main/java/com/cloud/test/DatabaseConfig.java index 15525446e1e2..27f2bf18ed76 100644 --- a/server/src/main/java/com/cloud/test/DatabaseConfig.java +++ b/server/src/main/java/com/cloud/test/DatabaseConfig.java @@ -39,8 +39,9 @@ import org.apache.cloudstack.utils.security.DigestHelper; import org.apache.cloudstack.utils.security.ParserUtils; -import org.apache.log4j.Logger; -import org.apache.log4j.xml.DOMConfigurator; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.config.Configurator; import org.w3c.dom.Document; import org.w3c.dom.Node; import org.w3c.dom.NodeList; @@ -65,7 +66,7 @@ import com.cloud.utils.net.NfsUtils; public class DatabaseConfig { - private static final Logger s_logger = Logger.getLogger(DatabaseConfig.class.getName()); + protected static Logger LOGGER = LogManager.getLogger(DatabaseConfig.class); private String _configFileName = null; private String _currentObjectName = null; @@ -368,13 +369,13 @@ public static void main(String[] args) { File file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); if (file != null) { System.out.println("Log4j configuration from : " + file.getAbsolutePath()); - 
DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + Configurator.initialize(null, file.getAbsolutePath()); } else { System.out.println("Configure log4j with default properties"); } if (args.length < 1) { - s_logger.error("error starting database config, missing initial data file"); + LOGGER.error("error starting database config, missing initial data file"); } else { try { DatabaseConfig config = ComponentContext.inject(DatabaseConfig.class); @@ -384,7 +385,7 @@ public static void main(String[] args) { } catch (Exception ex) { System.out.print("Error Caught"); ex.printStackTrace(); - s_logger.error("error", ex); + LOGGER.error("error", ex); } } } @@ -450,7 +451,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws Except pzc.checkAllPodCidrSubnets(); } catch (Exception ex) { System.out.print("ERROR IS" + ex); - s_logger.error("error", ex); + LOGGER.error("error", ex); } } @@ -595,7 +596,7 @@ public void saveCluster() { } catch (SQLException ex) { System.out.println("Error creating cluster: " + ex.getMessage()); - s_logger.error("error creating cluster", ex); + LOGGER.error("error creating cluster", ex); return; } @@ -642,7 +643,7 @@ public void saveStoragePool() { } catch (SQLException ex) { System.out.println("Error creating storage pool: " + ex.getMessage()); - s_logger.error("error creating storage pool ", ex); + LOGGER.error("error creating storage pool ", ex); return; } @@ -746,7 +747,7 @@ private void savePhysicalNetworkServiceProvider() { stmt.executeUpdate(); } catch (SQLException ex) { System.out.println("Error creating physical network service provider: " + ex.getMessage()); - s_logger.error("error creating physical network service provider", ex); + LOGGER.error("error creating physical network service provider", ex); return; } @@ -771,7 +772,7 @@ private void saveVirtualRouterProvider() { stmt.executeUpdate(); } catch (SQLException ex) { System.out.println("Error creating virtual router provider: " + 
ex.getMessage()); - s_logger.error("error creating virtual router provider ", ex); + LOGGER.error("error creating virtual router provider ", ex); return; } @@ -957,7 +958,7 @@ protected void saveServiceOffering() { try { DiskOfferinDao.persist(diskOfferingVO); } catch (Exception e) { - s_logger.error("error creating disk offering", e); + LOGGER.error("error creating disk offering", e); } serviceOffering.setDiskOfferingId(diskOfferingVO.getId()); @@ -965,7 +966,7 @@ protected void saveServiceOffering() { try { serviceOfferingDao.persist(serviceOffering); } catch (Exception e) { - s_logger.error("error creating service offering", e); + LOGGER.error("error creating service offering", e); } /* String insertSql = "INSERT INTO `cloud`.`service_offering` (id, name, cpu, ram_size, speed, nw_rate, mc_rate, created, ha_enabled, mirrored, display_text, guest_ip_type, use_local_storage) " + @@ -976,7 +977,7 @@ protected void saveServiceOffering() { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating service offering", ex); + LOGGER.error("error creating service offering", ex); return; } */ @@ -1027,7 +1028,7 @@ protected void saveDiskOffering() { try { offering.persist(diskOffering); } catch (Exception e) { - s_logger.error("error creating disk offering", e); + LOGGER.error("error creating disk offering", e); } /* @@ -1040,7 +1041,7 @@ protected void saveDiskOffering() { stmt.setString(1, tags); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating disk offering", ex); + LOGGER.error("error creating disk offering", ex); return; } */ @@ -1075,7 +1076,7 @@ protected void saveThrottlingRates() { } } catch (SQLException ex) { - s_logger.error("error saving network and multicast throttling rates to all service offerings", ex); + LOGGER.error("error saving network and multicast throttling rates to all service offerings", ex); return; } } @@ -1103,7 +1104,7 
@@ private void saveVMTemplate() { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating vm template: " + ex); + LOGGER.error("error creating vm template: " + ex); } finally { txn.close(); } @@ -1126,7 +1127,7 @@ private void saveVMTemplate() { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating vm template: " + ex); + LOGGER.error("error creating vm template: " + ex); } finally { txn.close(); } @@ -1142,7 +1143,7 @@ protected void saveUser() { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSystemAccount); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating system account", ex); + LOGGER.error("error creating system account", ex); } // insert system user @@ -1154,7 +1155,7 @@ protected void saveUser() { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSystemUser); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating system user", ex); + LOGGER.error("error creating system user", ex); } // insert admin user @@ -1174,7 +1175,7 @@ protected void saveUser() { try { pwDigest = DigestHelper.getPaddedDigest(algorithm, password); } catch (NoSuchAlgorithmException e) { - s_logger.error("error saving user", e); + LOGGER.error("error saving user", e); return; } @@ -1187,7 +1188,7 @@ protected void saveUser() { stmt.setString(2, username); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating account", ex); + LOGGER.error("error creating account", ex); } // now insert the user @@ -1204,7 +1205,7 @@ protected void saveUser() { stmt.setString(6, email); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating user", ex); + LOGGER.error("error creating user", ex); } } @@ -1272,7 +1273,7 @@ protected void saveConfiguration(String name, String value, 
String category) { stmt.executeUpdate(); } } catch (SQLException ex) { - s_logger.error("error creating configuration", ex); + LOGGER.error("error creating configuration", ex); } } @@ -1285,17 +1286,17 @@ private boolean checkIpAddressRange(String ipAddressRange) { } if (!IPRangeConfig.validIP(startIP)) { - s_logger.error("The private IP address: " + startIP + " is invalid."); + LOGGER.error("The private IP address: " + startIP + " is invalid."); return false; } if (!IPRangeConfig.validOrBlankIP(endIP)) { - s_logger.error("The private IP address: " + endIP + " is invalid."); + LOGGER.error("The private IP address: " + endIP + " is invalid."); return false; } if (!IPRangeConfig.validIPRange(startIP, endIP)) { - s_logger.error("The IP range " + startIP + " -> " + endIP + " is invalid."); + LOGGER.error("The IP range " + startIP + " -> " + endIP + " is invalid."); return false; } @@ -1310,7 +1311,7 @@ protected void saveRootDomain() { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error creating ROOT domain", ex); + LOGGER.error("error creating ROOT domain", ex); } /* @@ -1320,7 +1321,7 @@ protected void saveRootDomain() { PreparedStatement stmt = txn.prepareStatement(updateSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error updating admin user", ex); + LOGGER.error("error updating admin user", ex); } finally { txn.close(); } @@ -1331,7 +1332,7 @@ protected void saveRootDomain() { PreparedStatement stmt = txn.prepareStatement(updateSql); stmt.executeUpdate(); } catch (SQLException ex) { - s_logger.error("error updating system user", ex); + LOGGER.error("error updating system user", ex); } finally { txn.close(); } diff --git a/server/src/main/java/com/cloud/test/TestAppender.java b/server/src/main/java/com/cloud/test/TestAppender.java deleted file mode 100644 index 9a6ec62efc6a..000000000000 --- a/server/src/main/java/com/cloud/test/TestAppender.java +++ 
/dev/null @@ -1,181 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ -package com.cloud.test; - -import com.google.common.base.Joiner; -import com.google.common.base.Objects; -import com.google.common.collect.ImmutableMap; - -import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; -import org.springframework.util.Assert; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.regex.Pattern; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkState; -import static java.lang.String.format; -import static org.apache.log4j.Level.ALL; -import static org.apache.log4j.Level.DEBUG; -import static org.apache.log4j.Level.ERROR; -import static org.apache.log4j.Level.FATAL; -import static org.apache.log4j.Level.INFO; -import static org.apache.log4j.Level.OFF; -import static org.apache.log4j.Level.WARN; - -/** -* -* Tracks one or more patterns to determine whether or not they have been -* logged. 
It uses a streaming approach to determine whether or not a message -* has a occurred to prevent unnecessary memory consumption. Instances of this -* of this class are created using the {@link TestAppenderBuilder}. -* -* To use this class, register a one or more expected patterns by level as part -* of the test setup and retain an reference to the appender instance. After the -* expected logging events have occurred in the test case, call -* {@link TestAppender#assertMessagesLogged()} which will fail the test if any of the -* expected patterns were not logged. -* -*/ -public final class TestAppender extends AppenderSkeleton { - private final static String APPENDER_NAME = "test_appender"; - private final ImmutableMap> expectedPatternResults; - private TestAppender(final Map> expectedPatterns) { - super(); - expectedPatternResults = ImmutableMap.copyOf(expectedPatterns); - } - protected void append(LoggingEvent loggingEvent) { - checkArgument(loggingEvent != null, "append requires a non-null loggingEvent"); - final Level level = loggingEvent.getLevel(); - checkState(expectedPatternResults.containsKey(level), "level " + level + " not supported by append"); - for (final PatternResult patternResult : expectedPatternResults.get(level)) { - if (patternResult.getPattern().matcher(loggingEvent.getRenderedMessage()).matches()) { - patternResult.markFound(); - } - } - } - - public void close() { -// Do nothing ... 
- } - public boolean requiresLayout() { - return false; - } - public void assertMessagesLogged() { - final List unloggedPatterns = new ArrayList<>(); - for (final Map.Entry> expectedPatternResult : expectedPatternResults.entrySet()) { - for (final PatternResult patternResults : expectedPatternResult.getValue()) { - if (!patternResults.isFound()) { - unloggedPatterns.add(format("%1$s was not logged for level %2$s", - patternResults.getPattern().toString(), expectedPatternResult.getKey())); - } - } - } - if (!unloggedPatterns.isEmpty()) { - //Raise an assert - Assert.isTrue(false, Joiner.on(",").join(unloggedPatterns)); - } - } - - private static final class PatternResult { - private final Pattern pattern; - private boolean foundFlag = false; - private PatternResult(Pattern pattern) { - super(); - this.pattern = pattern; - } - public Pattern getPattern() { - return pattern; - } - public void markFound() { - // This operation is thread-safe because the value will only ever be switched from false to true. Therefore, - // multiple threads mutating the value for a pattern will not corrupt the value ... 
- foundFlag = true; - } - public boolean isFound() { - return foundFlag; - } - @Override - public boolean equals(Object thatObject) { - if (this == thatObject) { - return true; - } - if (thatObject == null || getClass() != thatObject.getClass()) { - return false; - } - PatternResult thatPatternResult = (PatternResult) thatObject; - return foundFlag == thatPatternResult.foundFlag && - Objects.equal(pattern, thatPatternResult.pattern); - } - @Override - public int hashCode() { - return Objects.hashCode(pattern, foundFlag); - } - @Override - public String toString() { - return format("Pattern Result [ pattern: %1$s, markFound: %2$s ]", pattern.toString(), foundFlag); - } - } - - public static final class TestAppenderBuilder { - private final Map> expectedPatterns; - public TestAppenderBuilder() { - super(); - expectedPatterns = new HashMap<>(); - expectedPatterns.put(ALL, new HashSet()); - expectedPatterns.put(DEBUG, new HashSet()); - expectedPatterns.put(ERROR, new HashSet()); - expectedPatterns.put(FATAL, new HashSet()); - expectedPatterns.put(INFO, new HashSet()); - expectedPatterns.put(OFF, new HashSet()); - expectedPatterns.put(WARN, new HashSet()); - } - public TestAppenderBuilder addExpectedPattern(final Level level, final String pattern) { - checkArgument(level != null, "addExpectedPattern requires a non-null level"); - checkArgument(StringUtils.isNotEmpty(pattern), "addExpectedPattern requires a non-blank pattern"); - checkState(expectedPatterns.containsKey(level), "level " + level + " is not supported by " + getClass().getName()); - expectedPatterns.get(level).add(new PatternResult(Pattern.compile(pattern))); - return this; - } - public TestAppender build() { - return new TestAppender(expectedPatterns); - } - } - /** - * - * Attaches a {@link TestAppender} to a {@link Logger} and ensures that it is the only - * test appender attached to the logger. 
- * - * @param logger The logger which will be monitored by the test - * @param testAppender The test appender to attach to {@code logger} - */ - public static void safeAddAppender(Logger logger, TestAppender testAppender) { - logger.removeAppender(APPENDER_NAME); - logger.addAppender(testAppender); - } -} diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index e9264554ec50..6d6a05cac19e 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.usage.UsageService; import org.apache.cloudstack.usage.UsageTypes; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.springframework.stereotype.Component; @@ -83,7 +82,6 @@ @Component public class UsageServiceImpl extends ManagerBase implements UsageService, Manager { - public static final Logger s_logger = Logger.getLogger(UsageServiceImpl.class); //ToDo: Move implementation to ManagaerImpl @@ -194,7 +192,7 @@ public Pair, Integer> getUsageRecords(ListUsageRecordsCmd //List records for all the accounts if the caller account is of type admin. //If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin ignoreAccountId = _accountService.isRootAdmin(caller.getId()); - s_logger.debug("Account details not available. Using userContext accountId: " + accountId); + logger.debug("Account details not available. 
Using userContext accountId: " + accountId); } // Check if a domain admin is allowed to access the requested domain id @@ -213,8 +211,8 @@ public Pair, Integer> getUsageRecords(ListUsageRecordsCmd Date adjustedStartDate = computeAdjustedTime(startDate, usageTZ); Date adjustedEndDate = computeAdjustedTime(endDate, usageTZ); - if (s_logger.isDebugEnabled()) { - s_logger.debug("getting usage records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate + + if (logger.isDebugEnabled()) { + logger.debug("getting usage records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate + ", using pageSize: " + cmd.getPageSizeVal() + " and startIndex: " + cmd.getStartIndex()); } @@ -406,8 +404,8 @@ private Long getAccountIdFromProject(Long projectId) { throw new InvalidParameterValueException("Unable to find project by id " + projectId); } final long projectAccountId = project.getProjectAccountId(); - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Using projectAccountId %d for project %s [%s] as account id", projectAccountId, project.getName(), project.getUuid())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Using projectAccountId %d for project %s [%s] as account id", projectAccountId, project.getName(), project.getUuid())); } accountId = projectAccountId; return accountId; @@ -479,7 +477,7 @@ public boolean removeRawUsageRecords(RemoveRawUsageRecordsCmd cmd) throws Invali cal.set(Calendar.SECOND, 0); cal.set(Calendar.MILLISECOND, 0); long execTS = cal.getTimeInMillis(); - s_logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS); + logger.debug("Trying to remove old raw cloud_usage records older than " + interval + " day(s), current time=" + curTS + " next job execution time=" + execTS); // Let's avoid cleanup when 
job runs and around a 15 min interval if (Math.abs(curTS - execTS) < 15 * 60 * 1000) { return false; diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 86a359a33487..1a30f192173a 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -81,7 +81,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.jetbrains.annotations.NotNull; import com.cloud.api.ApiDBUtils; @@ -201,7 +200,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class AccountManagerImpl extends ManagerBase implements AccountManager, Manager { - public static final Logger s_logger = Logger.getLogger(AccountManagerImpl.class); @Inject private AccountDao _accountDao; @@ -468,12 +466,12 @@ public boolean start() { apiNameList = new ArrayList(); Set> cmdClasses = new LinkedHashSet>(); for (PluggableService service : services) { - s_logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); + logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); cmdClasses.addAll(service.getCommands()); } apiNameList = createApiNameList(cmdClasses); long endTime = System.nanoTime(); - s_logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); + logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); } _executor.scheduleAtFixedRate(new AccountCleanupTask(), _cleanupInterval, _cleanupInterval, TimeUnit.SECONDS); return true; @@ -492,8 +490,8 @@ protected List createApiNameList(Set> cmdClasses) { } String apiName = apiCmdAnnotation.name(); - if 
(s_logger.isTraceEnabled()) { - s_logger.trace("Found api: " + apiName); + if (logger.isTraceEnabled()) { + logger.trace("Found api: " + apiName); } apiNameList.add(apiName); @@ -543,8 +541,8 @@ public boolean isRootAdmin(Long accountId) { for (SecurityChecker checker : _securityCheckers) { try { if (checker.checkAccess(acct, null, null, "SystemCapability")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Root Access granted to " + acct + " by " + checker.getName()); + if (logger.isTraceEnabled()) { + logger.trace("Root Access granted to " + acct + " by " + checker.getName()); } return true; } @@ -566,8 +564,8 @@ public boolean isDomainAdmin(Long accountId) { for (SecurityChecker checker : _securityCheckers) { try { if (checker.checkAccess(acct, null, null, "DomainCapability")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("DomainAdmin Access granted to " + acct + " by " + checker.getName()); + if (logger.isTraceEnabled()) { + logger.trace("DomainAdmin Access granted to " + acct + " by " + checker.getName()); } return true; } @@ -597,8 +595,8 @@ public boolean isResourceDomainAdmin(Long accountId) { for (SecurityChecker checker : _securityCheckers) { try { if (checker.checkAccess(acct, null, null, "DomainResourceCapability")) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ResourceDomainAdmin Access granted to " + acct + " by " + checker.getName()); + if (logger.isTraceEnabled()) { + logger.trace("ResourceDomainAdmin Access granted to " + acct + " by " + checker.getName()); } return true; } @@ -625,8 +623,8 @@ public boolean isInternalAccount(long accountId) { public void checkAccess(Account caller, Domain domain) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(caller, domain)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + caller + " to " + domain + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + 
caller + " to " + domain + " by " + checker.getName()); } return; } @@ -658,8 +656,8 @@ public void checkAccess(Account caller, AccessType accessType, boolean sameOwner if (caller.getId() == Account.ACCOUNT_ID_SYSTEM || isRootAdmin(caller.getId())) { // no need to make permission checks if the system/root admin makes the call - if (s_logger.isTraceEnabled()) { - s_logger.trace("No need to make permission check for System/RootAdmin account, returning true"); + if (logger.isTraceEnabled()) { + logger.trace("No need to make permission check for System/RootAdmin account, returning true"); } return; @@ -688,8 +686,8 @@ public void checkAccess(Account caller, AccessType accessType, boolean sameOwner boolean granted = false; for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(caller, entity, accessType, apiName)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName()); } granted = true; break; @@ -765,7 +763,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.error("Failed to update login attempts for user with id " + id); + logger.error("Failed to update login attempts for user with id " + id); } } @@ -796,12 +794,12 @@ protected boolean lockAccount(long accountId) { acctForUpdate.setState(State.LOCKED); success = _accountDao.update(Long.valueOf(accountId), acctForUpdate); } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); + if (logger.isInfoEnabled()) { + logger.info("Attempting to lock a non-enabled account, current state is " + account.getState() + " (accountId: " + accountId + "), locking failed."); } } } else { - s_logger.warn("Failed to 
lock account " + accountId + ", account not found."); + logger.warn("Failed to lock account " + accountId + ", account not found."); } return success; } @@ -812,15 +810,15 @@ public boolean deleteAccount(AccountVO account, long callerUserId, Account calle // delete the account record if (!_accountDao.remove(accountId)) { - s_logger.error("Unable to delete account " + accountId); + logger.error("Unable to delete account " + accountId); return false; } account.setState(State.REMOVED); _accountDao.update(accountId, account); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removed account " + accountId); + if (logger.isDebugEnabled()) { + logger.debug("Removed account " + accountId); } return cleanupAccount(account, callerUserId, caller); @@ -835,7 +833,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c List users = _userDao.listByAccount(accountId); for (UserVO user : users) { if (!_userDao.remove(user.getId())) { - s_logger.error("Unable to delete user: " + user + " as a part of account " + account + " cleanup"); + logger.error("Unable to delete user: " + user + " as a part of account " + account + " cleanup"); accountCleanupNeeded = true; } } @@ -864,7 +862,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c List groups = _vmGroupDao.listByAccountId(accountId); for (InstanceGroupVO group : groups) { if (!_vmMgr.deleteVmGroup(group.getId())) { - s_logger.error("Unable to delete group: " + group.getId()); + logger.error("Unable to delete group: " + group.getId()); accountCleanupNeeded = true; } } @@ -872,7 +870,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c // Delete the snapshots dir for the account. Have to do this before destroying the VMs. 
boolean success = _snapMgr.deleteSnapshotDirsForAccount(accountId); if (success) { - s_logger.debug("Successfully deleted snapshots directories for all volumes under account " + accountId + " across all zones"); + logger.debug("Successfully deleted snapshots directories for all volumes under account " + accountId + " across all zones"); } // clean up templates @@ -883,14 +881,14 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { allTemplatesDeleted = _tmpltMgr.delete(callerUserId, template.getId(), null); } catch (Exception e) { - s_logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); + logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); allTemplatesDeleted = false; } } } if (!allTemplatesDeleted) { - s_logger.warn("Failed to delete templates while removing account id=" + accountId); + logger.warn("Failed to delete templates while removing account id=" + accountId); accountCleanupNeeded = true; } @@ -900,14 +898,14 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { _vmSnapshotMgr.deleteVMSnapshot(vmSnapshot.getId()); } catch (Exception e) { - s_logger.debug("Failed to cleanup vm snapshot " + vmSnapshot.getId() + " due to " + e.toString()); + logger.debug("Failed to cleanup vm snapshot " + vmSnapshot.getId() + " due to " + e.toString()); } } // Destroy the account's VMs List vms = _userVmDao.listByAccountId(accountId); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunging # of vms (accountId=" + accountId + "): " + vms.size()); + if (logger.isDebugEnabled()) { + logger.debug("Expunging # of vms (accountId=" + accountId + "): " + vms.size()); } for (UserVmVO vm : vms) { @@ -916,13 +914,13 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c _vmMgr.destroyVm(vm.getId(), false); } catch (Exception e) { e.printStackTrace(); - s_logger.warn("Failed 
destroying instance " + vm.getUuid() + " as part of account deletion."); + logger.warn("Failed destroying instance " + vm.getUuid() + " as part of account deletion."); } } // no need to catch exception at this place as expunging vm // should pass in order to perform further cleanup if (!_vmMgr.expunge(vm)) { - s_logger.error("Unable to expunge vm: " + vm.getId()); + logger.error("Unable to expunge vm: " + vm.getId()); accountCleanupNeeded = true; } } @@ -933,7 +931,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { volumeService.deleteVolume(volume.getId(), caller); } catch (Exception ex) { - s_logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); accountCleanupNeeded = true; } } @@ -951,7 +949,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, false); } } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); + logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); accountCleanupNeeded = true; } @@ -963,15 +961,15 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c // Cleanup security groups int numRemoved = _securityGroupDao.removeByAccountId(accountId); - s_logger.info("deleteAccount: Deleted " + numRemoved + " network groups for account " + accountId); + logger.info("deleteAccount: Deleted " + numRemoved + " network groups for account " + accountId); // Cleanup affinity groups int numAGRemoved = _affinityGroupDao.removeByAccountId(accountId); - s_logger.info("deleteAccount: Deleted " + numAGRemoved 
+ " affinity groups for account " + accountId); + logger.info("deleteAccount: Deleted " + numAGRemoved + " affinity groups for account " + accountId); // Delete all the networks boolean networksDeleted = true; - s_logger.debug("Deleting networks for account " + account.getId()); + logger.debug("Deleting networks for account " + account.getId()); List networks = _networkDao.listByOwner(accountId); if (networks != null) { Collections.sort(networks, new Comparator() { @@ -991,27 +989,27 @@ public int compare(NetworkVO network1, NetworkVO network2) { ReservationContext context = new ReservationContextImpl(null, null, getActiveUser(callerUserId), caller); if (!_networkMgr.destroyNetwork(network.getId(), context, false)) { - s_logger.warn("Unable to destroy network " + network + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy network " + network + " as a part of account id=" + accountId + " cleanup."); accountCleanupNeeded = true; networksDeleted = false; } else { - s_logger.debug("Network " + network.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("Network " + network.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); } } } // Delete all VPCs boolean vpcsDeleted = true; - s_logger.debug("Deleting vpcs for account " + account.getId()); + logger.debug("Deleting vpcs for account " + account.getId()); List vpcs = _vpcMgr.getVpcsForAccount(account.getId()); for (Vpc vpc : vpcs) { if (!_vpcMgr.destroyVpc(vpc, caller, callerUserId)) { - s_logger.warn("Unable to destroy VPC " + vpc + " as a part of account id=" + accountId + " cleanup."); + logger.warn("Unable to destroy VPC " + vpc + " as a part of account id=" + accountId + " cleanup."); accountCleanupNeeded = true; vpcsDeleted = false; } else { - s_logger.debug("VPC " + vpc.getId() + " successfully deleted as a part of account id=" + accountId + " cleanup."); + logger.debug("VPC " + vpc.getId() + 
" successfully deleted as a part of account id=" + accountId + " cleanup."); } } @@ -1019,25 +1017,25 @@ public int compare(NetworkVO network1, NetworkVO network2) { // release ip addresses belonging to the account List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { - s_logger.debug("Releasing ip " + ip + " as a part of account id=" + accountId + " cleanup"); + logger.debug("Releasing ip " + ip + " as a part of account id=" + accountId + " cleanup"); if (!_ipAddrMgr.disassociatePublicIpAddress(ip.getId(), callerUserId, caller)) { - s_logger.warn("Failed to release ip address " + ip + " as a part of account id=" + accountId + " clenaup"); + logger.warn("Failed to release ip address " + ip + " as a part of account id=" + accountId + " clenaup"); accountCleanupNeeded = true; } } } // Delete Site 2 Site VPN customer gateway - s_logger.debug("Deleting site-to-site VPN customer gateways for account " + accountId); + logger.debug("Deleting site-to-site VPN customer gateways for account " + accountId); if (!_vpnMgr.deleteCustomerGatewayByAccount(accountId)) { - s_logger.warn("Fail to delete site-to-site VPN customer gateways for account " + accountId); + logger.warn("Fail to delete site-to-site VPN customer gateways for account " + accountId); } // Delete autoscale resources if any try { _autoscaleMgr.cleanUpAutoScaleResources(accountId); } catch (CloudRuntimeException ex) { - s_logger.warn("Failed to cleanup AutoScale resources as a part of account id=" + accountId + " cleanup due to exception:", ex); + logger.warn("Failed to cleanup AutoScale resources as a part of account id=" + accountId + " cleanup due to exception:", ex); accountCleanupNeeded = true; } @@ -1047,7 +1045,7 @@ public int compare(NetworkVO network1, NetworkVO network2) { if (!_configMgr.releaseAccountSpecificVirtualRanges(accountId)) { accountCleanupNeeded = true; } else { - s_logger.debug("Account specific Virtual IP ranges " + " are successfully released as a 
part of account id=" + accountId + " cleanup."); + logger.debug("Account specific Virtual IP ranges " + " are successfully released as a part of account id=" + accountId + " cleanup."); } } @@ -1057,14 +1055,14 @@ public int compare(NetworkVO network1, NetworkVO network2) { _dataCenterVnetDao.releaseDedicatedGuestVlans(map.getId()); } int vlansReleased = _accountGuestVlanMapDao.removeByAccountId(accountId); - s_logger.info("deleteAccount: Released " + vlansReleased + " dedicated guest vlan ranges from account " + accountId); + logger.info("deleteAccount: Released " + vlansReleased + " dedicated guest vlan ranges from account " + accountId); // release account specific acquired portable IP's. Since all the portable IP's must have been already // disassociated with VPC/guest network (due to deletion), so just mark portable IP as free. List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { if (ip.isPortable()) { - s_logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); + logger.debug("Releasing portable ip " + ip + " as a part of account id=" + accountId + " cleanup"); _ipAddrMgr.releasePortableIpAddress(ip.getId()); } } @@ -1072,10 +1070,10 @@ public int compare(NetworkVO network1, NetworkVO network2) { // release dedication if any List dedicatedResources = _dedicatedDao.listByAccountId(accountId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.debug("Releasing dedicated resources for account " + accountId); + logger.debug("Releasing dedicated resources for account " + accountId); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - s_logger.warn("Fail to release dedicated resources for account " + accountId); + logger.warn("Fail to release dedicated resources for account " + accountId); } } } @@ -1103,11 +1101,11 @@ public int compare(NetworkVO network1, NetworkVO network2) { return true; } catch (Exception 
ex) { - s_logger.warn("Failed to cleanup account " + account + " due to ", ex); + logger.warn("Failed to cleanup account " + account + " due to ", ex); accountCleanupNeeded = true; return true; } finally { - s_logger.info("Cleanup for account " + account.getId() + (accountCleanupNeeded ? " is needed." : " is not needed.")); + logger.info("Cleanup for account " + account.getId() + (accountCleanupNeeded ? " is needed." : " is not needed.")); if (accountCleanupNeeded) { _accountDao.markForCleanup(accountId); } else { @@ -1121,8 +1119,8 @@ public int compare(NetworkVO network1, NetworkVO network2) { public boolean disableAccount(long accountId) throws ConcurrentOperationException, ResourceUnavailableException { boolean success = false; if (accountId <= 2) { - if (s_logger.isInfoEnabled()) { - s_logger.info("disableAccount -- invalid account id: " + accountId); + if (logger.isInfoEnabled()) { + logger.info("disableAccount -- invalid account id: " + accountId); } return false; } @@ -1141,7 +1139,7 @@ public boolean disableAccount(long accountId) throws ConcurrentOperationExceptio disableAccountResult = doDisableAccount(accountId); } finally { if (!disableAccountResult) { - s_logger.warn("Failed to disable account " + account + " resources as a part of disableAccount call, marking the account for cleanup"); + logger.warn("Failed to disable account " + account + " resources as a part of disableAccount call, marking the account for cleanup"); _accountDao.markForCleanup(accountId); } else { acctForUpdate = _accountDao.createForUpdate(); @@ -1162,11 +1160,11 @@ private boolean doDisableAccount(long accountId) throws ConcurrentOperationExcep try { _itMgr.advanceStop(vm.getUuid(), false); } catch (OperationTimedoutException ote) { - s_logger.warn("Operation for stopping vm timed out, unable to stop vm " + vm.getHostName(), ote); + logger.warn("Operation for stopping vm timed out, unable to stop vm " + vm.getHostName(), ote); success = false; } } catch (AgentUnavailableException 
aue) { - s_logger.warn("Agent running on host " + vm.getHostId() + " is unavailable, unable to stop vm " + vm.getHostName(), aue); + logger.warn("Agent running on host " + vm.getHostId() + " is unavailable, unable to stop vm " + vm.getHostName(), aue); success = false; } } @@ -1302,8 +1300,8 @@ private boolean isValidRoleChange(Account account, Role role) { * if there is any permission under the requested role that is not permitted for the caller, refuse */ private void checkRoleEscalation(Account caller, Account requested) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("checking if user of account %s [%s] with role-id [%d] can create an account of type %s [%s] with role-id [%d]", + if (logger.isDebugEnabled()) { + logger.debug(String.format("checking if user of account %s [%s] with role-id [%d] can create an account of type %s [%s] with role-id [%d]", caller.getAccountName(), caller.getUuid(), caller.getRoleId(), @@ -1316,8 +1314,8 @@ private void checkRoleEscalation(Account caller, Account requested) { try { checkApiAccess(apiCheckers, requested, command); } catch (PermissionDeniedException pde) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("checking for permission to \"%s\" is irrelevant as it is not requested for %s [%s]", + if (logger.isTraceEnabled()) { + logger.trace(String.format("checking for permission to \"%s\" is irrelevant as it is not requested for %s [%s]", command, pde.getAccount().getAccountName(), pde.getAccount().getUuid(), @@ -1328,8 +1326,8 @@ private void checkRoleEscalation(Account caller, Account requested) { } // so requested can, now make sure caller can as well try { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("permission to \"%s\" is requested", + if (logger.isTraceEnabled()) { + logger.trace(String.format("permission to \"%s\" is requested", command)); } checkApiAccess(apiCheckers, caller, command); @@ -1338,7 +1336,7 @@ private void checkRoleEscalation(Account caller, 
Account requested) { caller.getAccountName(), caller.getDomainId(), caller.getUuid()); - s_logger.warn(msg); + logger.warn(msg); throw new PermissionDeniedException(msg,pde); } } @@ -1357,8 +1355,8 @@ private List getEnabledApiCheckers() { for (APIChecker apiChecker : apiAccessCheckers) { if (apiChecker.isEnabled()) { usableApiCheckers.add(apiChecker); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("using api checker \"%s\"", + if (logger.isTraceEnabled()) { + logger.trace(String.format("using api checker \"%s\"", apiChecker.getName())); } } @@ -1412,7 +1410,7 @@ public UserVO createUser(String userName, String password, String firstName, Str @ActionEvent(eventType = EventTypes.EVENT_USER_UPDATE, eventDescription = "Updating User") public UserAccount updateUser(UpdateUserCmd updateUserCmd) { UserVO user = retrieveAndValidateUser(updateUserCmd); - s_logger.debug("Updating user with Id: " + user.getUuid()); + logger.debug("Updating user with Id: " + user.getUuid()); validateAndUpdateApiAndSecretKeyIfNeeded(updateUserCmd, user); Account account = retrieveAndValidateAccount(user); @@ -1451,7 +1449,7 @@ public UserAccount updateUser(UpdateUserCmd updateUserCmd) { */ protected void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO user, String currentPassword) { if (newPassword == null) { - s_logger.trace("No new password to update for user: " + user.getUuid()); + logger.trace("No new password to update for user: " + user.getUuid()); return; } if (StringUtils.isBlank(newPassword)) { @@ -1465,7 +1463,7 @@ protected void validateUserPasswordAndUpdateIfNeeded(String newPassword, UserVO boolean isDomainAdmin = isDomainAdmin(callingAccount.getId()); boolean isAdmin = isDomainAdmin || isRootAdminExecutingPasswordUpdate; if (isAdmin) { - s_logger.trace(String.format("Admin account [uuid=%s] executing password update for user [%s] ", callingAccount.getUuid(), user.getUuid())); + logger.trace(String.format("Admin account [uuid=%s] executing 
password update for user [%s] ", callingAccount.getUuid(), user.getUuid())); } if (!isAdmin && StringUtils.isBlank(currentPassword)) { throw new InvalidParameterValueException("To set a new password the current password must be provided."); @@ -1491,11 +1489,11 @@ protected void validateCurrentPassword(UserVO user, String currentPassword) { for (UserAuthenticator userAuthenticator : _userPasswordEncoders) { Pair authenticationResult = userAuthenticator.authenticate(user.getUsername(), currentPassword, userAccount.getDomainId(), null); if (authenticationResult == null) { - s_logger.trace(String.format("Authenticator [%s] is returning null for the authenticate mehtod.", userAuthenticator.getClass())); + logger.trace(String.format("Authenticator [%s] is returning null for the authenticate mehtod.", userAuthenticator.getClass())); continue; } if (BooleanUtils.toBoolean(authenticationResult.first())) { - s_logger.debug(String.format("User [id=%s] re-authenticated [authenticator=%s] during password update.", user.getUuid(), userAuthenticator.getName())); + logger.debug(String.format("User [id=%s] re-authenticated [authenticator=%s] during password update.", user.getUuid(), userAuthenticator.getName())); currentPasswordMatchesDataBasePassword = true; break; } @@ -1790,8 +1788,8 @@ public UserAccount lockUser(long userId) { success = (success && lockAccount(user.getAccountId())); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Attempting to lock a non-enabled user, current state is " + user.getState() + " (userId: " + user.getId() + "), locking failed."); + if (logger.isInfoEnabled()) { + logger.info("Attempting to lock a non-enabled user, current state is " + user.getState() + " (userId: " + user.getId() + "), locking failed."); } success = false; } @@ -1839,11 +1837,11 @@ public boolean deleteUserAccount(long accountId) { private boolean isDeleteNeeded(AccountVO account, long accountId, Account caller) { if (account == null) { - 
s_logger.info(String.format("The account, identified by id %d, doesn't exist", accountId )); + logger.info(String.format("The account, identified by id %d, doesn't exist", accountId )); return false; } if (account.getRemoved() != null) { - s_logger.info("The account:" + account.getAccountName() + " is already removed"); + logger.info("The account:" + account.getAccountName() + " is already removed"); return false; } // don't allow removing Project account @@ -1979,7 +1977,7 @@ public AccountVO updateAccount(UpdateAccountCmd cmd) { // Check if account exists if (account == null || account.getType() == Account.Type.PROJECT) { - s_logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); + logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); } @@ -2124,8 +2122,8 @@ public Boolean doInTransaction(TransactionStatus status) { private long getNewAccountId(long domainId, String accountName, Long accountId) { Account newAccount = null; if (StringUtils.isNotBlank(accountName)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Getting id for account by name '" + accountName + "' in domain " + domainId); + if (logger.isDebugEnabled()) { + logger.debug("Getting id for account by name '" + accountName + "' in domain " + domainId); } newAccount = _accountDao.findEnabledAccount(accountName, domainId); } @@ -2178,39 +2176,39 @@ protected void runInContext() { try { GlobalLock lock = GlobalLock.getInternLock("AccountCleanup"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return; } try { // Cleanup removed 
accounts List removedAccounts = _accountDao.findCleanupsForRemovedAccounts(null); - s_logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); + logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); for (AccountVO account : removedAccounts) { - s_logger.debug("Cleaning up " + account.getId()); + logger.debug("Cleaning up " + account.getId()); cleanupAccount(account, getSystemUser().getId(), getSystemAccount()); } // cleanup disabled accounts List disabledAccounts = _accountDao.findCleanupsForDisabledAccounts(); - s_logger.info("Found " + disabledAccounts.size() + " disabled accounts to cleanup"); + logger.info("Found " + disabledAccounts.size() + " disabled accounts to cleanup"); for (AccountVO account : disabledAccounts) { - s_logger.debug("Disabling account " + account.getId()); + logger.debug("Disabling account " + account.getId()); try { disableAccount(account.getId()); } catch (Exception e) { - s_logger.error("Skipping due to error on account " + account.getId(), e); + logger.error("Skipping due to error on account " + account.getId(), e); } } // cleanup inactive domains List inactiveDomains = _domainMgr.findInactiveDomains(); - s_logger.info("Found " + inactiveDomains.size() + " inactive domains to cleanup"); + logger.info("Found " + inactiveDomains.size() + " inactive domains to cleanup"); for (Domain inactiveDomain : inactiveDomains) { long domainId = inactiveDomain.getId(); try { @@ -2219,47 +2217,47 @@ protected void runInContext() { // release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain" + domainId); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - s_logger.warn("Fail to release dedicated resources 
for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain " + domainId); } } } - s_logger.debug("Removing inactive domain id=" + domainId); + logger.debug("Removing inactive domain id=" + domainId); _domainMgr.removeDomain(domainId); } else { - s_logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need cleanup"); + logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need cleanup"); } } catch (Exception e) { - s_logger.error("Skipping due to error on domain " + domainId, e); + logger.error("Skipping due to error on domain " + domainId, e); } } // cleanup inactive projects List inactiveProjects = _projectDao.listByState(Project.State.Disabled); - s_logger.info("Found " + inactiveProjects.size() + " disabled projects to cleanup"); + logger.info("Found " + inactiveProjects.size() + " disabled projects to cleanup"); for (ProjectVO project : inactiveProjects) { try { Account projectAccount = getAccount(project.getProjectAccountId()); if (projectAccount == null) { - s_logger.debug("Removing inactive project id=" + project.getId()); + logger.debug("Removing inactive project id=" + project.getId()); _projectMgr.deleteProject(CallContext.current().getCallingAccount(), CallContext.current().getCallingUserId(), project); } else { - s_logger.debug("Can't remove disabled project " + project + " as it has non removed account id=" + project.getId()); + logger.debug("Can't remove disabled project " + project + " as it has non removed account id=" + project.getId()); } } catch (Exception e) { - s_logger.error("Skipping due to error on project " + project, e); + logger.error("Skipping due to error on project " + project, e); } } } catch (Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } finally { lock.unlock(); } } catch (Exception e) { - s_logger.error("Exception ", e); + logger.error("Exception ", e); } } } @@ -2447,8 +2445,8 @@ public AccountVO 
doInTransaction(TransactionStatus status) { } protected UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID, User.Source source) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone); + if (logger.isDebugEnabled()) { + logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone); } passwordPolicy.verifyIfPasswordCompliesWithPasswordPolicies(password, userName, getAccount(accountId).getDomainId()); @@ -2539,14 +2537,14 @@ public UserAccount authenticateUser(final String username, final String password timestamp = Long.parseLong(timestampStr); long currentTime = System.currentTimeMillis(); if (Math.abs(currentTime - timestamp) > tolerance) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expired timestamp passed in to login, current time = " + currentTime + ", timestamp = " + timestamp); + if (logger.isDebugEnabled()) { + logger.debug("Expired timestamp passed in to login, current time = " + currentTime + ", timestamp = " + timestamp); } return null; } } catch (NumberFormatException nfe) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Invalid timestamp passed in to login: " + timestampStr); + if (logger.isDebugEnabled()) { + logger.debug("Invalid timestamp passed in to login: " + timestampStr); } return null; } @@ -2560,8 +2558,8 @@ public UserAccount authenticateUser(final String username, final String password } if ((signature == null) || (timestamp == 0L)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Missing parameters in login request, signature = " + signature + ", timestamp = " + timestamp); + if (logger.isDebugEnabled()) { + logger.debug("Missing parameters in login request, signature = " + signature + ", timestamp = " + timestamp); } return null; } @@ -2576,12 +2574,12 @@ public UserAccount authenticateUser(final 
String username, final String password String computedSignature = new String(Base64.encodeBase64(encryptedBytes)); boolean equalSig = ConstantTimeComparator.compareStrings(signature, computedSignature); if (!equalSig) { - s_logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); + logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); } else { user = _userAccountDao.getUserAccount(username, domainId); } } catch (Exception ex) { - s_logger.error("Exception authenticating user", ex); + logger.error("Exception authenticating user", ex); return null; } } @@ -2589,12 +2587,12 @@ public UserAccount authenticateUser(final String username, final String password if (user != null) { // don't allow to authenticate system user if (user.getId() == User.UID_SYSTEM) { - s_logger.error("Failed to authenticate user: " + username + " in domain " + domainId); + logger.error("Failed to authenticate user: " + username + " in domain " + domainId); return null; } // don't allow baremetal system user if (BaremetalUtils.BAREMETAL_SYSTEM_ACCOUNT_NAME.equals(user.getUsername())) { - s_logger.error("Won't authenticate user: " + username + " in domain " + domainId); + logger.error("Won't authenticate user: " + username + " in domain " + domainId); return null; } @@ -2607,35 +2605,35 @@ public UserAccount authenticateUser(final String username, final String password final Boolean ApiSourceCidrChecksEnabled = ApiServiceConfiguration.ApiSourceCidrChecksEnabled.value(); if (ApiSourceCidrChecksEnabled) { - s_logger.debug("CIDRs from which account '" + account.toString() + "' is allowed to perform API calls: " + accessAllowedCidrs); + logger.debug("CIDRs from which account '" + account.toString() + "' is allowed to perform API calls: " + accessAllowedCidrs); // Block when is not in the list of allowed IPs if (!NetUtils.isIpInCidrList(loginIpAddress, accessAllowedCidrs.split(","))) { - 
s_logger.warn("Request by account '" + account.toString() + "' was denied since " + loginIpAddress.toString().replace("/", "") + " does not match " + accessAllowedCidrs); + logger.warn("Request by account '" + account.toString() + "' was denied since " + loginIpAddress.toString().replace("/", "") + " does not match " + accessAllowedCidrs); throw new CloudAuthenticationException("Failed to authenticate user '" + username + "' in domain '" + domain.getPath() + "' from ip " + loginIpAddress.toString().replace("/", "") + "; please provide valid credentials"); } } // Here all is fine! - if (s_logger.isDebugEnabled()) { - s_logger.debug("User: " + username + " in domain " + domainId + " has successfully logged in"); + if (logger.isDebugEnabled()) { + logger.debug("User: " + username + " in domain " + domainId + " has successfully logged in"); } ActionEventUtils.onActionEvent(user.getId(), user.getAccountId(), user.getDomainId(), EventTypes.EVENT_USER_LOGIN, "user has logged in from IP Address " + loginIpAddress, user.getId(), ApiCommandResourceType.User.toString()); return user; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("User: " + username + " in domain " + domainId + " has failed to log in"); + if (logger.isDebugEnabled()) { + logger.debug("User: " + username + " in domain " + domainId + " has failed to log in"); } return null; } } private UserAccount getUserAccount(String username, String password, Long domainId, Map requestParameters) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Attempting to log in user: " + username + " in domain " + domainId); + if (logger.isDebugEnabled()) { + logger.debug("Attempting to log in user: " + username + " in domain " + domainId); } UserAccount userAccount = _userAccountDao.getUserAccount(username, domainId); @@ -2674,8 +2672,8 @@ private UserAccount getUserAccount(String username, String password, Long domain userAccount = _userAccountDao.getUserAccount(username, domainId); if 
(!userAccount.getState().equalsIgnoreCase(Account.State.ENABLED.toString()) || !userAccount.getAccountState().equalsIgnoreCase(Account.State.ENABLED.toString())) { - if (s_logger.isInfoEnabled()) { - s_logger.info("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); + if (logger.isInfoEnabled()) { + logger.info("User " + username + " in domain " + domainName + " is disabled/locked (or account is disabled/locked)"); } throw new CloudAuthenticationException("User " + username + " (or their account) in domain " + domainName + " is disabled/locked. Please contact the administrator."); } @@ -2686,12 +2684,12 @@ private UserAccount getUserAccount(String username, String password, Long domain return userAccount; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to authenticate user with username " + username + " in domain " + domainId); + if (logger.isDebugEnabled()) { + logger.debug("Unable to authenticate user with username " + username + " in domain " + domainId); } if (userAccount == null) { - s_logger.warn("Unable to find an user with username " + username + " in domain " + domainId); + logger.warn("Unable to find an user with username " + username + " in domain " + domainId); return null; } @@ -2701,7 +2699,7 @@ private UserAccount getUserAccount(String username, String password, Long domain updateLoginAttemptsWhenIncorrectLoginAttemptsEnabled(userAccount, updateIncorrectLoginCount, _allowedLoginAttempts); } } else { - s_logger.info("User " + userAccount.getUsername() + " is disabled/locked"); + logger.info("User " + userAccount.getUsername() + " is disabled/locked"); } return null; } @@ -2715,11 +2713,11 @@ protected void updateLoginAttemptsWhenIncorrectLoginAttemptsEnabled(UserAccount } if (attemptsMade < allowedLoginAttempts) { updateLoginAttempts(account.getId(), attemptsMade, false); - s_logger.warn("Login attempt failed. You have " + + logger.warn("Login attempt failed. 
You have " + (allowedLoginAttempts - attemptsMade) + " attempt(s) remaining"); } else { updateLoginAttempts(account.getId(), allowedLoginAttempts, true); - s_logger.warn("User " + account.getUsername() + + logger.warn("User " + account.getUsername() + " has been disabled due to multiple failed login attempts." + " Please contact admin."); } } @@ -2854,7 +2852,7 @@ private String createUserApiKey(long userId) { _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user id=" + userId, ex); } return null; } @@ -2881,7 +2879,7 @@ private String createUserSecretKey(long userId) { _userDao.update(userId, updatedUser); return encodedKey; } catch (NoSuchAlgorithmException ex) { - s_logger.error("error generating secret key for user id=" + userId, ex); + logger.error("error generating secret key for user id=" + userId, ex); } return null; } @@ -3163,8 +3161,8 @@ public UserAccount getUserAccountById(Long userId) { public void checkAccess(Account account, ServiceOffering so, DataCenter zone) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(account, so, zone)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + account + " to " + so + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + account + " to " + so + " by " + checker.getName()); } return; } @@ -3178,8 +3176,8 @@ public void checkAccess(Account account, ServiceOffering so, DataCenter zone) th public void checkAccess(Account account, DiskOffering dof, DataCenter zone) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(account, dof, zone)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + account + " to " + dof + " by " + 
checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + account + " to " + dof + " by " + checker.getName()); } return; } @@ -3193,8 +3191,8 @@ public void checkAccess(Account account, DiskOffering dof, DataCenter zone) thro public void checkAccess(Account account, NetworkOffering nof, DataCenter zone) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(account, nof, zone)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + account + " to " + nof + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + account + " to " + nof + " by " + checker.getName()); } return; } @@ -3208,8 +3206,8 @@ public void checkAccess(Account account, NetworkOffering nof, DataCenter zone) t public void checkAccess(Account account, VpcOffering vof, DataCenter zone) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(account, vof, zone)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + account + " to " + vof + " by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + account + " to " + vof + " by " + checker.getName()); } return; } @@ -3223,8 +3221,8 @@ public void checkAccess(Account account, VpcOffering vof, DataCenter zone) throw public void checkAccess(User user, ControlledEntity entity) throws PermissionDeniedException { for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(user, entity)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Access granted to " + user + "to " + entity + "by " + checker.getName()); + if (logger.isDebugEnabled()) { + logger.debug("Access granted to " + user + "to " + entity + "by " + checker.getName()); } return; } @@ -3334,7 +3332,7 @@ protected UserTwoFactorAuthenticationSetupResponse enableTwoFactorAuthentication if 
(StringUtils.isEmpty(providerName)) { providerName = userTwoFactorAuthenticationDefaultProvider.valueIn(domainId); - s_logger.debug(String.format("Provider name is not given to setup 2FA, so using the default 2FA provider %s", providerName)); + logger.debug(String.format("Provider name is not given to setup 2FA, so using the default 2FA provider %s", providerName)); } UserTwoFactorAuthenticator provider = getUserTwoFactorAuthenticationProvider(providerName); diff --git a/server/src/main/java/com/cloud/user/DomainManagerImpl.java b/server/src/main/java/com/cloud/user/DomainManagerImpl.java index 1551309890cc..321d0500f229 100644 --- a/server/src/main/java/com/cloud/user/DomainManagerImpl.java +++ b/server/src/main/java/com/cloud/user/DomainManagerImpl.java @@ -55,7 +55,6 @@ import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.BooleanUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.query.dao.DiskOfferingJoinDao; @@ -105,7 +104,6 @@ @Component public class DomainManagerImpl extends ManagerBase implements DomainManager, DomainService { - public static final Logger s_logger = Logger.getLogger(DomainManagerImpl.class); @Inject private DomainDao _domainDao; @@ -265,7 +263,7 @@ public DomainVO doInTransaction(TransactionStatus status) { protected DomainVO createDomainVo(String name, Long parentId, Long ownerId, String networkDomain, String domainUuid) { if (StringUtils.isBlank(domainUuid)) { domainUuid = UUID.randomUUID().toString(); - s_logger.info(String.format("Domain UUID [%s] generated for domain name [%s].", domainUuid, name)); + logger.info(String.format("Domain UUID [%s] generated for domain name [%s].", domainUuid, name)); } DomainVO domainVO = new DomainVO(name, ownerId, parentId, networkDomain, domainUuid); @@ -361,7 +359,7 @@ public boolean deleteDomain(DomainVO domain, Boolean cleanup) { try { // 
mark domain as inactive - s_logger.debug("Marking domain id=" + domain.getId() + " as " + Domain.State.Inactive + " before actually deleting it"); + logger.debug("Marking domain id=" + domain.getId() + " as " + Domain.State.Inactive + " before actually deleting it"); domain.setState(Domain.State.Inactive); _domainDao.update(domain.getId(), domain); @@ -375,12 +373,12 @@ public boolean deleteDomain(DomainVO domain, Boolean cleanup) { private GlobalLock getGlobalLock() { GlobalLock lock = getGlobalLock("DomainCleanup"); if (lock == null) { - s_logger.debug("Couldn't get the global lock"); + logger.debug("Couldn't get the global lock"); return null; } if (!lock.lock(30)) { - s_logger.debug("Couldn't lock the db"); + logger.debug("Couldn't lock the db"); return null; } return lock; @@ -400,7 +398,7 @@ private boolean cleanDomain(DomainVO domain, Boolean cleanup) { e.addProxyObject(domain.getUuid(), "domainId"); throw e; } else { - s_logger.debug("Domain specific Virtual IP ranges " + " are successfully released as a part of domain id=" + domain.getId() + " cleanup."); + logger.debug("Domain specific Virtual IP ranges " + " are successfully released as a part of domain id=" + domain.getId() + " cleanup."); } cleanupDomainDetails(domain.getId()); @@ -409,7 +407,7 @@ private boolean cleanDomain(DomainVO domain, Boolean cleanup) { CallContext.current().putContextParameter(Domain.class, domain.getUuid()); return true; } catch (Exception ex) { - s_logger.error("Exception deleting domain with id " + domain.getId(), ex); + logger.error("Exception deleting domain with id " + domain.getId(), ex); if (ex instanceof CloudRuntimeException) { rollbackDomainState(domain); throw (CloudRuntimeException)ex; @@ -424,7 +422,7 @@ private boolean cleanDomain(DomainVO domain, Boolean cleanup) { * @param domain domain */ protected void rollbackDomainState(DomainVO domain) { - s_logger.debug("Changing domain id=" + domain.getId() + " state back to " + Domain.State.Active + + 
logger.debug("Changing domain id=" + domain.getId() + " state back to " + Domain.State.Active + " because it can't be removed due to resources referencing to it"); domain.setState(Domain.State.Active); _domainDao.update(domain.getId(), domain); @@ -465,7 +463,7 @@ protected void removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources( List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domain.getId()); List dedicatedResources = _dedicatedDao.listByDomainId(domain.getId()); if (CollectionUtils.isNotEmpty(dedicatedResources)) { - s_logger.error("There are dedicated resources for the domain " + domain.getId()); + logger.error("There are dedicated resources for the domain " + domain.getId()); hasDedicatedResources = true; } if (accountsForCleanup.isEmpty() && networkIds.isEmpty() && !hasDedicatedResources) { @@ -597,7 +595,7 @@ private void removeDiskOfferings(Long domainId, String domainIdString) { } protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("Cleaning up domain id=" + domainId); + logger.debug("Cleaning up domain id=" + domainId); boolean success = true; DomainVO domainHandle = _domainDao.findById(domainId); { @@ -622,7 +620,7 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp for (DomainVO domain : domains) { success = (success && cleanupDomain(domain.getId(), domain.getAccountId())); if (!success) { - s_logger.warn("Failed to cleanup domain id=" + domain.getId()); + logger.warn("Failed to cleanup domain id=" + domain.getId()); } } } @@ -633,18 +631,18 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp List accounts = _accountDao.search(sc, null); for (AccountVO account : accounts) { if (account.getType() != Account.Type.PROJECT) { - s_logger.debug("Deleting account " + account + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting account " + 
account + " as a part of domain id=" + domainId + " cleanup"); boolean deleteAccount = _accountMgr.deleteAccount(account, CallContext.current().getCallingUserId(), getCaller()); if (!deleteAccount) { - s_logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); + logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); } success = (success && deleteAccount); } else { ProjectVO project = _projectDao.findByProjectAccountId(account.getId()); - s_logger.debug("Deleting project " + project + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting project " + project + " as a part of domain id=" + domainId + " cleanup"); boolean deleteProject = _projectMgr.deleteProject(getCaller(), CallContext.current().getCallingUserId(), project); if (!deleteProject) { - s_logger.warn("Failed to cleanup project " + project + " as a part of domain cleanup"); + logger.warn("Failed to cleanup project " + project + " as a part of domain cleanup"); } success = (success && deleteProject); } @@ -652,23 +650,23 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp //delete the domain shared networks boolean networksDeleted = true; - s_logger.debug("Deleting networks for domain id=" + domainId); + logger.debug("Deleting networks for domain id=" + domainId); List networkIds = _networkDomainDao.listNetworkIdsByDomain(domainId); CallContext ctx = CallContext.current(); ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(ctx.getCallingUserId()), ctx.getCallingAccount()); for (Long networkId : networkIds) { - s_logger.debug("Deleting network id=" + networkId + " as a part of domain id=" + domainId + " cleanup"); + logger.debug("Deleting network id=" + networkId + " as a part of domain id=" + domainId + " cleanup"); if (!_networkMgr.destroyNetwork(networkId, context, false)) { - s_logger.warn("Unable to destroy network id=" + 
networkId + " as a part of domain id=" + domainId + " cleanup."); + logger.warn("Unable to destroy network id=" + networkId + " as a part of domain id=" + domainId + " cleanup."); networksDeleted = false; } else { - s_logger.debug("Network " + networkId + " successfully deleted as a part of domain id=" + domainId + " cleanup."); + logger.debug("Network " + networkId + " successfully deleted as a part of domain id=" + domainId + " cleanup."); } } //don't proceed if networks failed to cleanup. The cleanup will be performed for inactive domain once again if (!networksDeleted) { - s_logger.debug("Failed to delete the shared networks as a part of domain id=" + domainId + " clenaup"); + logger.debug("Failed to delete the shared networks as a part of domain id=" + domainId + " clenaup"); return false; } @@ -679,10 +677,10 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp //release dedication if any, before deleting the domain List dedicatedResources = _dedicatedDao.listByDomainId(domainId); if (dedicatedResources != null && !dedicatedResources.isEmpty()) { - s_logger.debug("Releasing dedicated resources for domain" + domainId); + logger.debug("Releasing dedicated resources for domain" + domainId); for (DedicatedResourceVO dr : dedicatedResources) { if (!_dedicatedDao.remove(dr.getId())) { - s_logger.warn("Fail to release dedicated resources for domain " + domainId); + logger.warn("Fail to release dedicated resources for domain " + domainId); return false; } } @@ -696,7 +694,7 @@ protected boolean cleanupDomain(Long domainId, Long ownerId) throws ConcurrentOp _resourceCountDao.removeEntriesByOwner(domainId, ResourceOwnerType.Domain); _resourceLimitDao.removeEntriesByOwner(domainId, ResourceOwnerType.Domain); } else { - s_logger.debug("Can't delete the domain yet because it has " + accountsForCleanup.size() + "accounts that need a cleanup"); + logger.debug("Can't delete the domain yet because it has " + accountsForCleanup.size() + "accounts 
that need a cleanup"); return false; } @@ -938,10 +936,10 @@ public Domain moveDomainAndChildrenToNewParentDomain(MoveDomainCmd cmd) throws R } DomainVO domainToBeMoved = returnDomainIfExistsAndIsActive(idOfDomainToBeMoved); - s_logger.debug(String.format("Found the domain [%s] as the domain to be moved.", domainToBeMoved)); + logger.debug(String.format("Found the domain [%s] as the domain to be moved.", domainToBeMoved)); DomainVO newParentDomain = returnDomainIfExistsAndIsActive(idOfNewParentDomain); - s_logger.debug(String.format("Found the domain [%s] as the new parent domain of the domain to be moved [%s].", newParentDomain, domainToBeMoved)); + logger.debug(String.format("Found the domain [%s] as the new parent domain of the domain to be moved [%s].", newParentDomain, domainToBeMoved)); Account caller = getCaller(); _accountMgr.checkAccess(caller, domainToBeMoved); @@ -970,7 +968,7 @@ public Domain moveDomainAndChildrenToNewParentDomain(MoveDomainCmd cmd) throws R Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - s_logger.debug(String.format("Setting the new parent of the domain to be moved [%s] as [%s].", domainToBeMoved, newParentDomain)); + logger.debug(String.format("Setting the new parent of the domain to be moved [%s] as [%s].", domainToBeMoved, newParentDomain)); domainToBeMoved.setParent(idOfNewParentDomain); updateDomainAndChildrenPathAndLevel(domainToBeMoved, newParentDomain, currentPathOfDomainToBeMoved, newPathOfDomainToBeMoved); @@ -1001,7 +999,7 @@ protected void validateNewParentDomainResourceLimits(DomainVO domainToBeMoved, D + "count for domain [%s] is [%s], the resource count for the new parent domain [%s] is [%s], and the limit is [%s].", domainToBeMoved.getUuid(), newParentDomain.getUuid(), resourceType, domainToBeMoved.getUuid(), currentDomainResourceCount, newParentDomain.getUuid(), newParentDomainResourceCount, newParentDomainResourceLimit); - 
s_logger.error(message); + logger.error(message); throw new ResourceAllocationException(message, resourceType); } } @@ -1043,7 +1041,7 @@ protected void validateNewParentDomainCanAccessResourcesOfDomainToBeMoved(String } if (!domainsOfResourcesInaccessibleToNewParentDomain.isEmpty()) { - s_logger.error(String.format("The new parent domain [%s] does not have access to domains [%s] used by [%s] in the domain to be moved [%s].", + logger.error(String.format("The new parent domain [%s] does not have access to domains [%s] used by [%s] in the domain to be moved [%s].", newParentDomain, domainsOfResourcesInaccessibleToNewParentDomain.keySet(), domainsOfResourcesInaccessibleToNewParentDomain.values(), domainToBeMoved)); throw new InvalidParameterValueException(String.format("New parent domain [%s] does not have access to [%s] used by domain [%s], therefore, domain [%s] cannot be moved.", newParentDomain, resourceToLog, domainToBeMoved, domainToBeMoved)); @@ -1051,7 +1049,7 @@ protected void validateNewParentDomainCanAccessResourcesOfDomainToBeMoved(String } protected DomainVO returnDomainIfExistsAndIsActive(Long idOfDomain) { - s_logger.debug(String.format("Checking if domain with ID [%s] exists and is active.", idOfDomain)); + logger.debug(String.format("Checking if domain with ID [%s] exists and is active.", idOfDomain)); DomainVO domain = _domainDao.findById(idOfDomain); if (domain == null) { @@ -1083,12 +1081,12 @@ protected void updateDomainPathAndLevel(DomainVO domain, String oldPath, String int finalLevel = newLevel + currentLevel - oldRootLevel; domain.setLevel(finalLevel); - s_logger.debug(String.format("Updating the path to [%s] and the level to [%s] of the domain [%s].", finalPath, finalLevel, domain)); + logger.debug(String.format("Updating the path to [%s] and the level to [%s] of the domain [%s].", finalPath, finalLevel, domain)); _domainDao.update(domain.getId(), domain); } protected void updateResourceCounts(Long idOfOldParentDomain, Long 
idOfNewParentDomain) { - s_logger.debug(String.format("Updating the resource counts of the old parent domain [%s] and of the new parent domain [%s].", idOfOldParentDomain, idOfNewParentDomain)); + logger.debug(String.format("Updating the resource counts of the old parent domain [%s] and of the new parent domain [%s].", idOfOldParentDomain, idOfNewParentDomain)); resourceLimitService.recalculateResourceCount(null, idOfOldParentDomain, null); resourceLimitService.recalculateResourceCount(null, idOfNewParentDomain, null); } @@ -1099,7 +1097,7 @@ protected void updateChildCounts(DomainVO oldParentDomain, DomainVO newParentDom oldParentDomain.setChildCount(finalOldParentChildCount); oldParentDomain.setNextChildSeq(finalOldParentChildCount + 1); - s_logger.debug(String.format("Updating the child count of the old parent domain [%s] to [%s].", oldParentDomain, finalOldParentChildCount)); + logger.debug(String.format("Updating the child count of the old parent domain [%s] to [%s].", oldParentDomain, finalOldParentChildCount)); _domainDao.update(oldParentDomain.getId(), oldParentDomain); int finalNewParentChildCount = newParentDomain.getChildCount() + 1; @@ -1107,7 +1105,7 @@ protected void updateChildCounts(DomainVO oldParentDomain, DomainVO newParentDom newParentDomain.setChildCount(finalNewParentChildCount); newParentDomain.setNextChildSeq(finalNewParentChildCount + 1); - s_logger.debug(String.format("Updating the child count of the new parent domain [%s] to [%s].", newParentDomain, finalNewParentChildCount)); + logger.debug(String.format("Updating the child count of the new parent domain [%s] to [%s].", newParentDomain, finalNewParentChildCount)); _domainDao.update(newParentDomain.getId(), newParentDomain); } } diff --git a/server/src/main/java/com/cloud/user/PasswordPolicyImpl.java b/server/src/main/java/com/cloud/user/PasswordPolicyImpl.java index 1082f3cc0d5a..daa57a4a50d8 100644 --- a/server/src/main/java/com/cloud/user/PasswordPolicyImpl.java +++ 
b/server/src/main/java/com/cloud/user/PasswordPolicyImpl.java @@ -20,11 +20,12 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class PasswordPolicyImpl implements PasswordPolicy, Configurable { - private Logger logger = Logger.getLogger(PasswordPolicyImpl.class); + private Logger logger = LogManager.getLogger(PasswordPolicyImpl.class); public void verifyIfPasswordCompliesWithPasswordPolicies(String password, String username, Long domainId) { if (StringUtils.isEmpty(password)) { diff --git a/server/src/main/java/com/cloud/vm/SystemVmLoadScanner.java b/server/src/main/java/com/cloud/vm/SystemVmLoadScanner.java index 42f96b6adeed..2d705267bc91 100644 --- a/server/src/main/java/com/cloud/vm/SystemVmLoadScanner.java +++ b/server/src/main/java/com/cloud/vm/SystemVmLoadScanner.java @@ -20,7 +20,8 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; import org.apache.cloudstack.managed.context.ManagedContextRunnable; @@ -37,7 +38,7 @@ public enum AfterScanAction { nop, expand, shrink } - private static final Logger s_logger = Logger.getLogger(SystemVmLoadScanner.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 seconds @@ -61,7 +62,7 @@ public void stop() { try { _capacityScanScheduler.awaitTermination(1000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { - s_logger.debug("[ignored] interrupted while stopping systemvm load scanner."); + 
logger.debug("[ignored] interrupted while stopping systemvm load scanner."); } _capacityScanLock.releaseRef(); @@ -83,7 +84,7 @@ protected void runInContext() { AsyncJobExecutionContext.unregister(); } catch (Throwable e) { - s_logger.warn("Unexpected exception " + e.getMessage(), e); + logger.warn("Unexpected exception " + e.getMessage(), e); } } @@ -99,8 +100,8 @@ private void loadScan() { } if (!_capacityScanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Capacity scan lock is used by others, skip and wait for my turn"); + if (logger.isTraceEnabled()) { + logger.trace("Capacity scan lock is used by others, skip and wait for my turn"); } return; } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 0d3f047809a2..c68f51cbda4f 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -137,7 +137,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -378,7 +377,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, Configurable { - private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class); /** * The number of seconds to wait before timing out when trying to acquire a global lock. 
@@ -769,7 +767,7 @@ protected void runInContext() { boolean decrementCount = true; try { - s_logger.debug("Trying for vm "+ vmId +" nic Id "+nicId +" ip retrieval ..."); + logger.debug("Trying for vm "+ vmId +" nic Id "+nicId +" ip retrieval ..."); Answer answer = _agentMgr.send(hostId, cmd); NicVO nic = _nicDao.findById(nicId); if (answer.getResult()) { @@ -780,7 +778,7 @@ protected void runInContext() { if (nic != null) { nic.setIPv4Address(vmIp); _nicDao.update(nicId, nic); - s_logger.debug("Vm "+ vmId +" IP "+vmIp +" got retrieved successfully"); + logger.debug("Vm "+ vmId +" IP "+vmIp +" got retrieved successfully"); vmIdCountMap.remove(nicId); decrementCount = false; ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, @@ -796,18 +794,18 @@ protected void runInContext() { _nicDao.update(nicId, nic); } if (answer.getDetails() != null) { - s_logger.debug("Failed to get vm ip for Vm "+ vmId + answer.getDetails()); + logger.debug("Failed to get vm ip for Vm "+ vmId + answer.getDetails()); } } } catch (OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); } catch (AgentUnavailableException e) { - s_logger.warn("Agent Unavailable ", e); + logger.warn("Agent Unavailable ", e); } finally { if (decrementCount) { VmAndCountDetails vmAndCount = vmIdCountMap.get(nicId); vmAndCount.decrementCount(); - s_logger.debug("Ip is not retrieved for VM " + vmId +" nic "+nicId + " ... decremented count to "+vmAndCount.getRetrievalCount()); + logger.debug("Ip is not retrieved for VM " + vmId +" nic "+nicId + " ... 
decremented count to "+vmAndCount.getRetrievalCount()); vmIdCountMap.put(nicId, vmAndCount); } } @@ -815,8 +813,8 @@ protected void runInContext() { } private void addVmUefiBootOptionsToParams(Map params, String bootType, String bootMode) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Adding boot options (%s, %s, %s) into the param map for VM start as UEFI detail(%s=%s) found for the VM", + if (logger.isTraceEnabled()) { + logger.trace(String.format("Adding boot options (%s, %s, %s) into the param map for VM start as UEFI detail(%s=%s) found for the VM", VirtualMachineProfile.Param.UefiFlag.getName(), VirtualMachineProfile.Param.BootType.getName(), VirtualMachineProfile.Param.BootMode.getName(), @@ -848,12 +846,12 @@ public UserVm resetVMPassword(ResetVMPasswordCmd cmd, String password) throws Re } if (userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); } if (userVm.getState() != State.Stopped) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do password reset"); } @@ -882,7 +880,7 @@ private boolean resetVMPasswordInternal(Long vmId, String password) throws Resou if (template.isEnablePassword()) { Nic defaultNic = _networkModel.getDefaultNic(vmId); if (defaultNic == null) { - s_logger.error("Unable to reset password for vm " + vmInstance + " as the instance doesn't have default nic"); + logger.error("Unable to reset password for vm " + vmInstance + " as the instance doesn't have default nic"); return false; } @@ -902,7 +900,7 @@ private boolean resetVMPasswordInternal(Long vmId, String password) throws Resou // Need to reboot the virtual machine so that the 
password gets // redownloaded from the DomR, and reset on the VM if (!result) { - s_logger.debug("Failed to reset password for the virtual machine; no need to reboot the vm"); + logger.debug("Failed to reset password for the virtual machine; no need to reboot the vm"); return false; } else { final UserVmVO userVm = _vmDao.findById(vmId); @@ -913,21 +911,21 @@ private boolean resetVMPasswordInternal(Long vmId, String password) throws Resou encryptAndStorePassword(userVm, password); if (vmInstance.getState() == State.Stopped) { - s_logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of password reset"); + logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of password reset"); return true; } if (rebootVirtualMachine(userId, vmId, false, false) == null) { - s_logger.warn("Failed to reboot the vm " + vmInstance); + logger.warn("Failed to reboot the vm " + vmInstance); return false; } else { - s_logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of password reset"); + logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of password reset"); return true; } } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reset password called for a vm that is not using a password enabled template"); + if (logger.isDebugEnabled()) { + logger.debug("Reset password called for a vm that is not using a password enabled template"); } return false; } @@ -951,7 +949,7 @@ public UserVm resetVMUserData(ResetVMUserDataCmd cmd) throws ResourceUnavailable // Do parameters input validation if (userVm.getState() != State.Stopped) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException(String.format("VM %s should be stopped to do UserData reset", userVm)); } @@ -994,11 +992,11 @@ public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableExce // Do parameters input validation if 
(userVm.getState() == State.Error || userVm.getState() == State.Expunging) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm with specified id is not in the right state"); } if (userVm.getState() != State.Stopped) { - s_logger.error("vm is not in the right state: " + vmId); + logger.error("vm is not in the right state: " + vmId); throw new InvalidParameterValueException("Vm " + userVm + " should be stopped to do SSH Key reset"); } @@ -1044,7 +1042,7 @@ private boolean resetVMSSHKeyInternal(Long vmId, String sshPublicKeys, String ke VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vmInstance.getTemplateId()); Nic defaultNic = _networkModel.getDefaultNic(vmId); if (defaultNic == null) { - s_logger.error("Unable to reset SSH Key for vm " + vmInstance + " as the instance doesn't have default nic"); + logger.error("Unable to reset SSH Key for vm " + vmInstance + " as the instance doesn't have default nic"); return false; } @@ -1061,7 +1059,7 @@ private boolean resetVMSSHKeyInternal(Long vmId, String sshPublicKeys, String ke boolean result = element.saveSSHKey(defaultNetwork, defaultNicProfile, vmProfile, sshPublicKeys); // Need to reboot the virtual machine so that the password gets redownloaded from the DomR, and reset on the VM if (!result) { - s_logger.debug("Failed to reset SSH Key for the virtual machine; no need to reboot the vm"); + logger.debug("Failed to reset SSH Key for the virtual machine; no need to reboot the vm"); return false; } else { final UserVmVO userVm = _vmDao.findById(vmId); @@ -1071,14 +1069,14 @@ private boolean resetVMSSHKeyInternal(Long vmId, String sshPublicKeys, String ke _vmDao.saveDetails(userVm); if (vmInstance.getState() == State.Stopped) { - s_logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part of SSH Key reset"); + logger.debug("Vm " + vmInstance + " is stopped, not rebooting it as a part 
of SSH Key reset"); return true; } if (rebootVirtualMachine(userId, vmId, false, false) == null) { - s_logger.warn("Failed to reboot the vm " + vmInstance); + logger.warn("Failed to reboot the vm " + vmInstance); return false; } else { - s_logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of SSH Key reset"); + logger.debug("Vm " + vmInstance + " is rebooted successfully as a part of SSH Key reset"); return true; } } @@ -1087,13 +1085,13 @@ private boolean resetVMSSHKeyInternal(Long vmId, String sshPublicKeys, String ke @Override public boolean stopVirtualMachine(long userId, long vmId) { boolean status = false; - if (s_logger.isDebugEnabled()) { - s_logger.debug("Stopping vm=" + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Stopping vm=" + vmId); } UserVmVO vm = _vmDao.findById(vmId); if (vm == null || vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is either removed or deleted."); + if (logger.isDebugEnabled()) { + logger.debug("VM is either removed or deleted."); } return true; } @@ -1103,7 +1101,7 @@ public boolean stopVirtualMachine(long userId, long vmId) { VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); status = vmEntity.stop(Long.toString(userId)); } catch (ResourceUnavailableException e) { - s_logger.debug("Unable to stop due to ", e); + logger.debug("Unable to stop due to ", e); status = false; } catch (CloudException e) { throw new CloudRuntimeException("Unable to contact the agent to stop the virtual machine " + vm, e); @@ -1114,12 +1112,12 @@ public boolean stopVirtualMachine(long userId, long vmId) { private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, boolean forced) throws InsufficientCapacityException, ResourceUnavailableException { UserVmVO vm = _vmDao.findById(vmId); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("reboot %s with enterSetup set to %s", vm.getInstanceName(), Boolean.toString(enterSetup))); + if 
(logger.isTraceEnabled()) { + logger.trace(String.format("reboot %s with enterSetup set to %s", vm.getInstanceName(), Boolean.toString(enterSetup))); } if (vm == null || vm.getState() == State.Destroyed || vm.getState() == State.Expunging || vm.getRemoved() != null) { - s_logger.warn("Vm id=" + vmId + " doesn't exist"); + logger.warn("Vm id=" + vmId + " doesn't exist"); return null; } @@ -1149,7 +1147,7 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, //Safe to start the stopped router serially, this is consistent with the way how multiple networks are added to vm during deploy //and routers are started serially ,may revisit to make this process parallel for(DomainRouterVO routerToStart : routers) { - s_logger.warn("Trying to start router " + routerToStart.getInstanceName() + " as part of vm: " + vm.getInstanceName() + " reboot"); + logger.warn("Trying to start router " + routerToStart.getInstanceName() + " as part of vm: " + vm.getInstanceName() + " reboot"); _virtualNetAppliance.startRouter(routerToStart.getId(),true); } } @@ -1158,22 +1156,22 @@ private UserVm rebootVirtualMachine(long userId, long vmId, boolean enterSetup, } catch (Exception ex){ throw new CloudRuntimeException("Router start failed due to" + ex); } finally { - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Rebooting vm %s%s.", vm.getInstanceName(), enterSetup? " entering hardware setup menu" : " as is")); + if (logger.isInfoEnabled()) { + logger.info(String.format("Rebooting vm %s%s.", vm.getInstanceName(), enterSetup? 
" entering hardware setup menu" : " as is")); } Map params = null; if (enterSetup) { params = new HashMap(); params.put(VirtualMachineProfile.Param.BootIntoSetup, Boolean.TRUE); - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Adding %s to paramlist", VirtualMachineProfile.Param.BootIntoSetup)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Adding %s to paramlist", VirtualMachineProfile.Param.BootIntoSetup)); } } _itMgr.reboot(vm.getUuid(), params); } return _vmDao.findById(vmId); } else { - s_logger.error("Vm id=" + vmId + " is not in Running state, failed to reboot"); + logger.error("Vm id=" + vmId + " is not in Running state, failed to reboot"); return null; } } @@ -1370,7 +1368,7 @@ protected ResizeVolumeCmd prepareResizeVolumeCmd(VolumeVO rootVolume, DiskOfferi long currentRootDiskOfferingGiB = currentRootDiskOffering.getDiskSize() / GiB_TO_BYTES; if (newNewOfferingRootSizeInBytes > currentRootDiskOffering.getDiskSize()) { resizeVolumeCmd = new ResizeVolumeCmd(rootVolume.getId(), newRootDiskOffering.getMinIops(), newRootDiskOffering.getMaxIops(), newRootDiskOffering.getId()); - s_logger.debug(String.format("Preparing command to resize VM Root disk from %d GB to %d GB; current offering: %s, new offering: %s.", currentRootDiskOfferingGiB, + logger.debug(String.format("Preparing command to resize VM Root disk from %d GB to %d GB; current offering: %s, new offering: %s.", currentRootDiskOfferingGiB, newNewOfferingRootSizeInGiB, currentRootDiskOffering.getName(), newRootDiskOffering.getName())); } else if (newNewOfferingRootSizeInBytes > 0l && newNewOfferingRootSizeInBytes < currentRootDiskOffering.getDiskSize()) { throw new InvalidParameterValueException(String.format( @@ -1437,7 +1435,7 @@ public UserVm addNicToVirtualMachine(AddNicToVMCmd cmd) throws InvalidParameterV } if(_networkModel.getNicInNetwork(vmInstance.getId(),network.getId()) != null){ - s_logger.debug("VM " + vmInstance.getHostName() + " already in network " + 
network.getName() + " going to add another NIC"); + logger.debug("VM " + vmInstance.getHostName() + " already in network " + network.getName() + " going to add another NIC"); } else { //* get all vms hostNames in the network List hostNames = _vmInstanceDao.listDistinctHostNames(network.getId()); @@ -1473,7 +1471,7 @@ public UserVm addNicToVirtualMachine(AddNicToVMCmd cmd) throws InvalidParameterV } } CallContext.current().putContextParameter(Nic.class, guestNic.getUuid()); - s_logger.debug(String.format("Successful addition of %s from %s through %s", network, vmInstance, guestNic)); + logger.debug(String.format("Successful addition of %s from %s through %s", network, vmInstance, guestNic)); return _vmDao.findById(vmInstance.getId()); } @@ -1484,7 +1482,7 @@ public UserVm addNicToVirtualMachine(AddNicToVMCmd cmd) throws InvalidParameterV */ public void setNicAsDefaultIfNeeded(UserVmVO vmInstance, NicProfile nicProfile) { if (_networkModel.getDefaultNic(vmInstance.getId()) == null) { - s_logger.debug(String.format("Setting NIC %s as default as VM %s has no default NIC.", nicProfile.getName(), vmInstance.getName())); + logger.debug(String.format("Setting NIC %s as default as VM %s has no default NIC.", nicProfile.getName(), vmInstance.getName())); nicProfile.setDefaultNic(true); } } @@ -1590,7 +1588,7 @@ public UserVm removeNicFromVirtualMachine(RemoveNicFromVMCmd cmd) throws Invalid throw new CloudRuntimeException("Unable to remove " + network + " from " + vmInstance); } - s_logger.debug("Successful removal of " + network + " from " + vmInstance); + logger.debug("Successful removal of " + network + " from " + vmInstance); return _vmDao.findById(vmInstance.getId()); } @@ -1655,7 +1653,7 @@ public UserVm updateDefaultNicForVirtualMachine(UpdateDefaultNicForVMCmd cmd) th } if (existing == null) { - s_logger.warn("Failed to update default nic, no nic profile found for existing default network"); + logger.warn("Failed to update default nic, no nic profile found for 
existing default network"); throw new CloudRuntimeException("Failed to find a nic profile for the existing default network. This is bad and probably means some sort of configuration corruption"); } @@ -1691,7 +1689,7 @@ public UserVm updateDefaultNicForVirtualMachine(UpdateDefaultNicForVMCmd cmd) th } throw new CloudRuntimeException("Failed to change default nic to " + nic + " and now we have no default"); } else if (newdefault.getId() == nic.getNetworkId()) { - s_logger.debug("successfully set default network to " + network + " for " + vmInstance); + logger.debug("successfully set default network to " + network + " for " + vmInstance); String nicIdString = Long.toString(nic.getId()); long newNetworkOfferingId = network.getNetworkOfferingId(); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vmInstance.getAccountId(), vmInstance.getDataCenterId(), vmInstance.getId(), @@ -1711,7 +1709,7 @@ public UserVm updateDefaultNicForVirtualMachine(UpdateDefaultNicForVMCmd cmd) th DeployDestination dest = new DeployDestination(dc, null, null, null); _networkMgr.prepare(vmProfile, dest, context); } catch (final Exception e) { - s_logger.info("Got exception: ", e); + logger.info("Got exception: ", e); } } @@ -1769,7 +1767,7 @@ public UserVm updateNicIpForVirtualMachine(UpdateVmNicIpCmd cmd) { Account ipOwner = _accountDao.findByIdIncludingRemoved(vm.getAccountId()); // verify ip address - s_logger.debug("Calling the ip allocation ..."); + logger.debug("Calling the ip allocation ..."); DataCenter dc = _dcDao.findById(network.getDataCenterId()); if (dc == null) { throw new InvalidParameterValueException("There is no dc with the nic"); @@ -1823,14 +1821,14 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } } catch (InsufficientAddressCapacityException e) { - s_logger.error("Allocating ip to guest nic " + nicVO.getUuid() + " failed, for insufficient address capacity"); + logger.error("Allocating ip to guest nic " + nicVO.getUuid() 
+ " failed, for insufficient address capacity"); return null; } } else { throw new InvalidParameterValueException("UpdateVmNicIpCmd is not supported in L2 network"); } - s_logger.debug("Updating IPv4 address of NIC " + nicVO + " to " + ipaddr + "/" + nicVO.getIPv4Netmask() + " with gateway " + nicVO.getIPv4Gateway()); + logger.debug("Updating IPv4 address of NIC " + nicVO + " to " + ipaddr + "/" + nicVO.getIPv4Netmask() + " with gateway " + nicVO.getIPv4Gateway()); nicVO.setIPv4Address(ipaddr); _nicDao.persist(nicVO); @@ -1913,7 +1911,7 @@ public boolean upgradeVirtualMachine(Long vmId, Long newServiceOfferingId, Map customParameters, Long zoneId) throws ResourceAllocationException { if (!AllowDiskOfferingChangeDuringScaleVm.valueIn(zoneId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Changing the disk offering of the root volume during the compute offering change operation is disabled. Please check the setting [%s].", AllowDiskOfferingChangeDuringScaleVm.key())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Changing the disk offering of the root volume during the compute offering change operation is disabled. 
Please check the setting [%s].", AllowDiskOfferingChangeDuringScaleVm.key())); } return; } @@ -2137,8 +2135,8 @@ private void changeDiskOfferingForRootVolume(Long vmId, DiskOfferingVO newDiskOf } if (currentRootDiskOffering.getId() == newDiskOffering.getId() && (!newDiskOffering.isCustomized() || (newDiskOffering.isCustomized() && Objects.equals(rootVolumeOfVm.getSize(), rootDiskSizeBytes)))) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Volume %s is already having disk offering %s", rootVolumeOfVm, newDiskOffering.getUuid())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Volume %s is already having disk offering %s", rootVolumeOfVm, newDiskOffering.getUuid())); } continue; } @@ -2250,21 +2248,21 @@ public UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationE } if (vm.getRemoved() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find vm or vm is removed: " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Unable to find vm or vm is removed: " + vmId); } throw new InvalidParameterValueException("Unable to find vm by id " + vmId); } if (vm.getState() != State.Destroyed) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("vm is not in the right state: " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("vm is not in the right state: " + vmId); } throw new InvalidParameterValueException("Vm with id " + vmId + " is not in the right state"); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Recovering vm " + vmId); + if (logger.isDebugEnabled()) { + logger.debug("Recovering vm " + vmId); } Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @@ -2290,7 +2288,7 @@ public UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationE try { if (!_itMgr.stateTransitTo(vm, VirtualMachine.Event.RecoveryRequested, null)) { - s_logger.debug("Unable to recover the vm because it is not in the correct state: " + vmId); + logger.debug("Unable to 
recover the vm because it is not in the correct state: " + vmId); throw new InvalidParameterValueException("Unable to recover the vm because it is not in the correct state: " + vmId); } } catch (NoTransitionException e) { @@ -2377,7 +2375,7 @@ public boolean configure(String name, Map params) throws Configu _vmIpFetchThreadExecutor = Executors.newFixedThreadPool(VmIpFetchThreadPoolMax.value(), new NamedThreadFactory("vmIpFetchThread")); - s_logger.info("User VM Manager is configured."); + logger.info("User VM Manager is configured."); return true; } @@ -2463,11 +2461,11 @@ public boolean expunge(UserVmVO vm) { if (vm.getRemoved() == null) { // Cleanup vm resources - all the PF/LB/StaticNat rules // associated with vm - s_logger.debug("Starting cleaning up vm " + vm + " resources..."); + logger.debug("Starting cleaning up vm " + vm + " resources..."); if (cleanupVmResources(vm.getId())) { - s_logger.debug("Successfully cleaned up vm " + vm + " resources as a part of expunge process"); + logger.debug("Successfully cleaned up vm " + vm + " resources as a part of expunge process"); } else { - s_logger.warn("Failed to cleanup resources as a part of vm " + vm + " expunge"); + logger.warn("Failed to cleanup resources as a part of vm " + vm + " expunge"); return false; } @@ -2482,13 +2480,13 @@ public boolean expunge(UserVmVO vm) { return true; } catch (ResourceUnavailableException e) { - s_logger.warn("Unable to expunge " + vm, e); + logger.warn("Unable to expunge " + vm, e); return false; } catch (OperationTimedoutException e) { - s_logger.warn("Operation time out on expunging " + vm, e); + logger.warn("Operation time out on expunging " + vm, e); return false; } catch (ConcurrentOperationException e) { - s_logger.warn("Concurrent operations on expunging " + vm, e); + logger.warn("Concurrent operations on expunging " + vm, e); return false; } finally { _vmDao.releaseFromLockTable(vm.getId()); @@ -2508,7 +2506,7 @@ private void releaseNetworkResourcesOnExpunge(long id) 
throws ConcurrentOperatio _networkMgr.release(profile, false); } else { - s_logger.error("Couldn't find vm with id = " + id + ", unable to release network resources"); + logger.error("Couldn't find vm with id = " + id + ", unable to release network resources"); } } @@ -2522,26 +2520,26 @@ private boolean cleanupVmResources(long vmId) { // cleanup firewall rules if (_firewallMgr.revokeFirewallRulesForVm(vmId)) { - s_logger.debug("Firewall rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Firewall rules are removed successfully as a part of vm id=" + vmId + " expunge"); } else { success = false; - s_logger.warn("Fail to remove firewall rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove firewall rules as a part of vm id=" + vmId + " expunge"); } // cleanup port forwarding rules if (_rulesMgr.revokePortForwardingRulesForVm(vmId)) { - s_logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); + logger.debug("Port forwarding rules are removed successfully as a part of vm id=" + vmId + " expunge"); } else { success = false; - s_logger.warn("Fail to remove port forwarding rules as a part of vm id=" + vmId + " expunge"); + logger.warn("Fail to remove port forwarding rules as a part of vm id=" + vmId + " expunge"); } // cleanup load balancer rules if (_lbMgr.removeVmFromLoadBalancers(vmId)) { - s_logger.debug("Removed vm id=" + vmId + " from all load balancers as a part of expunge process"); + logger.debug("Removed vm id=" + vmId + " from all load balancers as a part of expunge process"); } else { success = false; - s_logger.warn("Fail to remove vm id=" + vmId + " from load balancers as a part of expunge process"); + logger.warn("Fail to remove vm id=" + vmId + " from load balancers as a part of expunge process"); } // If vm is assigned to static nat, disable static nat for the ip @@ -2551,14 +2549,14 @@ private boolean cleanupVmResources(long vmId) { for 
(IPAddressVO ip : ips) { try { if (_rulesMgr.disableStaticNat(ip.getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), User.UID_SYSTEM, true)) { - s_logger.debug("Disabled 1-1 nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.debug("Disabled 1-1 nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); } else { - s_logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); + logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge"); success = false; } } catch (ResourceUnavailableException e) { success = false; - s_logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge because resource is unavailable", e); + logger.warn("Failed to disable static nat for ip address " + ip + " as a part of vm id=" + vmId + " expunge because resource is unavailable", e); } } @@ -2579,11 +2577,11 @@ private void updateVmStateForFailedVmCreation(Long vmId, Long hostId) { if (vm != null) { if (vm.getState().equals(State.Stopped)) { - s_logger.debug("Destroying vm " + vm + " as it failed to create on Host with Id:" + hostId); + logger.debug("Destroying vm " + vm + " as it failed to create on Host with Id:" + hostId); try { _itMgr.stateTransitTo(vm, VirtualMachine.Event.OperationFailedToError, null); } catch (NoTransitionException e1) { - s_logger.warn(e1.getMessage()); + logger.warn(e1.getMessage()); } // destroy associated volumes for vm in error state // get all volumes in non destroyed state @@ -2623,7 +2621,7 @@ protected void runInContext() { if (vmIdAndCount.getRetrievalCount() <= 0) { vmIdCountMap.remove(nicId); - s_logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. removing vm nic from map "); + logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. 
removing vm nic from map "); ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH, @@ -2647,7 +2645,7 @@ protected void runInContext() { } } catch (Exception e) { - s_logger.error("Caught the Exception in VmIpFetchTask", e); + logger.error("Caught the Exception in VmIpFetchTask", e); } finally { scanLock.unlock(); } @@ -2671,22 +2669,22 @@ protected void runInContext() { if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { try { List vms = _vmDao.findDestroyedVms(new Date(System.currentTimeMillis() - ((long)_expungeDelay << 10))); - if (s_logger.isInfoEnabled()) { + if (logger.isInfoEnabled()) { if (vms.size() == 0) { - s_logger.trace("Found " + vms.size() + " vms to expunge."); + logger.trace("Found " + vms.size() + " vms to expunge."); } else { - s_logger.info("Found " + vms.size() + " vms to expunge."); + logger.info("Found " + vms.size() + " vms to expunge."); } } for (UserVmVO vm : vms) { try { expungeVm(vm.getId()); } catch (Exception e) { - s_logger.warn("Unable to expunge " + vm, e); + logger.warn("Unable to expunge " + vm, e); } } } catch (Exception e) { - s_logger.error("Caught the following Exception", e); + logger.error("Caught the following Exception", e); } finally { scanLock.unlock(); } @@ -2731,7 +2729,7 @@ private void verifyVmLimits(UserVmVO vmInstance, Map details) { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.memory, newMemory - currentMemory); } } catch (ResourceAllocationException e) { - s_logger.error(String.format("Failed to updated VM due to: %s", e.getLocalizedMessage())); + logger.error(String.format("Failed to updated VM due to: %s", e.getLocalizedMessage())); throw new InvalidParameterValueException(e.getLocalizedMessage()); } @@ -2848,7 +2846,7 @@ public UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableEx } if (StringUtils.isNotBlank(extraConfig)) { if (EnableAdditionalVmConfig.valueIn(accountId)) 
{ - s_logger.info("Adding extra configuration to user vm: " + vmInstance.getUuid()); + logger.info("Adding extra configuration to user vm: " + vmInstance.getUuid()); addExtraConfig(vmInstance, extraConfig); } else { throw new InvalidParameterValueException("attempted setting extraconfig but enable.additional.vm.configuration is disabled"); @@ -2968,7 +2966,7 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo } if (vm.getState() == State.Error || vm.getState() == State.Expunging) { - s_logger.error("vm is not in the right state: " + id); + logger.error("vm is not in the right state: " + id); throw new InvalidParameterValueException("Vm with id " + id + " is not in the right state"); } @@ -3028,7 +3026,7 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo throw new InvalidParameterValueException("Dynamic Scaling cannot be enabled for the VM since its service offering does not have dynamic scaling enabled"); } if (!UserVmManager.EnableDynamicallyScaleVm.valueIn(vm.getDataCenterId())) { - s_logger.debug(String.format("Dynamic Scaling cannot be enabled for the VM %s since the global setting enable.dynamic.scale.vm is set to false", vm.getUuid())); + logger.debug(String.format("Dynamic Scaling cannot be enabled for the VM %s since the global setting enable.dynamic.scale.vm is set to false", vm.getUuid())); throw new InvalidParameterValueException("Dynamic Scaling cannot be enabled for the VM since corresponding global setting is set to false"); } } @@ -3054,8 +3052,8 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo } } } catch (InvalidParameterValueException e) { - if(s_logger.isDebugEnabled()) { - s_logger.debug(e.getMessage(),e); + if(logger.isDebugEnabled()) { + logger.debug(e.getMessage(),e); } defaultNetwork = _networkModel.getDefaultNetworkForVm(id); } @@ -3077,7 +3075,7 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo 
checkNameForRFCCompliance(hostName); if (vm.getHostName().equals(hostName)) { - s_logger.debug("Vm " + vm + " is already set with the hostName specified: " + hostName); + logger.debug("Vm " + vm + " is already set with the hostName specified: " + hostName); hostName = null; } @@ -3119,7 +3117,7 @@ public UserVm updateVirtualMachine(long id, String displayName, String group, Bo protected void updateUserData(UserVm vm) throws ResourceUnavailableException, InsufficientCapacityException { boolean result = updateUserDataInternal(vm); if (result) { - s_logger.debug(String.format("User data successfully updated for vm id: %s", vm.getId())); + logger.debug(String.format("User data successfully updated for vm id: %s", vm.getId())); } else { throw new CloudRuntimeException("Failed to reset userdata for the virtual machine "); } @@ -3134,7 +3132,7 @@ private void updateDns(UserVmVO vm, String hostName) throws ResourceUnavailableE List routers = _routerDao.findByNetwork(nic.getNetworkId()); for (DomainRouterVO router : routers) { if (router.getState() != State.Running) { - s_logger.warn(String.format("Unable to update DNS for VM %s, as virtual router: %s is not in the right state: %s ", vm, router.getName(), router.getState())); + logger.warn(String.format("Unable to update DNS for VM %s, as virtual router: %s is not in the right state: %s ", vm, router.getName(), router.getState())); continue; } Commands commands = new Commands(Command.OnError.Stop); @@ -3160,7 +3158,7 @@ private boolean updateUserDataInternal(UserVm vm) throws ResourceUnavailableExce List nics = _nicDao.listByVmId(vm.getId()); if (nics == null || nics.isEmpty()) { - s_logger.error("unable to find any nics for vm " + vm.getUuid()); + logger.error("unable to find any nics for vm " + vm.getUuid()); return false; } @@ -3184,12 +3182,12 @@ protected boolean applyUserData(HypervisorType hyperVisorType, UserVm vm, Nic ni } boolean result = element.saveUserData(network, nicProfile, vmProfile); if (!result) { - 
s_logger.error("Failed to update userdata for vm " + vm + " and nic " + nic); + logger.error("Failed to update userdata for vm " + vm + " and nic " + nic); } else { return true; } } else { - s_logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vmProfile.getId() + " because it is not supported in network id=" + network.getId()); + logger.debug("Not applying userdata for nic id=" + nic.getId() + " in vm id=" + vmProfile.getId() + " because it is not supported in network id=" + network.getId()); } return false; } @@ -3199,8 +3197,8 @@ protected boolean applyUserData(HypervisorType hyperVisorType, UserVm vm, Nic ni public UserVm startVirtualMachine(StartVMCmd cmd) throws ExecutionException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { Map additonalParams = new HashMap<>(); if (cmd.getBootIntoSetup() != null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("Adding %s into the param map", VirtualMachineProfile.Param.BootIntoSetup.getName())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("Adding %s into the param map", VirtualMachineProfile.Param.BootIntoSetup.getName())); } additonalParams.put(VirtualMachineProfile.Param.BootIntoSetup, cmd.getBootIntoSetup()); } @@ -3259,7 +3257,7 @@ public UserVm rebootVirtualMachine(RebootVMCmd cmd) throws InsufficientCapacityE for (NicVO nic : nics) { Network network = _networkModel.getNetwork(nic.getNetworkId()); if (_networkModel.isSharedNetworkWithoutServices(network.getId())) { - s_logger.debug("Adding vm " +vmId +" nic id "+ nic.getId() +" into vmIdCountMap as part of vm " + + logger.debug("Adding vm " +vmId +" nic id "+ nic.getId() +" into vmIdCountMap as part of vm " + "reboot for vm ip fetch "); vmIdCountMap.put(nic.getId(), new VmAndCountDetails(nic.getInstanceId(), VmIpFetchTrialMax.value())); } @@ -3288,7 +3286,7 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws 
ResourceUnavailableException, C } if (Arrays.asList(State.Destroyed, State.Expunging).contains(vm.getState()) && !expunge) { - s_logger.debug("Vm id=" + vmId + " is already destroyed"); + logger.debug("Vm id=" + vmId + " is already destroyed"); return vm; } @@ -3296,11 +3294,11 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, C autoScaleManager.checkIfVmActionAllowed(vmId); // check if there are active volume snapshots tasks - s_logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, vm destroy is not permitted, please try again later."); } - s_logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); List volumesToBeDeleted = getVolumesFromIds(cmd); @@ -3330,7 +3328,7 @@ public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, C if (rootVolume != null) { _volService.destroyVolume(rootVolume.getId()); } else { - s_logger.warn(String.format("Tried to destroy ROOT volume for VM [%s], but couldn't retrieve it.", vm.getUuid())); + logger.warn(String.format("Tried to destroy ROOT volume for VM [%s], but couldn't retrieve it.", vm.getUuid())); } } @@ -3385,7 +3383,7 @@ private InstanceGroupVO createVmGroup(String groupName, long accountId) { // not // created. 
if (account == null) { - s_logger.warn("Failed to acquire lock on account"); + logger.warn("Failed to acquire lock on account"); return null; } InstanceGroupVO group = _vmGroupDao.findByAccountAndName(accountId, groupName); @@ -3450,7 +3448,7 @@ public boolean addInstanceToGroup(final long userVmId, String groupName) { if (group != null) { UserVm userVm = _vmDao.acquireInLockTable(userVmId); if (userVm == null) { - s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); + logger.warn("Failed to acquire lock on user vm id=" + userVmId); } try { final InstanceGroupVO groupFinal = group; @@ -3461,7 +3459,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { // it. InstanceGroupVO ngrpLock = _vmGroupDao.lockRow(groupFinal.getId(), false); if (ngrpLock == null) { - s_logger.warn("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); + logger.warn("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); throw new CloudRuntimeException("Failed to acquire lock on vm group id=" + groupFinal.getId() + " name=" + groupFinal.getName()); } @@ -3505,7 +3503,7 @@ public InstanceGroupVO getGroupForVm(long vmId) { return null; } } catch (Exception e) { - s_logger.warn("Error trying to get group for a vm: ", e); + logger.warn("Error trying to get group for a vm: ", e); return null; } } @@ -3520,7 +3518,7 @@ public void removeInstanceFromInstanceGroup(long vmId) { _groupVMMapDao.expunge(sc); } } catch (Exception e) { - s_logger.warn("Error trying to remove vm from group: ", e); + logger.warn("Error trying to remove vm from group: ", e); } } @@ -3579,8 +3577,8 @@ public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOff securityGroupIdList.add(defaultGroup.getId()); } else { // create default security group for the account - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't find default security group for the account " + owner + " so 
creating a new one"); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); } defaultGroup = _securityGroupMgr.createSecurityGroup(SecurityGroupManager.DEFAULT_GROUP_NAME, SecurityGroupManager.DEFAULT_GROUP_DESCRIPTION, owner.getDomainId(), owner.getId(), owner.getAccountName()); @@ -3690,8 +3688,8 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service securityGroupIdList.add(defaultGroup.getId()); } else { // create default security group for the account - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find default security group for the account " + owner + " so creating a new one"); } defaultGroup = _securityGroupMgr.createSecurityGroup(SecurityGroupManager.DEFAULT_GROUP_NAME, SecurityGroupManager.DEFAULT_GROUP_DESCRIPTION, owner.getDomainId(), owner.getId(), owner.getAccountName()); @@ -3770,12 +3768,12 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv @Override @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm") public UserVm finalizeCreateVirtualMachine(long vmId) { - s_logger.info("Loading UserVm " + vmId + " from DB"); + logger.info("Loading UserVm " + vmId + " from DB"); UserVm userVm = getUserVm(vmId); if (userVm == null) { - s_logger.info("Loaded UserVm " + vmId + " (" + userVm.getUuid() + ") from DB"); + logger.info("Loaded UserVm " + vmId + " (" + userVm.getUuid() + ") from DB"); } else { - s_logger.warn("UserVm " + vmId + " does not exist in DB"); + logger.warn("UserVm " + vmId + " does not exist in DB"); } return userVm; } @@ -3855,7 +3853,7 @@ private NetworkVO createDefaultNetworkForAccount(DataCenter zone, Account owner, throw new InvalidParameterValueException("Unable to find physical network with id: " + 
physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - s_logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null); @@ -4009,7 +4007,7 @@ private UserVm getCheckedUserVmResource(DataCenter zone, String hostName, String } catch (ResourceAllocationException | CloudRuntimeException e) { throw e; } catch (Exception e) { - s_logger.error("error during resource reservation and allocation", e); + logger.error("error during resource reservation and allocation", e); throw new CloudRuntimeException(e); } @@ -4303,7 +4301,7 @@ private UserVm getUncheckedUserVmResource(DataCenter zone, String hostName, Stri } catch (ResourceAllocationException | CloudRuntimeException e) { throw e; } catch (Exception e) { - s_logger.error("error during resource reservation and allocation", e); + logger.error("error during resource reservation and allocation", e); throw new CloudRuntimeException(e); } } @@ -4344,7 +4342,7 @@ private long verifyAndGetDiskSize(DiskOfferingVO diskOffering, Long diskSize) { public boolean checkIfDynamicScalingCanBeEnabled(VirtualMachine vm, ServiceOffering offering, VirtualMachineTemplate template, Long zoneId) { boolean canEnableDynamicScaling = (vm != null ? 
vm.isDynamicallyScalable() : true) && offering.isDynamicScalingEnabled() && template.isDynamicallyScalable() && UserVmManager.EnableDynamicallyScaleVm.valueIn(zoneId); if (!canEnableDynamicScaling) { - s_logger.info("VM cannot be configured to be dynamically scalable if any of the service offering's dynamic scaling property, template's dynamic scaling property or global setting is false"); + logger.info("VM cannot be configured to be dynamically scalable if any of the service offering's dynamic scaling property, template's dynamic scaling property or global setting is false"); } return canEnableDynamicScaling; @@ -4537,7 +4535,7 @@ private UserVmVO commitUserVm(final boolean isImport, final DataCenter zone, fin } _vmDao.saveDetails(vm, hiddenDetails); if (!isImport) { - s_logger.debug("Allocating in the DB for vm"); + logger.debug("Allocating in the DB for vm"); DataCenterDeployment plan = new DataCenterDeployment(zone.getId()); List computeTags = new ArrayList(); @@ -4557,8 +4555,8 @@ private UserVmVO commitUserVm(final boolean isImport, final DataCenter zone, fin dataDiskTemplateToDiskOfferingMap, diskOfferingId, rootDiskOfferingId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully allocated DB entry for " + vm); + if (logger.isDebugEnabled()) { + logger.debug("Successfully allocated DB entry for " + vm); } } CallContext.current().setEventDetails("Vm Id: " + vm.getUuid()); @@ -4602,7 +4600,7 @@ private void updateVMDiskController(UserVmVO vm, Map customParam vm.setDetail(VmDetailConstants.ROOT_DISK_CONTROLLER, "scsi"); vm.setDetail(VmDetailConstants.DATA_DISK_CONTROLLER, "scsi"); vm.setDetail(VmDetailConstants.FIRMWARE, "efi"); - s_logger.info("guestOS is OSX : overwrite root disk controller to scsi, use smc and efi"); + logger.info("guestOS is OSX : overwrite root disk controller to scsi, use smc and efi"); } else { String rootDiskControllerSetting = customParameters.get(VmDetailConstants.ROOT_DISK_CONTROLLER); String dataDiskControllerSetting 
= customParameters.get(VmDetailConstants.DATA_DISK_CONTROLLER); @@ -4657,8 +4655,8 @@ private void persistVMDeployAsIsProperties(UserVmVO vm, Map user } else if (value == null) { value = ""; } - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("setting property '%s' as '%s' with value '%s'", key, detailKey, value)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("setting property '%s' as '%s' with value '%s'", key, detailKey, value)); } UserVmDeployAsIsDetailVO detail = new UserVmDeployAsIsDetailVO(vm.getId(), detailKey, value); userVmDeployAsIsDetailsDao.persist(detail); @@ -4686,20 +4684,20 @@ public void validateRootDiskResize(final HypervisorType hypervisorType, Long roo boolean isIso = ImageFormat.ISO == templateVO.getFormat(); if ((rootDiskSize << 30) < templateVO.getSize()) { String error = String.format("Unsupported: rootdisksize override (%s GB) is smaller than template size %s", rootDiskSize, toHumanReadableSize(templateVO.getSize())); - s_logger.error(error); + logger.error(error); throw new InvalidParameterValueException(error); } else if ((rootDiskSize << 30) > templateVO.getSize()) { if (hypervisorType == HypervisorType.VMware && (vm.getDetails() == null || vm.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER) == null)) { - s_logger.warn("If Root disk controller parameter is not overridden, then Root disk resize may fail because current Root disk controller value is NULL."); + logger.warn("If Root disk controller parameter is not overridden, then Root disk resize may fail because current Root disk controller value is NULL."); } else if (hypervisorType == HypervisorType.VMware && vm.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER).toLowerCase().contains("ide") && !isIso) { String error = String.format("Found unsupported root disk controller [%s].", vm.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER)); - s_logger.error(error); + logger.error(error); throw new InvalidParameterValueException(error); } 
else { - s_logger.debug("Rootdisksize override validation successful. Template root disk size " + toHumanReadableSize(templateVO.getSize()) + " Root disk size specified " + rootDiskSize + " GB"); + logger.debug("Rootdisksize override validation successful. Template root disk size " + toHumanReadableSize(templateVO.getSize()) + " Root disk size specified " + rootDiskSize + " GB"); } } else { - s_logger.debug("Root disk size specified is " + toHumanReadableSize(rootDiskSize << 30) + " and Template root disk size is " + toHumanReadableSize(templateVO.getSize()) + ". Both are equal so no need to override"); + logger.debug("Root disk size specified is " + toHumanReadableSize(rootDiskSize << 30) + " and Template root disk size is " + toHumanReadableSize(templateVO.getSize()) + ". Both are equal so no need to override"); customParameters.remove(VmDetailConstants.ROOT_DISK_SIZE); } } @@ -4729,7 +4727,7 @@ public void collectVmNetworkStatistics (final UserVm userVm) { if (!userVm.getHypervisorType().equals(HypervisorType.KVM)) { return; } - s_logger.debug("Collect vm network statistics from host before stopping Vm"); + logger.debug("Collect vm network statistics from host before stopping Vm"); long hostId = userVm.getHostId(); List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); @@ -4739,12 +4737,12 @@ public void collectVmNetworkStatistics (final UserVm userVm) { try { networkStatsAnswer = (GetVmNetworkStatsAnswer) _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - s_logger.warn("Error while collecting network stats for vm: " + userVm.getHostName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting network stats for vm: " + userVm.getHostName() + " from host: " + host.getName(), e); return; } if (networkStatsAnswer != null) { if (!networkStatsAnswer.getResult()) { - s_logger.warn("Error while collecting network stats vm: " + userVm.getHostName() + " from 
host: " + host.getName() + "; details: " + networkStatsAnswer.getDetails()); + logger.warn("Error while collecting network stats vm: " + userVm.getHostName() + " from host: " + host.getName() + "; details: " + networkStatsAnswer.getDetails()); return; } try { @@ -4778,27 +4776,27 @@ public void doInTransactionWithoutResult(TransactionStatus status) { UserStatisticsVO vmNetworkStat_lock = _userStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), nic.getNetworkId(), nic.getIPv4Address(), userVm.getId(), "UserVm"); if ((vmNetworkStat.getBytesSent() == 0) && (vmNetworkStat.getBytesReceived() == 0)) { - s_logger.debug("bytes sent and received are all 0. Not updating user_statistics"); + logger.debug("bytes sent and received are all 0. Not updating user_statistics"); continue; } if (vmNetworkStat_lock == null) { - s_logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and nicId:" + nic.getId()); + logger.warn("unable to find vm network stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and nicId:" + nic.getId()); continue; } if (previousvmNetworkStats != null && ((previousvmNetworkStats.getCurrentBytesSent() != vmNetworkStat_lock.getCurrentBytesSent()) || (previousvmNetworkStats.getCurrentBytesReceived() != vmNetworkStat_lock.getCurrentBytesReceived()))) { - s_logger.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " + + logger.debug("vm network stats changed from the time GetNmNetworkStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + " . 
VM: " + vmNetworkStat.getVmName() + " Sent(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Received(Bytes): " + toHumanReadableSize(vmNetworkStat.getBytesReceived())); continue; } if (vmNetworkStat_lock.getCurrentBytesSent() > vmNetworkStat.getBytesSent()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Sent # of bytes that's less than the last one. " + + if (logger.isDebugEnabled()) { + logger.debug("Sent # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesSent()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesSent())); } @@ -4807,8 +4805,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { vmNetworkStat_lock.setCurrentBytesSent(vmNetworkStat.getBytesSent()); if (vmNetworkStat_lock.getCurrentBytesReceived() > vmNetworkStat.getBytesReceived()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + + if (logger.isDebugEnabled()) { + logger.debug("Received # of bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . 
VM: " + vmNetworkStat.getVmName() + " Reported: " + toHumanReadableSize(vmNetworkStat.getBytesReceived()) + " Stored: " + toHumanReadableSize(vmNetworkStat_lock.getCurrentBytesReceived())); } @@ -4827,7 +4825,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.warn("Unable to update vm network statistics for vm: " + userVm.getId() + " from host: " + hostId, e); + logger.warn("Unable to update vm network statistics for vm: " + userVm.getId() + " from host: " + hostId, e); } } } @@ -4929,7 +4927,7 @@ private UserVm startVirtualMachine(long vmId, Long podId, Long clusterId, Long h UserVmVO tmpVm = _vmDao.findById(vm.getId()); if (!tmpVm.getState().equals(State.Running)) { // Some other thread changed state of VM, possibly vmsync - s_logger.error("VM " + tmpVm + " unexpectedly went to " + tmpVm.getState() + " state"); + logger.error("VM " + tmpVm + " unexpectedly went to " + tmpVm.getState() + " state"); throw new ConcurrentOperationException("Failed to deploy VM "+vm); } @@ -4945,7 +4943,7 @@ private UserVm startVirtualMachine(long vmId, Long podId, Long clusterId, Long h } } catch (Exception e) { - s_logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e); + logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e); } } finally { @@ -4999,7 +4997,7 @@ private void addUserVMCmdlineArgs(Long vmId, VirtualMachineProfile profile, Depl if (dc.getDns2() != null) { buf.append(" dns2=").append(dc.getDns2()); } - s_logger.info("cmdline details: "+ buf.toString()); + logger.info("cmdline details: "+ buf.toString()); } @Override @@ -5053,10 +5051,10 @@ public boolean setupVmForPvlan(boolean add, Long hostId, NicProfile nic) { try { answer = _agentMgr.send(hostId, cmd); } catch (OperationTimedoutException e) { - s_logger.warn("Timed Out", e); + logger.warn("Timed Out", e); return false; } catch 
(AgentUnavailableException e) { - s_logger.warn("Agent Unavailable ", e); + logger.warn("Agent Unavailable ", e); return false; } @@ -5111,8 +5109,8 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command Answer[] answersToCmds = cmds.getAnswers(); if (answersToCmds == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Returning from finalizeStart() since there are no answers to read"); + if (logger.isDebugEnabled()) { + logger.debug("Returning from finalizeStart() since there are no answers to read"); } return true; } @@ -5177,7 +5175,7 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command userVm.setPrivateIpAddress(guestNic.getIPv4Address()); _vmDao.update(userVm.getId(), userVm); - s_logger.info("Detected that ip changed in the answer, updated nic in the db with new ip " + returnedIp); + logger.info("Detected that ip changed in the answer, updated nic in the db with new ip " + returnedIp); } } @@ -5187,7 +5185,7 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command try { _rulesMgr.getSystemIpAndEnableStaticNatForVm(profile.getVirtualMachine(), false); } catch (Exception ex) { - s_logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); + logger.warn("Failed to get system ip and enable static nat for the vm " + profile.getVirtualMachine() + " due to exception ", ex); return false; } @@ -5195,7 +5193,7 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command if (answer != null && answer instanceof RestoreVMSnapshotAnswer) { RestoreVMSnapshotAnswer restoreVMSnapshotAnswer = (RestoreVMSnapshotAnswer) answer; if (restoreVMSnapshotAnswer == null || !restoreVMSnapshotAnswer.getResult()) { - s_logger.warn("Unable to restore the vm snapshot from image file to the VM: " + restoreVMSnapshotAnswer.getDetails()); + logger.warn("Unable to restore the vm snapshot from image file to 
the VM: " + restoreVMSnapshotAnswer.getDetails()); } } @@ -5284,7 +5282,7 @@ public void finalizeStop(VirtualMachineProfile profile, Answer answer) { assert (offering.isAssociatePublicIP() == true) : "User VM should not have system owned public IP associated with it when offering configured not to associate public IP."; _rulesMgr.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); } catch (Exception ex) { - s_logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", ex); + logger.warn("Failed to disable static nat and release system ip " + ip + " as a part of vm " + profile.getVirtualMachine() + " stop due to exception ", ex); } } @@ -5356,8 +5354,8 @@ public Pair> startVirtualMach if (_securityGroupMgr.isVmSecurityGroupEnabled(vmId) && _securityGroupMgr.getSecurityGroupsForVm(vmId).isEmpty() && !_securityGroupMgr.isVmMappedToDefaultSecurityGroup(vmId) && _networkModel.canAddDefaultSecurityGroup()) { // if vm is not mapped to security group, create a mapping - if (s_logger.isDebugEnabled()) { - s_logger.debug("Vm " + vm + " is security group enabled, but not mapped to default security group; creating the mapping automatically"); + if (logger.isDebugEnabled()) { + logger.debug("Vm " + vm + " is security group enabled, but not mapped to default security group; creating the mapping automatically"); } SecurityGroup defaultSecurityGroup = _securityGroupMgr.getDefaultSecurityGroup(vm.getAccountId()); @@ -5377,12 +5375,12 @@ public Pair> startVirtualMach DataCenterDeployment plan = null; boolean deployOnGivenHost = false; if (destinationHost != null) { - s_logger.debug("Destination Host to deploy the VM is specified, specifying a deployment plan to deploy the VM"); + logger.debug("Destination Host to deploy the VM is specified, specifying a deployment plan to deploy the VM"); final ServiceOfferingVO offering = 
serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId()); Pair cpuCapabilityAndCapacity = _capacityMgr.checkIfHostHasCpuCapabilityAndCapacity(destinationHost, offering, false); if (!cpuCapabilityAndCapacity.first() || !cpuCapabilityAndCapacity.second()) { String errorMsg = "Cannot deploy the VM to specified host " + hostId + "; host has cpu capability? " + cpuCapabilityAndCapacity.first() + ", host has capacity? " + cpuCapabilityAndCapacity.second(); - s_logger.info(errorMsg); + logger.info(errorMsg); if (!AllowDeployVmIfGivenHostFails.value()) { throw new InvalidParameterValueException(errorMsg); }; @@ -5393,13 +5391,13 @@ public Pair> startVirtualMach } } } else if (destinationCluster != null) { - s_logger.debug("Destination Cluster to deploy the VM is specified, specifying a deployment plan to deploy the VM"); + logger.debug("Destination Cluster to deploy the VM is specified, specifying a deployment plan to deploy the VM"); plan = new DataCenterDeployment(vm.getDataCenterId(), destinationCluster.getPodId(), destinationCluster.getId(), null, null, null); if (!AllowDeployVmIfGivenHostFails.value()) { deployOnGivenHost = true; } } else if (destinationPod != null) { - s_logger.debug("Destination Pod to deploy the VM is specified, specifying a deployment plan to deploy the VM"); + logger.debug("Destination Pod to deploy the VM is specified, specifying a deployment plan to deploy the VM"); plan = new DataCenterDeployment(vm.getDataCenterId(), destinationPod.getId(), null, null, null, null); if (!AllowDeployVmIfGivenHostFails.value()) { deployOnGivenHost = true; @@ -5432,8 +5430,8 @@ public Pair> startVirtualMach throw new InvalidParameterValueException(ApiConstants.BOOT_INTO_SETUP + " makes no sense for " + vm.getHypervisorType()); } Object paramValue = additionalParams.get(VirtualMachineProfile.Param.BootIntoSetup); - if (s_logger.isTraceEnabled()) { - s_logger.trace("It was specified whether to enter setup mode: " + paramValue.toString()); + if 
(logger.isTraceEnabled()) { + logger.trace("It was specified whether to enter setup mode: " + paramValue.toString()); } params = createParameterInParameterMap(params, additionalParams, VirtualMachineProfile.Param.BootIntoSetup, paramValue); } @@ -5485,19 +5483,19 @@ protected String getCurrentVmPasswordOrDefineNewPassword(String newPassword, Use if (template.isEnablePassword()) { if (vm.getDetail("password") != null) { - s_logger.debug(String.format("Decrypting VM [%s] current password.", vm)); + logger.debug(String.format("Decrypting VM [%s] current password.", vm)); password = DBEncryptionUtil.decrypt(vm.getDetail("password")); } else if (StringUtils.isNotBlank(newPassword)) { - s_logger.debug(String.format("A password for VM [%s] was informed. Setting VM password to value defined by user.", vm)); + logger.debug(String.format("A password for VM [%s] was informed. Setting VM password to value defined by user.", vm)); password = newPassword; vm.setPassword(password); } else { - s_logger.debug(String.format("Setting VM [%s] password to a randomly generated password.", vm)); + logger.debug(String.format("Setting VM [%s] password to a randomly generated password.", vm)); password = _mgr.generateRandomPassword(); vm.setPassword(password); } } else if (StringUtils.isNotBlank(newPassword)) { - s_logger.debug(String.format("A password was informed; however, the template [%s] is not password enabled. Ignoring the parameter.", template)); + logger.debug(String.format("A password was informed; however, the template [%s] is not password enabled. 
Ignoring the parameter.", template)); } return password; @@ -5505,12 +5503,12 @@ protected String getCurrentVmPasswordOrDefineNewPassword(String newPassword, Use private Map createParameterInParameterMap(Map params, Map parameterMap, VirtualMachineProfile.Param parameter, Object parameterValue) { - if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("createParameterInParameterMap(%s, %s)", parameter, parameterValue)); + if (logger.isTraceEnabled()) { + logger.trace(String.format("createParameterInParameterMap(%s, %s)", parameter, parameterValue)); } if (params == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("creating new Parameter map"); + if (logger.isTraceEnabled()) { + logger.trace("creating new Parameter map"); } params = new HashMap<>(); if (parameterMap != null) { @@ -5578,7 +5576,7 @@ public UserVm destroyVm(long vmId, boolean expunge) throws ResourceUnavailableEx } if (vm.getState() == State.Destroyed || vm.getState() == State.Expunging) { - s_logger.trace("Vm id=" + vmId + " is already destroyed"); + logger.trace("Vm id=" + vmId + " is already destroyed"); return vm; } @@ -5628,9 +5626,9 @@ public void collectVmDiskStatistics(final UserVm userVm) { if (!(userVm.getHypervisorType().equals(HypervisorType.KVM) || userVm.getHypervisorType().equals(HypervisorType.VMware))) { return; } - s_logger.debug("Collect vm disk statistics from host before stopping VM"); + logger.debug("Collect vm disk statistics from host before stopping VM"); if (userVm.getHostId() == null) { - s_logger.error("Unable to collect vm disk statistics for VM as the host is null, skipping VM disk statistics collection"); + logger.error("Unable to collect vm disk statistics for VM as the host is null, skipping VM disk statistics collection"); return; } long hostId = userVm.getHostId(); @@ -5642,12 +5640,12 @@ public void collectVmDiskStatistics(final UserVm userVm) { try { diskStatsAnswer = (GetVmDiskStatsAnswer)_agentMgr.easySend(hostId, new 
GetVmDiskStatsCommand(vmNames, host.getGuid(), host.getName())); } catch (Exception e) { - s_logger.warn("Error while collecting disk stats for vm: " + userVm.getInstanceName() + " from host: " + host.getName(), e); + logger.warn("Error while collecting disk stats for vm: " + userVm.getInstanceName() + " from host: " + host.getName(), e); return; } if (diskStatsAnswer != null) { if (!diskStatsAnswer.getResult()) { - s_logger.warn("Error while collecting disk stats vm: " + userVm.getInstanceName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); + logger.warn("Error while collecting disk stats vm: " + userVm.getInstanceName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); return; } try { @@ -5676,12 +5674,12 @@ public void doInTransactionWithoutResult(TransactionStatus status) { VmDiskStatisticsVO vmDiskStat_lock = _vmDiskStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), userVm.getId(), volume.getId()); if ((vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0) && (vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0)) { - s_logger.debug("Read/Write of IO and Bytes are both 0. Not updating vm_disk_statistics"); + logger.debug("Read/Write of IO and Bytes are both 0. 
Not updating vm_disk_statistics"); continue; } if (vmDiskStat_lock == null) { - s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" + logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId() + " and volumeId:" + volume.getId()); continue; } @@ -5691,39 +5689,39 @@ public void doInTransactionWithoutResult(TransactionStatus status) { .getCurrentIOWrite()) || (previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) || (previousVmDiskStats .getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite())))) { - s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " IO Read: " + vmDiskStat.getIORead() + " IO Write: " + vmDiskStat.getIOWrite() + " Bytes Read: " + vmDiskStat.getBytesRead() + " Bytes Write: " + vmDiskStat.getBytesWrite()); continue; } if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Read # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); } vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Write # of IO that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); } vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Read # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesRead()) + " Stored: " + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesRead())); } vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); } vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of Bytes that's less than the last one. 
" + "Assuming something went wrong and persisting it. Host: " + host.getName() + if (logger.isDebugEnabled()) { + logger.debug("Write # of Bytes that's less than the last one. " + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + " Reported: " + toHumanReadableSize(vmDiskStat.getBytesWrite()) + " Stored: " + toHumanReadableSize(vmDiskStat_lock.getCurrentBytesWrite())); } @@ -5744,7 +5742,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } catch (Exception e) { - s_logger.warn(String.format("Unable to update VM disk statistics for %s from %s", userVm.getInstanceName(), host), e); + logger.warn(String.format("Unable to update VM disk statistics for %s from %s", userVm.getInstanceName(), host), e); } } } @@ -5764,7 +5762,7 @@ public UserVm expungeVm(long vmId) throws ResourceUnavailableException, Concurre } if (vm.getRemoved() != null) { - s_logger.trace("Vm id=" + vmId + " is already expunged"); + logger.trace("Vm id=" + vmId + " is already expunged"); return vm; } @@ -6030,7 +6028,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE List child_templates = _templateDao.listByParentTemplatetId(templateId); for (VMTemplateVO tmpl: child_templates){ if (tmpl.getFormat() == Storage.ImageFormat.ISO){ - s_logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId()); + logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId()); _tmplService.attachIso(tmpl.getId(), vm.getId(), true); } } @@ -6039,7 +6037,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE String extraConfig = cmd.getExtraConfig(); if (StringUtils.isNotBlank(extraConfig)) { if (EnableAdditionalVmConfig.valueIn(callerId)) { - s_logger.info("Adding extra configuration to user vm: " + vm.getUuid()); + logger.info("Adding extra configuration to user vm: " + vm.getUuid()); addExtraConfig(vm, 
extraConfig); } else { throw new InvalidParameterValueException("attempted setting extraconfig but enable.additional.vm.configuration is disabled"); @@ -6376,8 +6374,8 @@ private VMInstanceVO preVmStorageMigrationCheck(Long vmId) { // access check - only root admin can migrate VM Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -6487,8 +6485,8 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr // access check - only root admin can migrate VM Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Caller is not a root admin, permission denied to migrate the VM"); + if (logger.isDebugEnabled()) { + logger.debug("Caller is not a root admin, permission denied to migrate the VM"); } throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!"); } @@ -6499,8 +6497,8 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr } // business logic if (vm.getState() != State.Running) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm " + vm); + if (logger.isDebugEnabled()) { + logger.debug("VM is not Running, unable to migrate the vm " + vm); } InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to migrate the vm with specified id"); ex.addProxyObject(vm.getUuid(), "vmId"); @@ -6514,7 +6512,7 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr } if 
(!isOnSupportedHypevisorForMigration(vm)) { - s_logger.error(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM from hypervisor type " + vm.getHypervisorType()); + logger.error(vm + " is not XenServer/VMware/KVM/Ovm/Hyperv, cannot migrate this VM from hypervisor type " + vm.getHypervisorType()); throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support XenServer/VMware/KVM/Ovm/Hyperv/Ovm3 only"); } @@ -6523,7 +6521,7 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr } if (isVMUsingLocalStorage(vm)) { - s_logger.error(vm + " is using Local Storage, cannot migrate this VM."); + logger.error(vm + " is using Local Storage, cannot migrate this VM."); throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate"); } @@ -6563,7 +6561,7 @@ private DeployDestination chooseVmMigrationDestination(VMInstanceVO vm, Host src try { return _planningMgr.planDeployment(profile, plan, excludes, null); } catch (final AffinityConflictException e2) { - s_logger.warn("Unable to create deployment, affinity rules associated to the VM conflict", e2); + logger.warn("Unable to create deployment, affinity rules associated to the VM conflict", e2); throw new CloudRuntimeException("Unable to create deployment, affinity rules associated to the VM conflict"); } catch (final InsufficientServerCapacityException e3) { throw new CloudRuntimeException("Unable to find a server to migrate the vm to"); @@ -6606,8 +6604,8 @@ private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcH // check max guest vm limit for the destinationHost HostVO destinationHostVO = _hostDao.findById(destinationHost.getId()); if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() + if (logger.isDebugEnabled()) { + 
logger.debug("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId() + " already has max Running VMs(count includes system VMs), cannot migrate to this host"); } throw new VirtualMachineMigrationException("Destination host, hostId: " + destinationHost.getId() @@ -6615,11 +6613,11 @@ private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcH } //check if there are any ongoing volume snapshots on the volumes associated with the VM. Long vmId = vm.getId(); - s_logger.debug("Checking if there are any ongoing snapshots volumes associated with VM with ID " + vmId); + logger.debug("Checking if there are any ongoing snapshots volumes associated with VM with ID " + vmId); if (checkStatusOfVolumeSnapshots(vmId, null)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on volume(s) attached to this VM, VM Migration is not permitted, please try again later."); } - s_logger.debug("Found no ongoing snapshots on volumes associated with the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volumes associated with the vm with id " + vmId); return dest; } @@ -6653,7 +6651,7 @@ private void checkIfHostOfVMIsInPrepareForMaintenanceState(Long hostId, Long vmI return; } - s_logger.debug("Host is in PrepareForMaintenance state - " + operation + " VM operation on the VM id: " + vmId + " is not allowed"); + logger.debug("Host is in PrepareForMaintenance state - " + operation + " VM operation on the VM id: " + vmId + " is not allowed"); throw new InvalidParameterValueException(operation + " VM operation on the VM id: " + vmId + " is not allowed as host is preparing for maintenance mode"); } @@ -6701,14 +6699,14 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI //raise an alert String msg = "VM is being migrated from a explicitly dedicated host " + srcHost.getName() + " to non-dedicated host " + destHost.getName(); 
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } //if srcHost is non dedicated but destination Host is explicitly dedicated if (!srcExplDedicated && destExplDedicated) { //raise an alert String msg = "VM is being migrated from a non dedicated host " + srcHost.getName() + " to a explicitly dedicated host " + destHost.getName(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } //if hosts are dedicated to different account/domains, raise an alert @@ -6717,13 +6715,13 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(srcHost) + " to host " + destHost.getName() + " explicitly dedicated to account " + accountOfDedicatedHost(destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } if (!((domainOfDedicatedHost(srcHost) == null) || (domainOfDedicatedHost(srcHost).equals(domainOfDedicatedHost(destHost))))) { String msg = "VM is being migrated from host " + srcHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(srcHost) + " to host " + destHost.getName() + " explicitly dedicated to domain " + domainOfDedicatedHost(destHost); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } } @@ -6761,7 +6759,7 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI } } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + 
logger.warn(msg); } else { //VM is not deployed using implicit planner, check if it migrated between dedicated hosts @@ -6784,12 +6782,12 @@ public void checkHostsDedication(VMInstanceVO vm, long srcHostId, long destHostI msg = "VM is being migrated from implicitly dedicated host " + srcHost.getName() + " to shared host " + destHost.getName(); } _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } else { if (destImplDedicated) { msg = "VM is being migrated from shared host " + srcHost.getName() + " to implicitly dedicated host " + destHost.getName(); _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USERVM, vm.getDataCenterId(), vm.getPodIdToDeployIn(), msg, msg); - s_logger.warn(msg); + logger.warn(msg); } } } @@ -6830,11 +6828,11 @@ private boolean checkIfAllVmsCreatedInStrictMode(Long accountId, List applicableNetworks = new LinkedHashSet<>(); Map requestedIPv4ForNics = new HashMap<>(); @@ -7367,7 +7365,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { applicableNetworks.add(defaultNetworkOld); requestedIPv4ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv4Address()); requestedIPv6ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv6Address()); - s_logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); } } } @@ -7399,10 +7397,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (nicOld != null) { requestedIPv4ForNics.put(network.getId(), nicOld.getIPv4Address()); requestedIPv6ForNics.put(network.getId(), nicOld.getIPv6Address()); - s_logger.debug("AssignVM: use old shared network 
" + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); } } - s_logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); + logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); applicableNetworks.add(network); } } @@ -7455,8 +7453,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } else { // create default security group for the account - if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't find default security group for the account " + if (logger.isDebugEnabled()) { + logger.debug("Couldn't find default security group for the account " + newAccount + " so creating a new one"); } defaultGroup = _securityGroupMgr.createSecurityGroup( @@ -7479,7 +7477,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); - s_logger.debug("AssignVM: Advanced zone, adding security groups no " + logger.debug("AssignVM: Advanced zone, adding security groups no " + securityGroupIdList.size() + " to " + vm.getInstanceName()); @@ -7496,7 +7494,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { applicableNetworks.add(defaultNetworkOld); requestedIPv4ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv4Address()); requestedIPv6ForNics.put(defaultNetworkOld.getId(), defaultNicOld.getIPv6Address()); - s_logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network " + defaultNetworkOld.getName() + " with old ip " + defaultNicOld.getIPv4Address() + " on default nic of vm:" + vm.getInstanceName()); } } } @@ 
-7526,10 +7524,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (nicOld != null) { requestedIPv4ForNics.put(network.getId(), nicOld.getIPv4Address()); requestedIPv6ForNics.put(network.getId(), nicOld.getIPv6Address()); - s_logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); + logger.debug("AssignVM: use old shared network " + network.getName() + " with old ip " + nicOld.getIPv4Address() + " on nic of vm:" + vm.getInstanceName()); } } - s_logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); + logger.debug("AssignVM: Added network " + network.getName() + " to vm " + vm.getId()); applicableNetworks.add(network); } } else if (applicableNetworks.isEmpty()) { @@ -7551,7 +7549,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - s_logger.debug("Creating network for account " + newAccount + " from the network offering id=" + requiredOfferings.get(0).getId() + logger.debug("Creating network for account " + newAccount + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, null, false, null, newAccount, @@ -7561,17 +7559,17 @@ public void doInTransactionWithoutResult(TransactionStatus status) { if (requiredOfferings.get(0).isPersistent()) { DeployDestination dest = new DeployDestination(zone, null, null, null); UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + newNetwork, s_logger); + Journal journal = new 
Journal.LogJournal("Implementing " + newNetwork, logger); ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, caller); - s_logger.debug("Implementing the network for account" + newNetwork + " as a part of" + " network provision for persistent networks"); + logger.debug("Implementing the network for account" + newNetwork + " as a part of" + " network provision for persistent networks"); try { Pair implementedNetwork = _networkMgr.implementNetwork(newNetwork.getId(), dest, context); if (implementedNetwork == null || implementedNetwork.first() == null) { - s_logger.warn("Failed to implement the network " + newNetwork); + logger.warn("Failed to implement the network " + newNetwork); } newNetwork = implementedNetwork.second(); } catch (Exception ex) { - s_logger.warn("Failed to implement network " + newNetwork + " elements and" + logger.warn("Failed to implement network " + newNetwork + " elements and" + " resources as a part of network provision for persistent network due to ", ex); CloudRuntimeException e = new CloudRuntimeException("Failed to implement network" + " (with specified id) elements and resources as a part of network provision"); @@ -7613,10 +7611,10 @@ public void doInTransactionWithoutResult(TransactionStatus status) { VirtualMachine vmi = _itMgr.findById(vm.getId()); VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmi); _networkMgr.allocate(vmProfile, networks, null); - s_logger.debug("AssignVM: Advance virtual, adding networks no " + networks.size() + " to " + vm.getInstanceName()); + logger.debug("AssignVM: Advance virtual, adding networks no " + networks.size() + " to " + vm.getInstanceName()); } // END IF NON SEC GRP ENABLED } // END IF ADVANCED - s_logger.info("AssignVM: vm " + vm.getInstanceName() + " now belongs to account " + newAccount.getAccountName()); + logger.info("AssignVM: vm " + vm.getInstanceName() + " now belongs to account " + newAccount.getAccountName()); 
return vm; } @@ -7628,7 +7626,7 @@ private boolean canAccountUseNetwork(Account newAccount, Network network) { _networkModel.checkNetworkPermissions(newAccount, network); return true; } catch (PermissionDeniedException e) { - s_logger.debug(String.format("AssignVM: %s network %s can not be used by new account %s", network.getGuestType(), network.getName(), newAccount.getAccountName())); + logger.debug(String.format("AssignVM: %s network %s can not be used by new account %s", network.getGuestType(), network.getName(), newAccount.getAccountName())); return false; } } @@ -7653,11 +7651,11 @@ public UserVm restoreVM(RestoreVMCmd cmd) throws InsufficientCapacityException, _accountMgr.checkAccess(caller, null, true, vm); //check if there are any active snapshots on volumes associated with the VM - s_logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vmId); if (checkStatusOfVolumeSnapshots(vmId, Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, Re-install VM is not permitted, please try again later."); } - s_logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); + logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vmId); return restoreVMInternal(caller, vm, newTemplateId); } @@ -7757,7 +7755,7 @@ public UserVm restoreVirtualMachine(final Account caller, final long vmId, final try { _itMgr.stop(vm.getUuid()); } catch (ResourceUnavailableException e) { - s_logger.debug("Stop vm " + vm.getUuid() + " failed", e); + logger.debug("Stop vm " + vm.getUuid() + " failed", e); CloudRuntimeException ex = new CloudRuntimeException("Stop vm failed for specified vmId"); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; @@ -7814,7 +7812,7 @@ public Pair doInTransaction(final 
TransactionStatus status) th } catch (final CloudRuntimeException e) { throw e; } catch (final Exception e) { - s_logger.error("Unable to restore VM " + userVm.getUuid(), e); + logger.error("Unable to restore VM " + userVm.getUuid(), e); throw new CloudRuntimeException(e); } @@ -7841,12 +7839,12 @@ public Pair doInTransaction(final TransactionStatus status) th if (vm.getHypervisorType() == HypervisorType.VMware) { VolumeInfo volumeInStorage = volFactory.getVolume(root.getId()); if (volumeInStorage != null) { - s_logger.info("Expunging volume " + root.getId() + " from primary data store"); + logger.info("Expunging volume " + root.getId() + " from primary data store"); AsyncCallFuture future = _volService.expungeVolumeAsync(volFactory.getVolume(root.getId())); try { future.get(); } catch (Exception e) { - s_logger.debug("Failed to expunge volume:" + root.getId(), e); + logger.debug("Failed to expunge volume:" + root.getId(), e); } } } @@ -7886,14 +7884,14 @@ public Pair doInTransaction(final TransactionStatus status) th } } } catch (Exception e) { - s_logger.debug("Unable to start VM " + vm.getUuid(), e); + logger.debug("Unable to start VM " + vm.getUuid(), e); CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM with specified id" + e.getMessage()); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } } - s_logger.debug("Restore VM " + vmId + " done successfully"); + logger.debug("Restore VM " + vmId + " done successfully"); return vm; } @@ -7995,7 +7993,7 @@ else if (host.getHypervisorType() == HypervisorType.KVM) { if (!cmds.isSuccessful()) { for (Answer answer : cmds.getAnswers()) { if (!answer.getResult()) { - s_logger.warn("Failed to reset vm due to: " + answer.getDetails()); + logger.warn("Failed to reset vm due to: " + answer.getDetails()); throw new CloudRuntimeException("Unable to reset " + vm + " due to " + answer.getDetails()); } @@ -8046,12 +8044,12 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { if 
(answer == null) { String msg = "Unable to get an answer to the modify targets command"; - s_logger.warn(msg); + logger.warn(msg); } else if (!answer.getResult()) { String msg = "Unable to modify target on the following host: " + hostId; - s_logger.warn(msg); + logger.warn(msg); } } @@ -8068,7 +8066,7 @@ private void encryptAndStorePassword(UserVmVO vm, String password) { String sshPublicKeys = vm.getDetail(VmDetailConstants.SSH_PUBLIC_KEY); if (sshPublicKeys != null && !sshPublicKeys.equals("") && password != null && !password.equals("saved_password")) { if (!sshPublicKeys.startsWith("ssh-rsa")) { - s_logger.warn("Only RSA public keys can be used to encrypt a vm password."); + logger.warn("Only RSA public keys can be used to encrypt a vm password."); return; } String encryptedPasswd = RSAHelper.encryptWithSSHPublicKey(sshPublicKeys, password); @@ -8087,8 +8085,8 @@ public void persistDeviceBusInfo(UserVmVO vm, String rootDiskController) { if (StringUtils.isEmpty(existingVmRootDiskController) && StringUtils.isNotEmpty(rootDiskController)) { vm.setDetail(VmDetailConstants.ROOT_DISK_CONTROLLER, rootDiskController); _vmDao.saveDetails(vm); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Persisted device bus information rootDiskController=" + rootDiskController + " for vm: " + vm.getDisplayName()); + if (logger.isDebugEnabled()) { + logger.debug("Persisted device bus information rootDiskController=" + rootDiskController + " for vm: " + vm.getDisplayName()); } } } @@ -8135,15 +8133,15 @@ private boolean checkStatusOfVolumeSnapshots(long vmId, Volume.Type type) { } else { listVolumes = _volsDao.findByInstance(vmId); } - s_logger.debug("Found "+listVolumes.size()+" no. of volumes of type "+type+" for vm with VM ID "+vmId); + logger.debug("Found "+listVolumes.size()+" no. 
of volumes of type "+type+" for vm with VM ID "+vmId); for (VolumeVO volume : listVolumes) { Long volumeId = volume.getId(); - s_logger.debug("Checking status of snapshots for Volume with Volume Id: "+volumeId); + logger.debug("Checking status of snapshots for Volume with Volume Id: "+volumeId); List ongoingSnapshots = _snapshotDao.listByStatus(volumeId, Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); int ongoingSnapshotsCount = ongoingSnapshots.size(); - s_logger.debug("The count of ongoing Snapshots for VM with ID "+vmId+" and disk type "+type+" is "+ongoingSnapshotsCount); + logger.debug("The count of ongoing Snapshots for VM with ID "+vmId+" and disk type "+type+" is "+ongoingSnapshotsCount); if (ongoingSnapshotsCount > 0) { - s_logger.debug("Found "+ongoingSnapshotsCount+" no. of snapshots, on volume of type "+type+", which snapshots are not yet backed up"); + logger.debug("Found "+ongoingSnapshotsCount+" no. of snapshots, on volume of type "+type+", which snapshots are not yet backed up"); return true; } } @@ -8193,7 +8191,7 @@ private void detachVolumesFromVm(List volumes) { } if (detachResult == null) { - s_logger.error("DestroyVM remove volume - failed to detach and delete volume " + volume.getInstanceId() + " from instance " + volume.getId()); + logger.error("DestroyVM remove volume - failed to detach and delete volume " + volume.getInstanceId() + " from instance " + volume.getId()); } } } @@ -8216,7 +8214,7 @@ private void destroyVolumeInContext(UserVmVO vm, boolean expunge, VolumeVO volum Volume result = _volumeService.destroyVolume(volume.getId(), CallContext.current().getCallingAccount(), expunge, false); if (result == null) { - s_logger.error(String.format("DestroyVM remove volume - failed to delete volume %s from instance %s", volume.getId(), volume.getInstanceId())); + logger.error(String.format("DestroyVM remove volume - failed to delete volume %s from instance %s", volume.getId(), volume.getInstanceId())); } } 
finally { // Remove volumeContext and pop vmContext back @@ -8265,7 +8263,7 @@ public boolean unmanageUserVM(Long vmId) { boolean result; try { if (vm.getState() != State.Running && vm.getState() != State.Stopped) { - s_logger.debug("VM ID = " + vmId + " is not running or stopped, cannot be unmanaged"); + logger.debug("VM ID = " + vmId + " is not running or stopped, cannot be unmanaged"); return false; } @@ -8287,7 +8285,7 @@ public boolean unmanageUserVM(Long vmId) { throw new CloudRuntimeException("Error while unmanaging VM: " + vm.getUuid()); } } catch (Exception e) { - s_logger.error("Could not unmanage VM " + vm.getUuid(), e); + logger.error("Could not unmanage VM " + vm.getUuid(), e); throw new CloudRuntimeException(e); } finally { _vmDao.releaseFromLockTable(vm.getId()); @@ -8326,7 +8324,7 @@ private void unmanageVMFromDB(long vmId) { private void removeVMFromAffinityGroups(long vmId) { List affinityGroups = _affinityGroupVMMapDao.listByInstanceId(vmId); if (affinityGroups.size() > 0) { - s_logger.debug("Cleaning up VM from affinity groups after unmanaging"); + logger.debug("Cleaning up VM from affinity groups after unmanaging"); for (AffinityGroupVMMapVO map : affinityGroups) { _affinityGroupVMMapDao.expunge(map.getId()); } @@ -8372,11 +8370,11 @@ private void postProcessingUnmanageVMVolumes(List volumes, UserVmVO vm } private void checkUnmanagingVMOngoingVolumeSnapshots(UserVmVO vm) { - s_logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vm.getId()); + logger.debug("Checking if there are any ongoing snapshots on the ROOT volumes associated with VM with ID " + vm.getId()); if (checkStatusOfVolumeSnapshots(vm.getId(), Volume.Type.ROOT)) { throw new CloudRuntimeException("There is/are unbacked up snapshot(s) on ROOT volume, vm unmanage is not permitted, please try again later."); } - s_logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vm.getId()); + 
logger.debug("Found no ongoing snapshots on volume of type ROOT, for the vm with id " + vm.getId()); } private void checkUnmanagingVMVolumes(UserVmVO vm, List volumes) { @@ -8434,7 +8432,7 @@ private void collectVmDiskAndNetworkStatistics(Long vmId, State expectedState) { if (uservm != null) { collectVmDiskAndNetworkStatistics(uservm, expectedState); } else { - s_logger.info(String.format("Skip collecting vm %s disk and network statistics as it is not user vm", uservm)); + logger.info(String.format("Skip collecting vm %s disk and network statistics as it is not user vm", uservm)); } } @@ -8443,7 +8441,7 @@ private void collectVmDiskAndNetworkStatistics(UserVm vm, State expectedState) { collectVmDiskStatistics(vm); collectVmNetworkStatistics(vm); } else { - s_logger.warn(String.format("Skip collecting vm %s disk and network statistics as the expected vm state is %s but actual state is %s", vm, expectedState, vm.getState())); + logger.warn(String.format("Skip collecting vm %s disk and network statistics as the expected vm state is %s but actual state is %s", vm, expectedState, vm.getState())); } } diff --git a/server/src/main/java/com/cloud/vm/UserVmStateListener.java b/server/src/main/java/com/cloud/vm/UserVmStateListener.java index e9f7e7c5c72d..6fc815dc10b1 100644 --- a/server/src/main/java/com/cloud/vm/UserVmStateListener.java +++ b/server/src/main/java/com/cloud/vm/UserVmStateListener.java @@ -27,7 +27,8 @@ import com.cloud.server.ManagementService; import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.dao.UserVmDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -56,7 +57,7 @@ public class UserVmStateListener implements StateListener expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if 
(expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - s_logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -690,7 +688,7 @@ private boolean orchestrateDeleteVMSnapshot(Long vmSnapshotId) { if (hasActiveVMSnapshotTasks(vmSnapshot.getVmId())) { List expungingSnapshots = _vmSnapshotDao.listByInstanceId(vmSnapshot.getVmId(), VMSnapshot.State.Expunging); if (expungingSnapshots.size() > 0 && expungingSnapshots.get(0).getId() == vmSnapshot.getId()) - s_logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); + logger.debug("Target VM snapshot already in expunging state, go on deleting it: " + vmSnapshot.getDisplayName()); else throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } @@ -703,7 +701,7 @@ private boolean orchestrateDeleteVMSnapshot(Long vmSnapshotId) { VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshot); return strategy.deleteVMSnapshot(vmSnapshot); } catch (Exception e) { - s_logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); + logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); return false; } } @@ -832,7 +830,7 @@ protected void changeUserVmServiceOffering(UserVm userVm, VMSnapshotVO vmSnapsho if (! 
result){ throw new CloudRuntimeException("VM Snapshot reverting failed due to vm service offering couldn't be changed to the one used when snapshot was taken"); } - s_logger.debug("Successfully changed service offering to " + vmSnapshotVo.getServiceOfferingId() + " for vm " + userVm.getId()); + logger.debug("Successfully changed service offering to " + vmSnapshotVo.getServiceOfferingId() + " for vm " + userVm.getId()); } /** @@ -847,11 +845,11 @@ protected boolean upgradeUserVmServiceOffering(Long vmId, Long serviceOfferingId try { result = _userVmManager.upgradeVirtualMachine(vmId, serviceOfferingId, details); if (! result){ - s_logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId); + logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId); } return result; } catch (ConcurrentOperationException | ResourceUnavailableException | ManagementServerException | VirtualMachineMigrationException e) { - s_logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId + " due to: " + e.getMessage()); + logger.error("Couldn't change service offering for vm " + vmId + " to " + serviceOfferingId + " due to: " + e.getMessage()); return false; } } @@ -901,7 +899,7 @@ private UserVm orchestrateRevertToVMSnapshot(Long vmSnapshotId) throws Insuffici vm = _userVMDao.findById(userVm.getId()); hostId = vm.getHostId(); } catch (Exception e) { - s_logger.error("Start VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); + logger.error("Start VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } else { @@ -909,7 +907,7 @@ private UserVm orchestrateRevertToVMSnapshot(Long vmSnapshotId) throws Insuffici try { _itMgr.advanceStop(userVm.getUuid(), true); } catch (Exception e) { - s_logger.error("Stop VM " + userVm.getInstanceName() + " before reverting failed due to " + 
e.getMessage()); + logger.error("Stop VM " + userVm.getInstanceName() + " before reverting failed due to " + e.getMessage()); throw new CloudRuntimeException(e.getMessage()); } } @@ -932,7 +930,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) throws CloudR }); return userVm; } catch (Exception e) { - s_logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); + logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); throw new CloudRuntimeException(e.getMessage()); } } @@ -1089,7 +1087,7 @@ public boolean syncVMSnapshot(VMInstanceVO vm, Long hostId) { } } } catch (Exception e) { - s_logger.error(e.getMessage(), e); + logger.error(e.getMessage(), e); if (_vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging).size() == 0) return true; else @@ -1372,12 +1370,12 @@ public boolean deleteVMSnapshotsFromDB(Long vmId, boolean unmanage) { try { VMSnapshotStrategy strategy = findVMSnapshotStrategy(snapshot); if (! strategy.deleteVMSnapshotFromDB(snapshot, unmanage)) { - s_logger.error("Couldn't delete vm snapshot with id " + snapshot.getId()); + logger.error("Couldn't delete vm snapshot with id " + snapshot.getId()); return false; } } catch (CloudRuntimeException e) { - s_logger.error("Couldn't delete vm snapshot due to: " + e.getMessage()); + logger.error("Couldn't delete vm snapshot due to: " + e.getMessage()); } } return true; diff --git a/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java index ad5c5d071e52..01fc96473d23 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/ProjectRoleManagerImpl.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.command.admin.acl.project.UpdateProjectRolePermissionCmd; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.StringUtils; -import 
org.apache.log4j.Logger; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; @@ -71,7 +70,6 @@ public class ProjectRoleManagerImpl extends ManagerBase implements ProjectRoleSe @Inject AccountService accountService; - private static final Logger LOGGER = Logger.getLogger(ProjectRoleManagerImpl.class); private Project validateProjectId(Long projectId) { Project project = projectDao.findById(projectId); @@ -147,22 +145,22 @@ public boolean isEnabled() { @Override public ProjectRole findProjectRole(Long roleId, Long projectId) { if (projectId == null || projectId < 1L || projectDao.findById(projectId) == null) { - LOGGER.warn("Invalid project ID provided"); + logger.warn("Invalid project ID provided"); return null; } if (roleId != null && roleId < 1L) { - LOGGER.warn(String.format("Project Role ID is invalid [%s]", roleId)); + logger.warn(String.format("Project Role ID is invalid [%s]", roleId)); return null; } ProjectRoleVO role = projRoleDao.findById(roleId); if (role == null) { - LOGGER.warn(String.format("Project Role not found [id=%s]", roleId)); + logger.warn(String.format("Project Role not found [id=%s]", roleId)); return null; } if (!(role.getProjectId().equals(projectId))) { - LOGGER.warn(String.format("Project role : %s doesn't belong to the project" + role.getName())); + logger.warn(String.format("Project role : %s doesn't belong to the project" + role.getName())); return null; } return role; @@ -171,7 +169,7 @@ public ProjectRole findProjectRole(Long roleId, Long projectId) { @Override public List findProjectRoles(Long projectId, String keyword) { if (projectId == null || projectId < 1L || projectDao.findById(projectId) == null) { - LOGGER.warn("Invalid project ID provided"); + logger.warn("Invalid project ID provided"); return null; } return ListUtils.toListOfInterface(projRoleDao.findAllRoles(projectId, keyword)); diff --git a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java 
b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java index ff97d7ecf6fb..9208bc240361 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java @@ -44,7 +44,6 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; @@ -63,8 +62,6 @@ public class RoleManagerImpl extends ManagerBase implements RoleService, Configurable, PluggableService { - private Logger logger = Logger.getLogger(getClass()); - @Inject private AccountDao accountDao; @Inject diff --git a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index 48600ddc0cc7..05f8c3728261 100644 --- a/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.log4j.Logger; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -69,7 +68,6 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGroupService, Manager, StateListener { - public static final Logger s_logger = Logger.getLogger(AffinityGroupServiceImpl.class); private String _name; @Inject @@ -160,8 +158,8 @@ public AffinityGroup createAffinityGroup(final String accountName, final Long pr AffinityGroupVO group = createAffinityGroup(processor, owner, aclType, affinityGroupName, affinityGroupType, description, domainLevel, domainId); - if (s_logger.isDebugEnabled()) { 
- s_logger.debug("Created affinity group =" + affinityGroupName); + if (logger.isDebugEnabled()) { + logger.debug("Created affinity group =" + affinityGroupName); } CallContext.current().putContextParameter(AffinityGroup.class, group.getUuid()); @@ -260,8 +258,8 @@ public boolean deleteAffinityGroup(Long affinityGroupId, String account, Long pr Pair, Long> params = new Pair, Long>(AffinityGroup.class, affinityGroupIdFinal); _messageBus.publish(_name, EntityManager.MESSAGE_REMOVE_ENTITY_EVENT, PublishScope.LOCAL, params); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleted affinity group id=" + affinityGroupIdFinal); + if (logger.isDebugEnabled()) { + logger.debug("Deleted affinity group id=" + affinityGroupIdFinal); } return true; } @@ -435,7 +433,7 @@ public UserVm updateVMAffinityGroups(Long vmId, List affinityGroupIds) { // Check that the VM is stopped if (!vmInstance.getState().equals(State.Stopped)) { - s_logger.warn("Unable to update affinity groups of the virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); + logger.warn("Unable to update affinity groups of the virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); throw new InvalidParameterValueException("Unable update affinity groups of the virtual machine " + vmInstance.toString() + " " + "in state " + vmInstance.getState() + "; make sure the virtual machine is stopped and not in an error state before updating."); } @@ -472,8 +470,8 @@ public UserVm updateVMAffinityGroups(Long vmId, List affinityGroupIds) { } } _affinityGroupVMMapDao.updateMap(vmId, affinityGroupIds); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Updated VM :" + vmId + " affinity groups to =" + affinityGroupIds); + if (logger.isDebugEnabled()) { + logger.debug("Updated VM :" + vmId + " affinity groups to =" + affinityGroupIds); } // APIResponseHelper will pull out the updated affinitygroups. 
return vmInstance; diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java index 1414a94907e6..8b05a76d0a96 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -48,7 +47,6 @@ import org.apache.commons.lang3.StringUtils; public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implements IndirectAgentLB, Configurable { - public static final Logger LOG = Logger.getLogger(IndirectAgentLBServiceImpl.class); public static final ConfigKey IndirectAgentLBAlgorithm = new ConfigKey<>(String.class, "indirect.agent.lb.algorithm", "Advanced", "static", @@ -86,8 +84,8 @@ public List getManagementServerList(final Long hostId, final Long dcId, // just in case we have a host in creating state make sure it is in the list: if (null != hostId && ! 
hostIdList.contains(hostId)) { - if (LOG.isTraceEnabled()) { - LOG.trace("adding requested host to host list as it does not seem to be there; " + hostId); + if (logger.isTraceEnabled()) { + logger.trace("adding requested host to host list as it does not seem to be there; " + hostId); } hostIdList.add(hostId); } @@ -150,8 +148,8 @@ private List getAllAgentBasedHosts() { private void conditionallyAddHost(List agentBasedHosts, Host host) { if (host == null) { - if (LOG.isTraceEnabled()) { - LOG.trace("trying to add no host to a list"); + if (logger.isTraceEnabled()) { + logger.trace("trying to add no host to a list"); } return; } @@ -165,8 +163,8 @@ private void conditionallyAddHost(List agentBasedHosts, Host host) { // so the remaining EnumSet disallowedStates = EnumSet.complementOf(allowedStates) // would be {ResourceState.Creating, ResourceState.Error}; if (!allowedStates.contains(host.getResourceState())) { - if (LOG.isTraceEnabled()) { - LOG.trace(String.format("host is in '%s' state, not adding to the host list, (id = %s)", host.getResourceState(), host.getUuid())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("host is in '%s' state, not adding to the host list, (id = %s)", host.getResourceState(), host.getUuid())); } return; } @@ -175,8 +173,8 @@ private void conditionallyAddHost(List agentBasedHosts, Host host) { && host.getType() != Host.Type.ConsoleProxy && host.getType() != Host.Type.SecondaryStorage && host.getType() != Host.Type.SecondaryStorageVM) { - if (LOG.isTraceEnabled()) { - LOG.trace(String.format("host is of wrong type, not adding to the host list, (id = %s, type = %s)", host.getUuid(), host.getType())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("host is of wrong type, not adding to the host list, (id = %s, type = %s)", host.getUuid(), host.getType())); } return; } @@ -184,8 +182,8 @@ private void conditionallyAddHost(List agentBasedHosts, Host host) { if (host.getHypervisorType() != null && ! 
(host.getHypervisorType() == Hypervisor.HypervisorType.KVM || host.getHypervisorType() == Hypervisor.HypervisorType.LXC)) { - if (LOG.isTraceEnabled()) { - LOG.trace(String.format("hypervisor is not the right type, not adding to the host list, (id = %s, hypervisortype = %s)", host.getUuid(), host.getHypervisorType())); + if (logger.isTraceEnabled()) { + logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (id = %s, hypervisortype = %s)", host.getUuid(), host.getHypervisorType())); } return; } @@ -208,7 +206,7 @@ private org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm getAgentMSLBAlgo @Override public void propagateMSListToAgents() { - LOG.debug("Propagating management server list update to agents"); + logger.debug("Propagating management server list update to agents"); final String lbAlgorithm = getLBAlgorithmName(); final Map> dcOrderedHostsMap = new HashMap<>(); for (final Host host : getAllAgentBasedHosts()) { @@ -221,7 +219,7 @@ public void propagateMSListToAgents() { final SetupMSListCommand cmd = new SetupMSListCommand(msList, lbAlgorithm, lbCheckInterval); final Answer answer = agentManager.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { - LOG.warn(String.format("Failed to setup management servers list to the agent of %s", host)); + logger.warn(String.format("Failed to setup management servers list to the agent of %s", host)); } } } diff --git a/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java b/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java index 6a9d40cad18e..6975ecbef666 100644 --- a/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/annotation/AnnotationManagerImpl.java @@ -49,7 +49,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import 
org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.cluster.dao.ManagementServerHostDao; import com.cloud.dc.ClusterVO; @@ -101,7 +100,6 @@ * @since 4.11 */ public final class AnnotationManagerImpl extends ManagerBase implements AnnotationService, Configurable, PluggableService { - public static final Logger LOGGER = Logger.getLogger(AnnotationManagerImpl.class); @Inject private AnnotationDao annotationDao; @@ -281,8 +279,8 @@ public AnnotationResponse removeAnnotation(RemoveAnnotationCmd removeAnnotationC throw new CloudRuntimeException(String.format("Only administrators or entity owner users can delete annotations, " + "cannot remove annotation with uuid: %s - type: %s ", uuid, annotation.getEntityType().name())); } - if(LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Removing annotation uuid: %s - type: %s", uuid, annotation.getEntityType().name())); + if(logger.isDebugEnabled()) { + logger.debug(String.format("Removing annotation uuid: %s - type: %s", uuid, annotation.getEntityType().name())); } updateResourceDetailsInContext(annotation.getEntityUuid(), annotation.getEntityType()); annotationDao.remove(annotation.getId()); @@ -301,8 +299,8 @@ public AnnotationResponse updateAnnotationVisibility(UpdateAnnotationVisibilityC throw new CloudRuntimeException(String.format("Only admins can update annotations' visibility. 
" + "Cannot update visibility for annotation with id: %s - %s", uuid, errDesc)); } - if(LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Updating annotation with uuid: %s visibility to %B: ", uuid, adminsOnly)); + if(logger.isDebugEnabled()) { + logger.debug(String.format("Updating annotation with uuid: %s visibility to %B: ", uuid, adminsOnly)); } annotation.setAdminsOnly(adminsOnly); annotationDao.update(annotation.getId(), annotation); @@ -380,8 +378,8 @@ private Pair, Integer> getAnnotationsForApiCmd(ListAnnotation private List getAllAnnotations(String annotationFilter, String userUuid, String callingUserUuid, boolean isCallerAdmin, String keyword) { - if(LOGGER.isDebugEnabled()) { - LOGGER.debug("getting all annotations"); + if(logger.isDebugEnabled()) { + logger.debug("getting all annotations"); } if ("self".equalsIgnoreCase(annotationFilter) && StringUtils.isBlank(userUuid)) { userUuid = callingUserUuid; @@ -416,8 +414,8 @@ private List getAnnotationsForSpecificEntityId(String entityUuid, private List getAnnotationsForSpecificEntityType(String entityType, String entityUuid, String userUuid, boolean isCallerAdmin, String annotationFilter, String callingUserUuid, String keyword, UserVO callingUser) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("getting annotations for type: " + entityType); + if (logger.isDebugEnabled()) { + logger.debug("getting annotations for type: " + entityType); } if ("self".equalsIgnoreCase(annotationFilter) && StringUtils.isBlank(userUuid)) { userUuid = callingUserUuid; @@ -438,8 +436,8 @@ private List getAnnotationsForSpecificEntityType(String entityType private List getSingleAnnotationListByUuid(String uuid, String userUuid, String annotationFilter, String callingUserUuid, boolean isCallerAdmin) { List annotations = new ArrayList<>(); - if(LOGGER.isDebugEnabled()) { - LOGGER.debug("getting single annotation by uuid: " + uuid); + if(logger.isDebugEnabled()) { + logger.debug("getting single annotation by uuid: " + uuid); } 
AnnotationVO annotationVO = annotationDao.findByUuid(uuid); @@ -456,8 +454,8 @@ private List getAnnotationsByEntityIdAndType(String entityType, St boolean isCallerAdmin, String annotationFilter, String callingUserUuid, String keyword, UserVO callingUser) { isEntityOwnedByTheUser(entityType, entityUuid, callingUser); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("getting annotations for entity: " + entityUuid); + if (logger.isDebugEnabled()) { + logger.debug("getting annotations for entity: " + entityUuid); } return annotationDao.listByEntity(entityType, entityUuid, userUuid, isCallerAdmin, annotationFilter, callingUserUuid, keyword); @@ -484,7 +482,7 @@ private boolean isEntityOwnedByTheUser(String entityType, String entityUuid, Use ControlledEntity entity = getEntityFromUuidAndType(entityUuid, type); if (entity == null) { String errMsg = String.format("Could not find an entity with type: %s and ID: %s", entityType, entityUuid); - LOGGER.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } if (type == EntityType.NETWORK && entity instanceof NetworkVO && @@ -498,10 +496,10 @@ private boolean isEntityOwnedByTheUser(String entityType, String entityUuid, Use } } } catch (IllegalArgumentException e) { - LOGGER.error("Could not parse entity type " + entityType, e); + logger.error("Could not parse entity type " + entityType, e); return false; } catch (PermissionDeniedException e) { - LOGGER.debug(e.getMessage(), e); + logger.debug(e.getMessage(), e); return false; } return true; @@ -628,7 +626,7 @@ private void setResponseEntityName(AnnotationResponse response, String entityUui if (entityType.isUserAllowed()) { ControlledEntity entity = getEntityFromUuidAndType(entityUuid, entityType); if (entity != null) { - LOGGER.debug(String.format("Could not find an entity with type: %s and ID: %s", entityType.name(), entityUuid)); + logger.debug(String.format("Could not find an entity with type: %s and ID: %s", entityType.name(), entityUuid)); 
entityName = entity.getName(); } } else { diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index 2e45066ff605..55ef9427ae92 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -70,7 +70,6 @@ import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.api.ApiDispatcher; import com.cloud.api.ApiGsonHelper; @@ -120,7 +119,6 @@ import com.google.gson.Gson; public class BackupManagerImpl extends ManagerBase implements BackupManager { - private static final Logger LOG = Logger.getLogger(BackupManagerImpl.class); @Inject private BackupDao backupDao; @@ -185,7 +183,7 @@ public List listBackupProviderOfferings(final Long zoneId) { throw new PermissionDeniedException("Parameter external can only be specified by a Root Admin, permission denied"); } final BackupProvider backupProvider = getBackupProvider(zoneId); - LOG.debug("Listing external backup offerings for the backup provider configured for zone ID " + zoneId); + logger.debug("Listing external backup offerings for the backup provider configured for zone ID " + zoneId); return backupProvider.listBackupOfferings(zoneId); } @@ -213,7 +211,7 @@ public BackupOffering importBackupOffering(final ImportBackupOfferingCmd cmd) { if (savedOffering == null) { throw new CloudRuntimeException("Unable to create backup offering: " + cmd.getExternalId() + ", name: " + cmd.getName()); } - LOG.debug("Successfully created backup offering " + cmd.getName() + " mapped to backup provider offering " + cmd.getExternalId()); + logger.debug("Successfully created backup offering " + cmd.getName() + " mapped to backup provider offering " + 
cmd.getExternalId()); return savedOffering; } @@ -322,7 +320,7 @@ public VMInstanceVO doInTransaction(final TransactionStatus status) { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_BACKUP_OFFERING_ASSIGN, vm.getAccountId(), vm.getDataCenterId(), vmId, "Backup-" + vm.getHostName() + "-" + vm.getUuid(), vm.getBackupOfferingId(), null, null, Backup.class.getSimpleName(), vm.getUuid()); - LOG.debug(String.format("VM [%s] successfully added to Backup Offering [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, + logger.debug(String.format("VM [%s] successfully added to Backup Offering [%s].", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "instanceName", "backupOfferingId", "backupVolumes"), ReflectionToStringBuilderUtils.reflectOnlySelectedFields(offering, "uuid", "name", "externalId", "provider"))); } catch (Exception e) { @@ -330,8 +328,8 @@ public VMInstanceVO doInTransaction(final TransactionStatus status) { ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "instanceName", "backupOfferingId", "backupVolumes"), ReflectionToStringBuilderUtils.reflectOnlySelectedFields(offering, "uuid", "name", "externalId", "provider"), backupProvider.getName(), backupProvider.getClass().getSimpleName(), e.getMessage()); - LOG.error(msg); - LOG.debug(msg, e); + logger.error(msg); + logger.debug(msg, e); } return vm; } @@ -389,7 +387,7 @@ public boolean removeVMFromBackupOffering(final Long vmId, final boolean forced) result = true; } } catch (Exception e) { - LOG.error(String.format("Exception caught when trying to remove VM [uuid: %s, name: %s] from the backup offering [uuid: %s, name: %s] due to: [%s].", + logger.error(String.format("Exception caught when trying to remove VM [uuid: %s, name: %s] from the backup offering [uuid: %s, name: %s] due to: [%s].", vm.getUuid(), vm.getInstanceName(), offering.getUuid(), offering.getName(), e.getMessage()), e); } return result; @@ -422,7 +420,7 @@ public BackupSchedule 
configureBackupSchedule(CreateBackupScheduleCmd cmd) { final String timezoneId = timeZone.getID(); if (!timezoneId.equals(cmd.getTimezone())) { - LOG.warn("Using timezone: " + timezoneId + " for running this snapshot policy as an equivalent of " + cmd.getTimezone()); + logger.warn("Using timezone: " + timezoneId + " for running this snapshot policy as an equivalent of " + cmd.getTimezone()); } Date nextDateTime = null; @@ -569,7 +567,7 @@ public boolean importRestoredVM(long zoneId, long domainId, long accountId, long try { vm = guru.importVirtualMachineFromBackup(zoneId, domainId, accountId, userId, vmInternalName, backup); } catch (final Exception e) { - LOG.error(String.format("Failed to import VM [vmInternalName: %s] from backup restoration [%s] with hypervisor [type: %s] due to: [%s].", vmInternalName, + logger.error(String.format("Failed to import VM [vmInternalName: %s] from backup restoration [%s] with hypervisor [type: %s] due to: [%s].", vmInternalName, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "id", "uuid", "vmId", "externalId", "backupType"), hypervisorType, e.getMessage()), e); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_VM_BACKUP_RESTORE, String.format("Failed to import VM %s from backup %s with hypervisor [type: %s]", vmInternalName, backup.getUuid(), hypervisorType), @@ -579,7 +577,7 @@ public boolean importRestoredVM(long zoneId, long domainId, long accountId, long if (vm == null) { String message = String.format("Failed to import restored VM %s with hypervisor type %s using backup of VM ID %s", vmInternalName, hypervisorType, backup.getVmId()); - LOG.error(message); + logger.error(message); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_VM_BACKUP_RESTORE, message, vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),0); } else { @@ -650,7 +648,7 @@ protected void 
tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering off // The restore process is executed by a backup provider outside of ACS, I am using the catch-all (Exception) to // ensure that no provider-side exception is missed. Therefore, we have a proper handling of exceptions, and rollbacks if needed. } catch (Exception e) { - LOG.error(String.format("Failed to restore backup [%s] due to: [%s].", backupDetailsInMessage, e.getMessage()), e); + logger.error(String.format("Failed to restore backup [%s] due to: [%s].", backupDetailsInMessage, e.getMessage()), e); updateVolumeState(vm, Volume.Event.RestoreFailed, Volume.State.Ready); updateVmState(vm, VirtualMachine.Event.RestoringFailed, VirtualMachine.State.Stopped); throw new CloudRuntimeException(String.format("Error restoring VM from backup [%s].", backupDetailsInMessage)); @@ -664,7 +662,7 @@ protected void tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering off * @param next The desired state, just needed to add more context to the logs */ private void updateVmState(VMInstanceVO vm, VirtualMachine.Event event, VirtualMachine.State next) { - LOG.debug(String.format("Trying to update state of VM [%s] with event [%s].", vm, event)); + logger.debug(String.format("Trying to update state of VM [%s] with event [%s].", vm, event)); Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback) status -> { try { if (!virtualMachineManager.stateTransitTo(vm, event, vm.getHostId())) { @@ -672,7 +670,7 @@ private void updateVmState(VMInstanceVO vm, VirtualMachine.Event event, VirtualM } } catch (NoTransitionException e) { String errMsg = String.format("Failed to update state of VM [%s] with event [%s] due to [%s].", vm, event, e.getMessage()); - LOG.error(errMsg, e); + logger.error(errMsg, e); throw new RuntimeException(errMsg); } return null; @@ -702,14 +700,14 @@ private void updateVolumeState(VMInstanceVO vm, Volume.Event event, Volume.State * */ private void 
tryToUpdateStateOfSpecifiedVolume(VolumeVO volume, Volume.Event event, Volume.State next) { - LOG.debug(String.format("Trying to update state of volume [%s] with event [%s].", volume, event)); + logger.debug(String.format("Trying to update state of volume [%s] with event [%s].", volume, event)); try { if (!volumeApiService.stateTransitTo(volume, event)) { throw new CloudRuntimeException(String.format("Unable to change state of volume [%s] to [%s].", volume, next)); } } catch (NoTransitionException e) { String errMsg = String.format("Failed to update state of volume [%s] with event [%s] due to [%s].", volume, event, e.getMessage()); - LOG.error(errMsg, e); + logger.error(errMsg, e); throw new RuntimeException(errMsg); } } @@ -756,7 +754,7 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, HostVO host = restoreInfo.first(); StoragePoolVO datastore = restoreInfo.second(); - LOG.debug("Asking provider to restore volume " + backedUpVolumeUuid + " from backup " + backupId + + logger.debug("Asking provider to restore volume " + backedUpVolumeUuid + " from backup " + backupId + " (with external ID " + backup.getExternalId() + ") and attach it to VM: " + vm.getUuid()); final BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); @@ -765,7 +763,7 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, } BackupProvider backupProvider = getBackupProvider(offering.getProvider()); - LOG.debug(String.format("Trying to restore volume using host private IP address: [%s].", host.getPrivateIpAddress())); + logger.debug(String.format("Trying to restore volume using host private IP address: [%s].", host.getPrivateIpAddress())); String[] hostPossibleValues = {host.getPrivateIpAddress(), host.getName()}; String[] datastoresPossibleValues = {datastore.getUuid(), datastore.getName()}; @@ -792,7 +790,7 @@ protected Pair restoreBackedUpVolume(final String backedUpVolum Pair result = new 
Pair<>(false, ""); for (String hostData : hostPossibleValues) { for (String datastoreData : datastoresPossibleValues) { - LOG.debug(String.format("Trying to restore volume [UUID: %s], using host [%s] and datastore [%s].", + logger.debug(String.format("Trying to restore volume [UUID: %s], using host [%s] and datastore [%s].", backedUpVolumeUuid, hostData, datastoreData)); try { @@ -802,7 +800,7 @@ protected Pair restoreBackedUpVolume(final String backedUpVolum return result; } } catch (Exception e) { - LOG.debug(String.format("Failed to restore volume [UUID: %s], using host [%s] and datastore [%s] due to: [%s].", + logger.debug(String.format("Failed to restore volume [UUID: %s], using host [%s] and datastore [%s] due to: [%s].", backedUpVolumeUuid, hostData, datastoreData, e.getMessage()), e); } } @@ -876,7 +874,7 @@ private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, Lis } volumeInfo.setType(Volume.Type.DATADISK); - LOG.debug("Attaching the restored volume to VM " + vm.getId()); + logger.debug("Attaching the restored volume to VM " + vm.getId()); StoragePoolVO pool = primaryDataStoreDao.findByUuid(datastoreUuid); try { return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, volumeInfo, vm, pool.getId(), backup); @@ -1037,10 +1035,10 @@ private void checkStatusOfCurrentlyExecutingBackups() { case FAILED: final Date nextDateTime = scheduleNextBackupJob(backupSchedule); final String nextScheduledTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, nextDateTime); - LOG.debug("Next backup scheduled time for VM ID " + backupSchedule.getVmId() + " is " + nextScheduledTime); + logger.debug("Next backup scheduled time for VM ID " + backupSchedule.getVmId() + " is " + nextScheduledTime); break; default: - LOG.debug(String.format("Found async backup job [id: %s, vmId: %s] with status [%s] and cmd information: [cmd: %s, cmdInfo: %s].", asyncJob.getId(), backupSchedule.getVmId(), + logger.debug(String.format("Found async 
backup job [id: %s, vmId: %s] with status [%s] and cmd information: [cmd: %s, cmdInfo: %s].", asyncJob.getId(), backupSchedule.getVmId(), asyncJob.getStatus(), asyncJob.getCmd(), asyncJob.getCmdInfo())); break; } @@ -1050,7 +1048,7 @@ private void checkStatusOfCurrentlyExecutingBackups() { @DB public void scheduleBackups() { String displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, currentTimestamp); - LOG.debug("Backup backup.poll is being called at " + displayTime); + logger.debug("Backup backup.poll is being called at " + displayTime); final List backupsToBeExecuted = backupScheduleDao.getSchedulesToExecute(currentTimestamp); for (final BackupScheduleVO backupSchedule: backupsToBeExecuted) { @@ -1074,14 +1072,14 @@ public void scheduleBackups() { final Account backupAccount = accountService.getAccount(vm.getAccountId()); if (backupAccount == null || backupAccount.getState() == Account.State.DISABLED) { - LOG.debug(String.format("Skip backup for VM [uuid: %s, name: %s] since its account has been removed or disabled.", vm.getUuid(), vm.getInstanceName())); + logger.debug(String.format("Skip backup for VM [uuid: %s, name: %s] since its account has been removed or disabled.", vm.getUuid(), vm.getInstanceName())); continue; } - if (LOG.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final Date scheduledTimestamp = backupSchedule.getScheduledTimestamp(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - LOG.debug(String.format("Scheduling 1 backup for VM [ID: %s, name: %s, hostName: %s] for backup schedule id: [%s] at [%s].", + logger.debug(String.format("Scheduling 1 backup for VM [ID: %s, name: %s, hostName: %s] for backup schedule id: [%s] at [%s].", vm.getId(), vm.getInstanceName(), vm.getHostName(), backupSchedule.getId(), displayTime)); } @@ -1115,7 +1113,7 @@ public void scheduleBackups() { tmpBackupScheduleVO.setAsyncJobId(jobId); backupScheduleDao.update(backupScheduleId, tmpBackupScheduleVO); } 
catch (Exception e) { - LOG.error(String.format("Scheduling backup failed due to: [%s].", e.getMessage()), e); + logger.error(String.format("Scheduling backup failed due to: [%s].", e.getMessage()), e); } finally { if (tmpBackupScheduleVO != null) { backupScheduleDao.releaseFromLockTable(backupScheduleId); @@ -1138,7 +1136,7 @@ protected void runInContext() { try { poll(new Date()); } catch (final Throwable t) { - LOG.warn("Catch throwable in backup scheduler ", t); + logger.warn("Catch throwable in backup scheduler ", t); } } }; @@ -1174,24 +1172,24 @@ public BackupSyncTask(final BackupManager backupManager) { @Override protected void runInContext() { try { - if (LOG.isTraceEnabled()) { - LOG.trace("Backup sync background task is running..."); + if (logger.isTraceEnabled()) { + logger.trace("Backup sync background task is running..."); } for (final DataCenter dataCenter : dataCenterDao.listAllZones()) { if (dataCenter == null || isDisabled(dataCenter.getId())) { - LOG.debug(String.format("Backup Sync Task is not enabled in zone [%s]. Skipping this zone!", dataCenter == null ? "NULL Zone!" : dataCenter.getId())); + logger.debug(String.format("Backup Sync Task is not enabled in zone [%s]. Skipping this zone!", dataCenter == null ? "NULL Zone!" 
: dataCenter.getId())); continue; } final BackupProvider backupProvider = getBackupProvider(dataCenter.getId()); if (backupProvider == null) { - LOG.warn("Backup provider not available or configured for zone ID " + dataCenter.getId()); + logger.warn("Backup provider not available or configured for zone ID " + dataCenter.getId()); continue; } List vms = vmInstanceDao.listByZoneWithBackups(dataCenter.getId(), null); if (vms == null || vms.isEmpty()) { - LOG.debug(String.format("Can't find any VM to sync backups in zone [id: %s].", dataCenter.getId())); + logger.debug(String.format("Can't find any VM to sync backups in zone [id: %s].", dataCenter.getId())); continue; } @@ -1199,7 +1197,7 @@ protected void runInContext() { syncBackupMetrics(backupProvider, metrics); } } catch (final Throwable t) { - LOG.error(String.format("Error trying to run backup-sync background task due to: [%s].", t.getMessage()), t); + logger.error(String.format("Error trying to run backup-sync background task due to: [%s].", t.getMessage()), t); } } @@ -1216,7 +1214,7 @@ private void tryToSyncVMBackups(BackupProvider backupProvider, Map certsMap = caManager.getActiveCertificatesMap(); @@ -337,18 +335,18 @@ protected void runInContext() { try { certificate.checkValidity(now.plusDays(CertExpiryAlertPeriod.valueIn(host.getClusterId())).toDate()); } catch (final CertificateExpiredException | CertificateNotYetValidException e) { - LOG.warn("Certificate is going to expire for " + hostDescription, e); + logger.warn("Certificate is going to expire for " + hostDescription, e); if (AutomaticCertRenewal.valueIn(host.getClusterId())) { try { - LOG.debug("Attempting certificate auto-renewal for " + hostDescription, e); + logger.debug("Attempting certificate auto-renewal for " + hostDescription, e); boolean result = caManager.provisionCertificate(host, false, null); if (result) { - LOG.debug("Succeeded in auto-renewing certificate for " + hostDescription, e); + logger.debug("Succeeded in auto-renewing 
certificate for " + hostDescription, e); } else { - LOG.debug("Failed in auto-renewing certificate for " + hostDescription, e); + logger.debug("Failed in auto-renewing certificate for " + hostDescription, e); } } catch (final Throwable ex) { - LOG.warn("Failed to auto-renew certificate for " + hostDescription + ", with error=", ex); + logger.warn("Failed to auto-renew certificate for " + hostDescription + ", with error=", ex); caManager.sendAlert(host, "Certificate auto-renewal failed for " + hostDescription, String.format("Certificate is going to expire for %s. Auto-renewal failed to renew the certificate, please renew it manually. It is not valid after %s.", hostDescription, certificate.getNotAfter())); @@ -367,7 +365,7 @@ protected void runInContext() { } } } catch (final Throwable t) { - LOG.error("Error trying to run CA background task", t); + logger.error("Error trying to run CA background task", t); } } @@ -398,7 +396,7 @@ public boolean start() { configuredCaProvider = caProviderMap.get(CAProviderPlugin.value()); } if (configuredCaProvider == null) { - LOG.error("Failed to find valid configured CA provider, please check!"); + logger.error("Failed to find valid configured CA provider, please check!"); return false; } return true; diff --git a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java index f949233e8e85..e44f8f8f598e 100644 --- a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java @@ -73,7 +73,6 @@ import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.time.DateUtils; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -92,8 +91,6 @@ public class ClusterDrsServiceImpl extends ManagerBase 
implements ClusterDrsService, PluggableService { - private static final Logger logger = Logger.getLogger(ClusterDrsServiceImpl.class); - private static final String CLUSTER_LOCK_STR = "drs.plan.cluster.%s"; AsyncJobDispatcher asyncJobDispatcher; diff --git a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java index 27fd25829778..124ca05cc376 100644 --- a/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/consoleproxy/ConsoleAccessManagerImpl.java @@ -34,7 +34,6 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -72,6 +71,8 @@ import com.cloud.vm.dao.UserVmDetailsDao; import com.google.gson.Gson; import com.google.gson.GsonBuilder; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.joda.time.DateTime; @@ -108,7 +109,7 @@ public class ConsoleAccessManagerImpl extends ManagerBase implements ConsoleAcce private static KeysManager secretKeysManager; private final Gson gson = new GsonBuilder().create(); - public static final Logger s_logger = Logger.getLogger(ConsoleAccessManagerImpl.class.getName()); + protected Logger logger = LogManager.getLogger(ConsoleAccessManagerImpl.class); private static final List unsupportedConsoleVMState = Arrays.asList( VirtualMachine.State.Stopped, VirtualMachine.State.Error, VirtualMachine.State.Destroyed @@ -125,7 +126,7 @@ public boolean configure(String name, Map params) throws Configu public boolean start() { int consoleCleanupInterval = 
ConsoleAccessManager.ConsoleSessionCleanupInterval.value(); if (consoleCleanupInterval > 0) { - s_logger.info(String.format("The ConsoleSessionCleanupTask will run every %s hours", consoleCleanupInterval)); + logger.info(String.format("The ConsoleSessionCleanupTask will run every %s hours", consoleCleanupInterval)); executorService.scheduleWithFixedDelay(new ConsoleSessionCleanupTask(), consoleCleanupInterval, consoleCleanupInterval, TimeUnit.HOURS); } return true; @@ -162,18 +163,18 @@ protected void runInContext() { } private void reallyRun() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Starting ConsoleSessionCleanupTask..."); + if (logger.isDebugEnabled()) { + logger.debug("Starting ConsoleSessionCleanupTask..."); } Integer retentionHours = ConsoleAccessManager.ConsoleSessionCleanupRetentionHours.value(); Date dateBefore = DateTime.now().minusHours(retentionHours).toDate(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Retention hours: %s, checking for removed console session " + + if (logger.isDebugEnabled()) { + logger.debug(String.format("Retention hours: %s, checking for removed console session " + "records to expunge older than: %s", retentionHours, dateBefore)); } int sessionsExpunged = consoleSessionDao.expungeSessionsOlderThanDate(dateBefore); - if (s_logger.isDebugEnabled()) { - s_logger.debug(sessionsExpunged > 0 ? + if (logger.isDebugEnabled()) { + logger.debug(sessionsExpunged > 0 ? String.format("Expunged %s removed console session records", sessionsExpunged) : "No removed console session records expunged on this cleanup task run"); } @@ -189,7 +190,7 @@ public ConsoleEndpoint generateConsoleEndpoint(Long vmId, String extraSecurityTo if (keysManager.getHashKey() == null) { String msg = "Console access denied. 
Ticket service is not ready yet"; - s_logger.debug(msg); + logger.debug(msg); return new ConsoleEndpoint(false, null, msg); } @@ -197,13 +198,13 @@ public ConsoleEndpoint generateConsoleEndpoint(Long vmId, String extraSecurityTo // Do a sanity check here to make sure the user hasn't already been deleted if (account == null) { - s_logger.debug("Invalid user/account, reject console access"); + logger.debug("Invalid user/account, reject console access"); return new ConsoleEndpoint(false, null,"Access denied. Invalid or inconsistent account is found"); } VirtualMachine vm = entityManager.findById(VirtualMachine.class, vmId); if (vm == null) { - s_logger.info("Invalid console servlet command parameter: " + vmId); + logger.info("Invalid console servlet command parameter: " + vmId); return new ConsoleEndpoint(false, null, "Cannot find VM with ID " + vmId); } @@ -214,7 +215,7 @@ public ConsoleEndpoint generateConsoleEndpoint(Long vmId, String extraSecurityTo DataCenter zone = dataCenterDao.findById(vm.getDataCenterId()); if (zone != null && DataCenter.Type.Edge.equals(zone.getType())) { String errorMsg = "Console access is not supported for Edge zones"; - s_logger.error(errorMsg); + logger.error(errorMsg); return new ConsoleEndpoint(false, null, errorMsg); } @@ -223,7 +224,7 @@ public ConsoleEndpoint generateConsoleEndpoint(Long vmId, String extraSecurityTo } catch (Exception e) { String errorMsg = String.format("Unexepected exception in ConsoleAccessManager - vmId: %s, clientAddress: %s", vmId, clientAddress); - s_logger.error(errorMsg, e); + logger.error(errorMsg, e); return new ConsoleEndpoint(false, null, "Server Internal Error: " + e.getMessage()); } } @@ -262,14 +263,14 @@ protected boolean checkSessionPermission(VirtualMachine vm, Account account) { accountManager.checkAccess(account, null, true, vm); } catch (PermissionDeniedException ex) { if (accountManager.isNormalUser(account.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("VM access is denied 
for VM ID " + vm.getUuid() + ". VM owner account " + + if (logger.isDebugEnabled()) { + logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + vm.getAccountId() + " does not match the account id in session " + account.getId() + " and caller is a normal user"); } } else if ((accountManager.isDomainAdmin(account.getId()) - || account.getType() == Account.Type.READ_ONLY_ADMIN) && s_logger.isDebugEnabled()) { - s_logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + + || account.getType() == Account.Type.READ_ONLY_ADMIN) && logger.isDebugEnabled()) { + logger.debug("VM access is denied for VM ID " + vm.getUuid() + ". VM owner account " + vm.getAccountId() + " does not match the account id in session " + account.getId() + " and the domain-admin caller does not manage the target domain"); } @@ -283,7 +284,7 @@ protected boolean checkSessionPermission(VirtualMachine vm, Account account) { return false; default: - s_logger.warn("Unrecoginized virtual machine type, deny access by default. type: " + vm.getType()); + logger.warn("Unrecoginized virtual machine type, deny access by default. type: " + vm.getType()); return false; } @@ -295,28 +296,28 @@ private ConsoleEndpoint generateAccessEndpoint(Long vmId, String sessionUuid, St String msg; if (vm == null) { msg = "VM " + vmId + " does not exist, sending blank response for console access request"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } String vmUuid = vm.getUuid(); if (unsupportedConsoleVMState.contains(vm.getState())) { msg = "VM " + vmUuid + " must be running to connect console, sending blank response for console access request"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } Long hostId = vm.getState() != VirtualMachine.State.Migrating ? 
vm.getHostId() : vm.getLastHostId(); if (hostId == null) { msg = "VM " + vmUuid + " lost host info, sending blank response for console access request"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } HostVO host = managementServer.getHostBy(hostId); if (host == null) { msg = "VM " + vmUuid + "'s host does not exist, sending blank response for console access request"; - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -330,7 +331,7 @@ private ConsoleEndpoint generateAccessEndpoint(Long vmId, String sessionUuid, St } ConsoleEndpoint consoleEndpoint = composeConsoleAccessEndpoint(rootUrl, vm, host, clientAddress, sessionUuid, extraSecurityToken); - s_logger.debug("The console URL is: " + consoleEndpoint.getUrl()); + logger.debug("The console URL is: " + consoleEndpoint.getUrl()); return consoleEndpoint; } @@ -347,7 +348,7 @@ private ConsoleEndpoint composeConsoleAccessEndpoint(String rootUrl, VirtualMach if (detailAddress != null && detailPort != null) { portInfo = new Pair<>(detailAddress.getValue(), Integer.valueOf(detailPort.getValue())); } else { - s_logger.warn("KVM Host in ErrorInMaintenance/ErrorInPrepareForMaintenance but " + + logger.warn("KVM Host in ErrorInMaintenance/ErrorInPrepareForMaintenance but " + "no VNC Address/Port was available. 
Falling back to default one from MS."); } } @@ -356,8 +357,8 @@ private ConsoleEndpoint composeConsoleAccessEndpoint(String rootUrl, VirtualMach portInfo = managementServer.getVncPort(vm); } - if (s_logger.isDebugEnabled()) - s_logger.debug("Port info " + portInfo.first()); + if (logger.isDebugEnabled()) + logger.debug("Port info " + portInfo.first()); Ternary parsedHostInfo = parseHostInfo(portInfo.first()); @@ -387,7 +388,7 @@ private ConsoleEndpoint composeConsoleAccessEndpoint(String rootUrl, VirtualMach String url = generateConsoleAccessUrl(rootUrl, param, token, vncPort, vm, hostVo, details); - s_logger.debug("Adding allowed session: " + sessionUuid); + logger.debug("Adding allowed session: " + sessionUuid); persistConsoleSession(sessionUuid, vm.getId(), hostVo.getId()); managementServer.setConsoleAccessForVm(vm.getId(), sessionUuid); @@ -437,8 +438,8 @@ private String generateConsoleAccessUrl(String rootUrl, ConsoleProxyClientParam if (guestOsVo.getCategoryId() == 6) sb.append("&guest=windows"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Compose console url: " + sb); + if (logger.isDebugEnabled()) { + logger.debug("Compose console url: " + sb); } return sb.toString().startsWith("https") ? sb.toString() : "http:" + sb; } @@ -462,7 +463,7 @@ private ConsoleProxyClientParam generateConsoleProxyClientParam(Ternary parseHostInfo(String hostInfo) { + public Ternary parseHostInfo(String hostInfo) { String host = null; String tunnelUrl = null; String tunnelSession = null; - s_logger.info("Parse host info returned from executing GetVNCPortCommand. host info: " + hostInfo); + logger.info("Parse host info returned from executing GetVNCPortCommand. 
host info: " + hostInfo); if (hostInfo != null) { if (hostInfo.startsWith("consoleurl")) { @@ -524,11 +525,13 @@ private boolean requiresVncOverWebSocketConnection(VirtualMachine vm, HostVO hos return vm.getHypervisorType() == Hypervisor.HypervisorType.VMware && hostVo.getHypervisorVersion().compareTo("7.0") >= 0; } - public static String genAccessTicket(String host, String port, String sid, String tag, String sessionUuid) { + @Override + public String genAccessTicket(String host, String port, String sid, String tag, String sessionUuid) { return genAccessTicket(host, port, sid, tag, new Date(), sessionUuid); } - public static String genAccessTicket(String host, String port, String sid, String tag, Date normalizedHashTime, String sessionUuid) { + @Override + public String genAccessTicket(String host, String port, String sid, String tag, Date normalizedHashTime, String sessionUuid) { String params = "host=" + host + "&port=" + port + "&sid=" + sid + "&tag=" + tag + "&session=" + sessionUuid; try { @@ -547,7 +550,7 @@ public static String genAccessTicket(String host, String port, String sid, Strin return Base64.encodeBase64String(encryptedBytes); } catch (Exception e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); } return ""; } @@ -566,7 +569,7 @@ private String getEncryptorPassword() { private void setWebsocketUrl(VirtualMachine vm, ConsoleProxyClientParam param) { String ticket = acquireVncTicketForVmwareVm(vm); if (StringUtils.isBlank(ticket)) { - s_logger.error("Could not obtain VNC ticket for VM " + vm.getInstanceName()); + logger.error("Could not obtain VNC ticket for VM " + vm.getInstanceName()); return; } String wsUrl = composeWebsocketUrlForVmwareVm(ticket, param); @@ -587,16 +590,16 @@ private String composeWebsocketUrlForVmwareVm(String ticket, ConsoleProxyClientP */ private String acquireVncTicketForVmwareVm(VirtualMachine vm) { try { - s_logger.info("Acquiring VNC ticket for VM = " + vm.getHostName()); + 
logger.info("Acquiring VNC ticket for VM = " + vm.getHostName()); GetVmVncTicketCommand cmd = new GetVmVncTicketCommand(vm.getInstanceName()); Answer answer = agentManager.send(vm.getHostId(), cmd); GetVmVncTicketAnswer ans = (GetVmVncTicketAnswer) answer; if (!ans.getResult()) { - s_logger.info("VNC ticket could not be acquired correctly: " + ans.getDetails()); + logger.info("VNC ticket could not be acquired correctly: " + ans.getDetails()); } return ans.getTicket(); } catch (AgentUnavailableException | OperationTimedoutException e) { - s_logger.error("Error acquiring ticket", e); + logger.error("Error acquiring ticket", e); return null; } } diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsHelper.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsHelper.java index 282eee202cfc..cd1942f59d06 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsHelper.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsHelper.java @@ -30,12 +30,13 @@ import java.util.Set; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.utils.script.Script2; public class DiagnosticsHelper { - private static final Logger LOGGER = Logger.getLogger(DiagnosticsHelper.class); + protected static Logger LOGGER = LogManager.getLogger(DiagnosticsHelper.class); public static void setDirFilePermissions(Path path) throws java.io.IOException { Set perms = Files.readAttributes(path, PosixFileAttributes.class).permissions(); @@ -51,7 +52,7 @@ public static void setDirFilePermissions(Path path) throws java.io.IOException { Files.setPosixFilePermissions(path, perms); } - public static void umountSecondaryStorage(String mountPoint) { + public void umountSecondaryStorage(String mountPoint) { if (StringUtils.isNotBlank(mountPoint)) { Script2 umountCmd = new Script2("/bin/bash", LOGGER); 
umountCmd.add("-c"); diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index 72f4a3c5b869..62bc50889f14 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -51,7 +51,6 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -77,7 +76,6 @@ import com.cloud.vm.dao.VMInstanceDao; public class DiagnosticsServiceImpl extends ManagerBase implements PluggableService, DiagnosticsService, Configurable { - private static final Logger LOGGER = Logger.getLogger(DiagnosticsServiceImpl.class); @Inject private AgentManager agentManager; @@ -284,10 +282,10 @@ private Answer deleteDiagnosticsZipFileInsystemVm(VMInstanceVO vmInstance, Strin configureNetworkElementCommand(cmd, vmInstance); final Answer fileCleanupAnswer = agentManager.easySend(vmInstance.getHostId(), cmd); if (fileCleanupAnswer == null) { - LOGGER.error(String.format("Failed to cleanup diagnostics zip file on vm: %s", vmInstance.getUuid())); + logger.error(String.format("Failed to cleanup diagnostics zip file on vm: %s", vmInstance.getUuid())); } else { if (!fileCleanupAnswer.getResult()) { - LOGGER.error(String.format("Zip file cleanup for vm %s has failed with: %s", vmInstance.getUuid(), fileCleanupAnswer.getDetails())); + logger.error(String.format("Zip file cleanup for vm %s has failed with: %s", vmInstance.getUuid(), fileCleanupAnswer.getDetails())); } } @@ -326,11 +324,11 @@ private Pair copyToSecondaryStorageNonVMware(final DataStore st } private Pair copyToSecondaryStorageVMware(final DataStore store, final String vmSshIp, 
String diagnosticsFile) { - LOGGER.info(String.format("Copying %s from %s to secondary store %s", diagnosticsFile, vmSshIp, store.getUri())); + logger.info(String.format("Copying %s from %s to secondary store %s", diagnosticsFile, vmSshIp, store.getUri())); boolean success = false; String mountPoint = mountManager.getMountPoint(store.getUri(), imageStoreDetailsUtil.getNfsVersion(store.getId())); if (StringUtils.isBlank(mountPoint)) { - LOGGER.error("Failed to generate mount point for copying to secondary storage for " + store.getName()); + logger.error("Failed to generate mount point for copying to secondary storage for " + store.getName()); return new Pair<>(false, "Failed to mount secondary storage:" + store.getName()); } @@ -351,7 +349,7 @@ private Pair copyToSecondaryStorageVMware(final DataStore store success = fileInSecondaryStore.exists(); } catch (Exception e) { String msg = String.format("Exception caught during scp from %s to secondary store %s: ", vmSshIp, dataDirectoryInSecondaryStore); - LOGGER.error(msg, e); + logger.error(msg, e); return new Pair<>(false, msg); } @@ -405,7 +403,7 @@ private VMInstanceVO getSystemVMInstance(Long vmId) { VirtualMachine.Type.DomainRouter, VirtualMachine.Type.SecondaryStorageVm); if (vmInstance == null) { String msg = String.format("Unable to find vm instance with id: %s", vmId); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException("Diagnostics command execution failed, " + msg); } @@ -446,15 +444,15 @@ public GCBackgroundTask(DiagnosticsServiceImpl serviceImpl) { this.serviceImpl = serviceImpl; } - private static void deleteOldDiagnosticsFiles(File directory, String storeName) { + private void deleteOldDiagnosticsFiles(File directory, String storeName) { final File[] fileList = directory.listFiles(); if (fileList != null) { String msg = String.format("Found %s diagnostics files in store %s for garbage collection", fileList.length, storeName); - LOGGER.info(msg); + logger.info(msg); for (File file : 
fileList) { if (file.isFile() && MaximumFileAgeforGarbageCollection.value() <= getTimeDifference(file)) { boolean success = file.delete(); - LOGGER.info(file.getName() + " delete status: " + success); + logger.info(file.getName() + " delete status: " + success); } } } diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java index af543c6c7989..0428a9e6907a 100644 --- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java @@ -73,7 +73,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -109,7 +108,6 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDownloadManager { - private static final Logger s_logger = Logger.getLogger(DirectDownloadManagerImpl.class); protected static final String httpHeaderDetailKey = "HTTP_HEADER"; protected static final String BEGIN_CERT = "-----BEGIN CERTIFICATE-----"; protected static final String END_CERT = "-----END CERTIFICATE-----"; @@ -287,8 +285,8 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { VMTemplateStoragePoolVO sPoolRef = vmTemplatePoolDao.findByPoolTemplate(poolId, templateId, null); if (sPoolRef == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Not found (templateId:" + templateId + " poolId: " + poolId + ") in template_spool_ref, persisting it"); + if (logger.isDebugEnabled()) { + logger.debug("Not found (templateId:" + templateId + " poolId: " + poolId + ") in template_spool_ref, persisting it"); } DirectDownloadAnswer ans = (DirectDownloadAnswer) answer; sPoolRef = new 
VMTemplateStoragePoolVO(poolId, templateId, null); @@ -347,7 +345,7 @@ private Answer sendDirectDownloadCommand(DirectDownloadCommand cmd, VMTemplateVO } } - s_logger.debug("Sending Direct download command to host " + hostToSendDownloadCmd); + logger.debug("Sending Direct download command to host " + hostToSendDownloadCmd); answer = agentManager.easySend(hostToSendDownloadCmd, cmd); if (answer != null) { DirectDownloadAnswer ans = (DirectDownloadAnswer)answer; @@ -386,7 +384,7 @@ private void logUsageEvent(VMTemplateVO template, long poolId) { event = EventTypes.EVENT_ISO_DIRECT_DOWNLOAD_FAILURE; } String description = "Direct Download for template Id: " + template.getId() + " on pool Id: " + poolId + " failed"; - s_logger.error(description); + logger.error(description); ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), template.getAccountId(), EventVO.LEVEL_INFO, event, description, template.getId(), ApiCommandResourceType.Template.toString(), 0); } @@ -454,11 +452,11 @@ protected void certificateSanity(String certificatePem) { x509Cert.checkValidity(); } catch (CertificateExpiredException | CertificateNotYetValidException e) { String msg = "Certificate is invalid. Please provide a valid certificate. 
Error: " + e.getMessage(); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } if (x509Cert.getSubjectDN() != null) { - s_logger.debug("Valid certificate for domain name: " + x509Cert.getSubjectDN().getName()); + logger.debug("Valid certificate for domain name: " + x509Cert.getSubjectDN().getName()); } } } @@ -495,12 +493,12 @@ public Pair> uploadCertif hosts = Collections.singletonList(host); certificateVO = directDownloadCertificateDao.findByAlias(alias, hypervisorType, zoneId); if (certificateVO == null) { - s_logger.info("Certificate must be uploaded on zone " + zoneId); + logger.info("Certificate must be uploaded on zone " + zoneId); return new Pair<>(certificateVO, new ArrayList<>()); } } - s_logger.info("Attempting to upload certificate: " + alias + " to " + hosts.size() + " hosts on zone " + zoneId); + logger.info("Attempting to upload certificate: " + alias + " to " + hosts.size() + " hosts on zone " + zoneId); int success = 0; int failed = 0; List results = new ArrayList<>(); @@ -513,7 +511,7 @@ public Pair> uploadCertif Pair result = provisionCertificate(certificateVO.getId(), host.getId()); if (!result.first()) { String msg = "Could not upload certificate " + alias + " on host: " + host.getName() + " (" + host.getUuid() + "): " + result.second(); - s_logger.error(msg); + logger.error(msg); failed++; hostStatus = new HostCertificateStatus(CertificateStatus.FAILED, host, result.second()); } else { @@ -523,7 +521,7 @@ public Pair> uploadCertif results.add(hostStatus); } } - s_logger.info("Certificate was successfully uploaded to " + success + " hosts, " + failed + " failed"); + logger.info("Certificate was successfully uploaded to " + success + " hosts, " + failed + " failed"); return new Pair<>(certificateVO, results); } @@ -532,7 +530,7 @@ private Pair setupCertificateOnHost(DirectDownloadCertificate c String alias = certificate.getAlias(); long certificateId = certificate.getId(); - s_logger.debug("Uploading certificate: 
" + alias + " to host " + hostId); + logger.debug("Uploading certificate: " + alias + " to host " + hostId); SetupDirectDownloadCertificateCommand cmd = new SetupDirectDownloadCertificateCommand(certificateStr, alias); Answer answer = agentManager.easySend(hostId, cmd); Pair result; @@ -541,13 +539,13 @@ private Pair setupCertificateOnHost(DirectDownloadCertificate c if (answer != null) { msg += " due to: " + answer.getDetails(); } - s_logger.error(msg); + logger.error(msg); result = new Pair<>(false, msg); } else { result = new Pair<>(true, "OK"); } - s_logger.info("Certificate " + alias + " successfully uploaded to host: " + hostId); + logger.info("Certificate " + alias + " successfully uploaded to host: " + hostId); DirectDownloadCertificateHostMapVO map = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateId, hostId); if (map != null) { map.setRevoked(false); @@ -581,33 +579,33 @@ public Pair provisionCertificate(long certificateId, long hostI public boolean syncCertificatesToHost(long hostId, long zoneId) { List zoneCertificates = directDownloadCertificateDao.listByZone(zoneId); if (CollectionUtils.isEmpty(zoneCertificates)) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("No certificates to sync on host: " + hostId); + if (logger.isTraceEnabled()) { + logger.trace("No certificates to sync on host: " + hostId); } return true; } boolean syncCertificatesResult = true; int certificatesSyncCount = 0; - s_logger.debug("Syncing certificates on host: " + hostId); + logger.debug("Syncing certificates on host: " + hostId); for (DirectDownloadCertificateVO certificateVO : zoneCertificates) { DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostId); if (mapping == null) { - s_logger.debug("Syncing certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", uploading it"); + logger.debug("Syncing certificate " + 
certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", uploading it"); Pair result = provisionCertificate(certificateVO.getId(), hostId); if (!result.first()) { String msg = "Could not sync certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", upload failed: " + result.second(); - s_logger.error(msg); + logger.error(msg); syncCertificatesResult = false; } else { certificatesSyncCount++; } } else { - s_logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") already synced on host: " + hostId); + logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") already synced on host: " + hostId); } } - s_logger.debug("Synced " + certificatesSyncCount + " out of " + zoneCertificates.size() + " certificates on host: " + hostId); + logger.debug("Synced " + certificatesSyncCount + " out of " + zoneCertificates.size() + " certificates on host: " + hostId); return syncCertificatesResult; } @@ -619,10 +617,10 @@ private List getCertificateHostMappings(Dire DirectDownloadCertificateHostMapVO hostMap = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificate.getId(), hostId); if (hostMap == null) { String msg = "Certificate " + certificate.getAlias() + " cannot be revoked from host " + hostId + " as it is not available on the host"; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } else if (hostMap.isRevoked()) { - s_logger.debug("Certificate " + certificate.getAlias() + " was already revoked from host " + hostId + " skipping it"); + logger.debug("Certificate " + certificate.getAlias() + " was already revoked from host " + hostId + " skipping it"); return new LinkedList<>(); } maps = Collections.singletonList(hostMap); @@ -664,7 +662,7 @@ public List revokeCertificate(DirectDownloadCertificate c int success = 0; int failed = 0; int skipped = 0; - s_logger.info("Attempting to revoke 
certificate alias: " + certificateAlias + " from " + maps.size() + " hosts"); + logger.info("Attempting to revoke certificate alias: " + certificateAlias + " from " + maps.size() + " hosts"); for (DirectDownloadCertificateHostMapVO map : maps) { Long mappingHostId = map.getHostId(); HostVO host = hostDao.findById(mappingHostId); @@ -672,7 +670,7 @@ public List revokeCertificate(DirectDownloadCertificate c if (host == null || host.getDataCenterId() != zoneId || host.getHypervisorType() != HypervisorType.KVM) { if (host != null) { String reason = host.getDataCenterId() != zoneId ? "Host is not in the zone " + zoneId : "Host hypervisor is not KVM"; - s_logger.debug("Skipping host " + host.getName() + ": " + reason); + logger.debug("Skipping host " + host.getName() + ": " + reason); hostStatus = new HostCertificateStatus(CertificateStatus.SKIPPED, host, reason); hostsList.add(hostStatus); } @@ -682,11 +680,11 @@ public List revokeCertificate(DirectDownloadCertificate c Pair result = revokeCertificateAliasFromHost(certificateAlias, mappingHostId); if (!result.first()) { String msg = "Could not revoke certificate from host: " + mappingHostId + ": " + result.second(); - s_logger.error(msg); + logger.error(msg); hostStatus = new HostCertificateStatus(CertificateStatus.FAILED, host, result.second()); failed++; } else { - s_logger.info("Certificate " + certificateAlias + " revoked from host " + mappingHostId); + logger.info("Certificate " + certificateAlias + " revoked from host " + mappingHostId); map.setRevoked(true); hostStatus = new HostCertificateStatus(CertificateStatus.REVOKED, host, null); success++; @@ -694,7 +692,7 @@ public List revokeCertificate(DirectDownloadCertificate c } hostsList.add(hostStatus); } - s_logger.info(String.format("Certificate alias %s revoked from: %d hosts, %d failed, %d skipped", + logger.info(String.format("Certificate alias %s revoked from: %d hosts, %d failed, %d skipped", certificateAlias, success, failed, skipped)); return hostsList; } 
@@ -729,7 +727,7 @@ protected Pair revokeCertificateAliasFromHost(String alias, Lon Answer answer = agentManager.send(hostId, cmd); return new Pair<>(answer != null && answer.getResult(), answer != null ? answer.getDetails() : ""); } catch (AgentUnavailableException | OperationTimedoutException e) { - s_logger.error("Error revoking certificate " + alias + " from host " + hostId, e); + logger.error("Error revoking certificate " + alias + " from host " + hostId, e); return new Pair<>(false, e.getMessage()); } } @@ -796,8 +794,8 @@ public DirectDownloadCertificateUploadBackgroundTask( @Override protected void runInContext() { try { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Direct Download Manager background task is running..."); + if (logger.isTraceEnabled()) { + logger.trace("Direct Download Manager background task is running..."); } final DateTime now = DateTime.now(DateTimeZone.UTC); List enabledZones = dataCenterDao.listEnabledZones(); @@ -810,15 +808,15 @@ protected void runInContext() { for (HostVO hostVO : hostsToUpload) { DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostVO.getId()); if (mapping == null) { - s_logger.debug("Certificate " + certificateVO.getId() + + logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") was not uploaded to host: " + hostVO.getId() + " uploading it"); Pair result = directDownloadManager.provisionCertificate(certificateVO.getId(), hostVO.getId()); - s_logger.debug("Certificate " + certificateVO.getAlias() + " " + + logger.debug("Certificate " + certificateVO.getAlias() + " " + (result.first() ? 
"uploaded" : "could not be uploaded") + " to host " + hostVO.getId()); if (!result.first()) { - s_logger.error("Certificate " + certificateVO.getAlias() + " failed: " + result.second()); + logger.error("Certificate " + certificateVO.getAlias() + " failed: " + result.second()); } } } @@ -827,7 +825,7 @@ protected void runInContext() { } } } catch (final Throwable t) { - s_logger.error("Error trying to run Direct Download background task", t); + logger.error("Error trying to run Direct Download background task", t); } } } diff --git a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java index 5d045609f887..2ab252430d5a 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/ha/HAManagerImpl.java @@ -60,7 +60,6 @@ import org.apache.cloudstack.poll.BackgroundPollTask; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.cluster.ClusterManagerListener; import com.cloud.dc.ClusterDetailsDao; @@ -90,7 +89,6 @@ import com.google.common.base.Preconditions; public final class HAManagerImpl extends ManagerBase implements HAManager, ClusterManagerListener, PluggableService, Configurable, StateListener { - public static final Logger LOG = Logger.getLogger(HAManagerImpl.class); @Inject private HAConfigDao haConfigDao; @@ -157,7 +155,7 @@ public boolean transitionHAState(final HAConfig.Event event, final HAConfig haCo if (result) { final String message = String.format("Transitioned host HA state from:%s to:%s due to event:%s for the host id:%d", currentHAState, nextState, event, haConfig.getResourceId()); - LOG.debug(message); + logger.debug(message); if (nextState == HAConfig.HAState.Recovering || nextState == HAConfig.HAState.Fencing || nextState == HAConfig.HAState.Fenced) { 
ActionEventUtils.onActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), @@ -166,7 +164,7 @@ public boolean transitionHAState(final HAConfig.Event event, final HAConfig haCo } return result; } catch (NoTransitionException e) { - LOG.warn(String.format("Unable to find next HA state for current HA state=[%s] for event=[%s] for host=[%s].", currentHAState, event, haConfig.getResourceId()), e); + logger.warn(String.format("Unable to find next HA state for current HA state=[%s] for event=[%s] for host=[%s].", currentHAState, event, haConfig.getResourceId()), e); } return false; } @@ -309,10 +307,10 @@ public Boolean isVMAliveOnHost(final Host host) throws Investigator.UnknownVM { final HAConfig haConfig = haConfigDao.findHAResource(host.getId(), HAResource.ResourceType.Host); if (haConfig != null) { if (haConfig.getState() == HAConfig.HAState.Fenced) { - LOG.debug(String.format("HA: Host [%s] is fenced.", host.getId())); + logger.debug(String.format("HA: Host [%s] is fenced.", host.getId())); return false; } - LOG.debug(String.format("HA: Host [%s] is alive.", host.getId())); + logger.debug(String.format("HA: Host [%s] is alive.", host.getId())); return true; } throw new Investigator.UnknownVM(); @@ -322,10 +320,10 @@ public Status getHostStatus(final Host host) { final HAConfig haConfig = haConfigDao.findHAResource(host.getId(), HAResource.ResourceType.Host); if (haConfig != null) { if (haConfig.getState() == HAConfig.HAState.Fenced) { - LOG.debug(String.format("HA: Agent [%s] is available/suspect/checking Up.", host.getId())); + logger.debug(String.format("HA: Agent [%s] is available/suspect/checking Up.", host.getId())); return Status.Down; } else if (haConfig.getState() == HAConfig.HAState.Degraded || haConfig.getState() == HAConfig.HAState.Recovering || haConfig.getState() == HAConfig.HAState.Fencing) { - LOG.debug(String.format("HA: Agent [%s] is disconnected. 
State: %s, %s.", host.getId(), haConfig.getState(), haConfig.getState().getDescription())); + logger.debug(String.format("HA: Agent [%s] is disconnected. State: %s, %s.", host.getId(), haConfig.getState(), haConfig.getState().getDescription())); return Status.Disconnected; } return Status.Up; @@ -537,20 +535,20 @@ public boolean preStateTransitionEvent(final HAConfig.HAState oldState, final HA return false; } - LOG.debug(String.format("HA state pre-transition:: new state=[%s], old state=[%s], for resource id=[%s], status=[%s], ha config state=[%s]." , newState, oldState, haConfig.getResourceId(), status, haConfig.getState())); + logger.debug(String.format("HA state pre-transition:: new state=[%s], old state=[%s], for resource id=[%s], status=[%s], ha config state=[%s]." , newState, oldState, haConfig.getResourceId(), status, haConfig.getState())); if (status && haConfig.getState() != newState) { - LOG.warn(String.format("HA state pre-transition:: HA state is not equal to transition state, HA state=[%s], new state=[%s].", haConfig.getState(), newState)); + logger.warn(String.format("HA state pre-transition:: HA state is not equal to transition state, HA state=[%s], new state=[%s].", haConfig.getState(), newState)); } return processHAStateChange(haConfig, newState, status); } @Override public boolean postStateTransitionEvent(final StateMachine2.Transition transition, final HAConfig haConfig, final boolean status, final Object opaque) { - LOG.debug(String.format("HA state post-transition:: new state=[%s], old state=[%s], for resource id=[%s], status=[%s], ha config state=[%s].", transition.getToState(), transition.getCurrentState(), haConfig.getResourceId(), status, haConfig.getState())); + logger.debug(String.format("HA state post-transition:: new state=[%s], old state=[%s], for resource id=[%s], status=[%s], ha config state=[%s].", transition.getToState(), transition.getCurrentState(), haConfig.getResourceId(), status, haConfig.getState())); if (status && 
haConfig.getState() != transition.getToState()) { - LOG.warn(String.format("HA state post-transition:: HA state is not equal to transition state, HA state=[%s], new state=[%s].", haConfig.getState(), transition.getToState())); + logger.warn(String.format("HA state post-transition:: HA state is not equal to transition state, HA state=[%s], new state=[%s].", haConfig.getState(), transition.getToState())); } return processHAStateChange(haConfig, transition.getToState(), status); } @@ -607,7 +605,7 @@ public boolean configure(final String name, final Map params) th pollManager.submitTask(new HAManagerBgPollTask()); HAConfig.HAState.getStateMachine().registerListener(this); - LOG.debug("HA manager has been configured."); + logger.debug("HA manager has been configured."); return true; } @@ -644,7 +642,7 @@ protected void runInContext() { HAConfig currentHaConfig = null; try { - LOG.debug("HA health check task is running..."); + logger.debug("HA health check task is running..."); final List haConfigList = new ArrayList(haConfigDao.listAll()); for (final HAConfig haConfig : haConfigList) { @@ -718,9 +716,9 @@ protected void runInContext() { } } catch (Throwable t) { if (currentHaConfig != null) { - LOG.error(String.format("Error trying to perform health checks in HA manager [%s].", currentHaConfig.getHaProvider()), t); + logger.error(String.format("Error trying to perform health checks in HA manager [%s].", currentHaConfig.getHaProvider()), t); } else { - LOG.error("Error trying to perform health checks in HA manager.", t); + logger.error("Error trying to perform health checks in HA manager.", t); } } } diff --git a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java index 966c2843e65a..af76d2d4ae71 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java +++ 
b/server/src/main/java/org/apache/cloudstack/ha/provider/host/HAAbstractHostProvider.java @@ -33,13 +33,11 @@ import org.apache.cloudstack.ha.HAResource; import org.apache.cloudstack.ha.provider.HAProvider; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; import javax.inject.Inject; public abstract class HAAbstractHostProvider extends AdapterBase implements HAProvider { - private final static Logger LOG = Logger.getLogger(HAAbstractHostProvider.class); @Inject private AlertManager alertManager; @@ -74,11 +72,11 @@ public boolean isInMaintenanceMode(final Host host) { public void fenceSubResources(final Host r) { if (r.getState() != Status.Down) { try { - LOG.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host id=" + r.getId()); + logger.debug("Trying to disconnect the host without investigation and scheduling HA for the VMs on host id=" + r.getId()); agentManager.disconnectWithoutInvestigation(r.getId(), Event.HostDown); oldHighAvailabilityManager.scheduleRestartForVmsOnHost((HostVO)r, true); } catch (Exception e) { - LOG.error("Failed to disconnect host and schedule HA restart of VMs after fencing the host: ", e); + logger.error("Failed to disconnect host and schedule HA restart of VMs after fencing the host: ", e); } } } @@ -88,7 +86,7 @@ public void enableMaintenance(final Host r) { try { resourceManager.resourceStateTransitTo(r, ResourceState.Event.InternalEnterMaintenance, ManagementServerNode.getManagementServerId()); } catch (NoTransitionException e) { - LOG.error("Failed to put host in maintenance mode after host-ha fencing and scheduling VM-HA: ", e); + logger.error("Failed to put host in maintenance mode after host-ha fencing and scheduling VM-HA: ", e); } } diff --git a/server/src/main/java/org/apache/cloudstack/ha/task/ActivityCheckTask.java b/server/src/main/java/org/apache/cloudstack/ha/task/ActivityCheckTask.java index 24f969632623..5ddbac626bc5 100644 --- 
a/server/src/main/java/org/apache/cloudstack/ha/task/ActivityCheckTask.java +++ b/server/src/main/java/org/apache/cloudstack/ha/task/ActivityCheckTask.java @@ -28,12 +28,10 @@ import org.apache.cloudstack.ha.provider.HACheckerException; import org.apache.cloudstack.ha.provider.HAProvider; import org.apache.cloudstack.ha.provider.HAProvider.HAProviderConfig; -import org.apache.log4j.Logger; import org.joda.time.DateTime; public class ActivityCheckTask extends BaseHATask { - public static final Logger LOG = Logger.getLogger(ActivityCheckTask.class); @Inject private HAManager haManager; diff --git a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java index 9c878092a546..9cc65e796a84 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java +++ b/server/src/main/java/org/apache/cloudstack/ha/task/BaseHATask.java @@ -30,11 +30,12 @@ import org.apache.cloudstack.ha.provider.HAFenceException; import org.apache.cloudstack.ha.provider.HAProvider; import org.apache.cloudstack.ha.provider.HARecoveryException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.joda.time.DateTime; public abstract class BaseHATask implements Callable { - public static final Logger LOG = Logger.getLogger(BaseHATask.class); + protected Logger logger = LogManager.getLogger(getClass()); private final HAResource resource; private final HAProvider haProvider; @@ -96,10 +97,10 @@ public Boolean call() throws HACheckerException, HAFenceException, HARecoveryExc result = future.get(timeout, TimeUnit.SECONDS); } } catch (InterruptedException | ExecutionException e) { - LOG.warn("Exception occurred while running " + getTaskType() + " on a resource: " + e.getMessage(), e.getCause()); + logger.warn("Exception occurred while running " + getTaskType() + " on a resource: " + e.getMessage(), e.getCause()); throwable = e.getCause(); 
} catch (TimeoutException e) { - LOG.trace(getTaskType() + " operation timed out for resource id:" + resource.getId()); + logger.trace(getTaskType() + " operation timed out for resource id:" + resource.getId()); } processResult(result, throwable); return result; diff --git a/server/src/main/java/org/apache/cloudstack/ha/task/HealthCheckTask.java b/server/src/main/java/org/apache/cloudstack/ha/task/HealthCheckTask.java index 92dcdc2164da..f982d7dc6a45 100644 --- a/server/src/main/java/org/apache/cloudstack/ha/task/HealthCheckTask.java +++ b/server/src/main/java/org/apache/cloudstack/ha/task/HealthCheckTask.java @@ -23,7 +23,6 @@ import org.apache.cloudstack.ha.HAResourceCounter; import org.apache.cloudstack.ha.provider.HACheckerException; import org.apache.cloudstack.ha.provider.HAProvider; -import org.apache.log4j.Logger; import javax.inject.Inject; import java.util.concurrent.ExecutorService; @@ -33,7 +32,6 @@ public class HealthCheckTask extends BaseHATask { @Inject private HAManager haManager; - public static final Logger LOG = Logger.getLogger(HealthCheckTask.class); public HealthCheckTask(final HAResource resource, final HAProvider haProvider, final HAConfig haConfig, final HAProvider.HAProviderConfig haProviderConfig, final ExecutorService executor) { diff --git a/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java b/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java index d9f1db616f58..f05e216f1eb5 100644 --- a/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java @@ -23,7 +23,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -79,7 +78,6 @@ @Component public class ApplicationLoadBalancerManagerImpl extends 
ManagerBase implements ApplicationLoadBalancerService { - private static final Logger s_logger = Logger.getLogger(ApplicationLoadBalancerManagerImpl.class); @Inject NetworkModel _networkModel; @@ -182,7 +180,7 @@ public ApplicationLoadBalancerRuleVO doInTransaction(TransactionStatus status) t if (!_firewallDao.setStateToAdd(newRule)) { throw new CloudRuntimeException("Unable to update the state to add for " + newRule); } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + + logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + newRule.getSourcePortStart().intValue() + ", instance port " + newRule.getDefaultPortStart() + " is added successfully."); CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); Network ntwk = _networkModel.getNetwork(newRule.getNetworkId()); @@ -259,7 +257,7 @@ protected Ip getSourceIp(Scheme scheme, Network sourceIpNtwk, String requestedIp if (requestedIp != null) { if (_lbDao.countBySourceIp(new Ip(requestedIp), sourceIpNtwk.getId()) > 0) { - s_logger.debug("IP address " + requestedIp + " is already used by existing LB rule, returning it"); + logger.debug("IP address " + requestedIp + " is already used by existing LB rule, returning it"); return new Ip(requestedIp); } @@ -530,8 +528,8 @@ protected void detectInternalLbRulesConflict(ApplicationLoadBalancerRule newLbRu } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No network rule conflicts detected for " + newLbRule + " against " + (lbRules.size() - 1) + " existing rules"); + if (logger.isDebugEnabled()) { + logger.debug("No network rule conflicts detected for " + newLbRule + " against " + (lbRules.size() - 1) + " existing rules"); } } diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java 
b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java index 2b8ea7ffe5fe..c09775ddaf66 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/RouterDeploymentDefinition.java @@ -26,7 +26,8 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.dao.DiskOfferingDao; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.configuration.ConfigurationManagerImpl; import com.cloud.dc.DataCenter.NetworkType; @@ -79,7 +80,7 @@ import com.cloud.vm.dao.VMInstanceDao; public class RouterDeploymentDefinition { - private static final Logger logger = Logger.getLogger(RouterDeploymentDefinition.class); + protected Logger logger = LogManager.getLogger(getClass()); protected static final int LIMIT_NUMBER_OF_ROUTERS = 5; protected static final int MAX_NUMBER_OF_ROUTERS = 2; diff --git a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java index 23da0dddedb4..8b964a3ba80c 100644 --- a/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java +++ b/server/src/main/java/org/apache/cloudstack/network/router/deployment/VpcRouterDeploymentDefinition.java @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; -import org.apache.log4j.Logger; import com.cloud.dc.dao.VlanDao; import com.cloud.deploy.DataCenterDeployment; @@ -44,7 +43,6 @@ import com.cloud.vm.VirtualMachineProfile.Param; public class VpcRouterDeploymentDefinition extends RouterDeploymentDefinition { - private static final Logger logger = 
Logger.getLogger(VpcRouterDeploymentDefinition.class); protected VpcDao vpcDao; protected VpcOfferingDao vpcOffDao; diff --git a/server/src/main/java/org/apache/cloudstack/network/ssl/CertServiceImpl.java b/server/src/main/java/org/apache/cloudstack/network/ssl/CertServiceImpl.java index 2e1e59463198..928e58a4f25c 100644 --- a/server/src/main/java/org/apache/cloudstack/network/ssl/CertServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/network/ssl/CertServiceImpl.java @@ -62,7 +62,8 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.network.tls.CertService; import org.apache.commons.io.IOUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.util.io.pem.PemObject; import org.bouncycastle.util.io.pem.PemReader; @@ -92,7 +93,7 @@ public class CertServiceImpl implements CertService { - private static final Logger s_logger = Logger.getLogger(CertServiceImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AccountManager _accountMgr; @@ -126,7 +127,7 @@ public SslCertResponse uploadSslCert(final UploadSslCertCmd certCmd) { final String name = certCmd.getName(); validate(cert, key, password, chain, certCmd.getEnabledRevocationCheck()); - s_logger.debug("Certificate Validation succeeded"); + logger.debug("Certificate Validation succeeded"); final String fingerPrint = CertificateHelper.generateFingerPrint(parseCertificate(cert)); @@ -232,7 +233,7 @@ public List listSslCerts(final ListSslCertsCmd listSslCertCmd) lbCertMapRule = _lbCertDao.findByLbRuleId(lbRuleId); if (lbCertMapRule == null) { - s_logger.debug("No certificate bound to loadbalancer id: " + lbRuleId); + logger.debug("No certificate bound to loadbalancer id: " + lbRuleId); return certResponseList; } diff --git 
a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java index fc29fcca9980..e777e959b849 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/AdvancedNetworkTopology.java @@ -47,7 +47,6 @@ import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineProfile; -import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Component; @@ -55,7 +54,6 @@ @Component public class AdvancedNetworkTopology extends BasicNetworkTopology { - private static final Logger s_logger = Logger.getLogger(AdvancedNetworkTopology.class); @Autowired @Qualifier("advancedNetworkVisitor") @@ -69,7 +67,7 @@ public BasicNetworkVisitor getVisitor() { @Override public String[] applyVpnUsers(final RemoteAccessVpn remoteAccessVpn, final List users, final VirtualRouter router) throws ResourceUnavailableException { - s_logger.debug("APPLYING ADVANCED VPN USERS RULES"); + logger.debug("APPLYING ADVANCED VPN USERS RULES"); final AdvancedVpnRules routesRules = new AdvancedVpnRules(remoteAccessVpn, users); @@ -90,10 +88,10 @@ public String[] applyVpnUsers(final RemoteAccessVpn remoteAccessVpn, final List< @Override public boolean applyStaticRoutes(final List staticRoutes, final List routers) throws ResourceUnavailableException { - s_logger.debug("APPLYING STATIC ROUTES RULES"); + logger.debug("APPLYING STATIC ROUTES RULES"); if (staticRoutes == null || staticRoutes.isEmpty()) { - s_logger.debug("No static routes to apply"); + logger.debug("No static routes to apply"); return true; } @@ -106,9 +104,9 @@ public boolean applyStaticRoutes(final List staticRoutes, fi result = result && routesRules.accept(_advancedVisitor, 
router); } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending StaticRoute command to the backend"); + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending StaticRoute command to the backend"); } else { - s_logger.warn("Unable to apply StaticRoute, virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply StaticRoute, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply StaticRoute on the backend," + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); @@ -120,7 +118,7 @@ public boolean applyStaticRoutes(final List staticRoutes, fi @Override public boolean setupDhcpForPvlan(final boolean isAddPvlan, final DomainRouterVO router, final Long hostId, final NicProfile nic) throws ResourceUnavailableException { - s_logger.debug("SETUP DHCP PVLAN RULES"); + logger.debug("SETUP DHCP PVLAN RULES"); if (!nic.getBroadCastUri().getScheme().equals("pvlan")) { return false; @@ -133,7 +131,7 @@ public boolean setupDhcpForPvlan(final boolean isAddPvlan, final DomainRouterVO @Override public boolean setupPrivateGateway(final PrivateGateway gateway, final VirtualRouter router) throws ConcurrentOperationException, ResourceUnavailableException { - s_logger.debug("SETUP PRIVATE GATEWAY RULES"); + logger.debug("SETUP PRIVATE GATEWAY RULES"); final PrivateGatewayRules routesRules = new PrivateGatewayRules(gateway); @@ -144,7 +142,7 @@ public boolean setupPrivateGateway(final PrivateGateway gateway, final VirtualRo public boolean applyUserData(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final DeployDestination dest, final DomainRouterVO router) throws ResourceUnavailableException { - s_logger.debug("APPLYING VPC USERDATA 
RULES"); + logger.debug("APPLYING VPC USERDATA RULES"); final String typeString = "userdata and password entry"; final boolean isPodLevelException = false; @@ -160,7 +158,7 @@ public boolean applyUserData(final Network network, final NicProfile nic, final public boolean applyDhcpEntry(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final DeployDestination dest, final DomainRouterVO router) throws ResourceUnavailableException { - s_logger.debug("APPLYING VPC DHCP ENTRY RULES"); + logger.debug("APPLYING VPC DHCP ENTRY RULES"); final String typeString = "dhcp entry"; final Long podId = null; @@ -174,7 +172,7 @@ public boolean applyDhcpEntry(final Network network, final NicProfile nic, final @Override public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile profile, VirtualRouter virtualRouter) throws ResourceUnavailableException { - s_logger.debug("REMOVE VPC DHCP ENTRY RULES"); + logger.debug("REMOVE VPC DHCP ENTRY RULES"); final String typeString = "dhcp entry"; final Long podId = null; @@ -192,7 +190,7 @@ public boolean associatePublicIP(final Network network, final List routers) throws ResourceUnavailableException { - s_logger.debug("CONFIG DHCP FOR SUBNETS RULES"); + logger.debug("CONFIG DHCP FOR SUBNETS RULES"); // Assuming we have only one router per network For Now. 
final DomainRouterVO router = routers.get(0); if (router.getState() != State.Running) { - s_logger.warn("Failed to configure dhcp: router not in running state"); + logger.warn("Failed to configure dhcp: router not in running state"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } @@ -142,7 +143,7 @@ public boolean configDhcpForSubnet(final Network network, final NicProfile nic, public boolean applyDhcpEntry(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final DeployDestination dest, final DomainRouterVO router) throws ResourceUnavailableException { - s_logger.debug("APPLYING DHCP ENTRY RULES"); + logger.debug("APPLYING DHCP ENTRY RULES"); final String typeString = "dhcp entry"; final Long podId = dest.getPod().getId(); @@ -167,7 +168,7 @@ public boolean applyDhcpEntry(final Network network, final NicProfile nic, final public boolean applyUserData(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final DeployDestination dest, final DomainRouterVO router) throws ResourceUnavailableException { - s_logger.debug("APPLYING USERDATA RULES"); + logger.debug("APPLYING USERDATA RULES"); final String typeString = "userdata and password entry"; final Long podId = dest.getPod().getId(); @@ -190,11 +191,11 @@ public boolean applyLoadBalancingRules(final Network network, final List rules, final VirtualRouter router) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - s_logger.debug("No firewall rules to be applied for network " + network.getId()); + logger.debug("No firewall rules to be applied for network " + network.getId()); return true; } - s_logger.debug("APPLYING FIREWALL RULES"); + logger.debug("APPLYING FIREWALL RULES"); final String typeString = "firewall rules"; final boolean isPodLevelException = false; @@ -229,11 +230,11 @@ public boolean 
applyFirewallRules(final Network network, final List rules, final VirtualRouter router) throws ResourceUnavailableException { if (rules == null || rules.isEmpty()) { - s_logger.debug("No static nat rules to be applied for network " + network.getId()); + logger.debug("No static nat rules to be applied for network " + network.getId()); return true; } - s_logger.debug("APPLYING STATIC NAT RULES"); + logger.debug("APPLYING STATIC NAT RULES"); final String typeString = "static nat rules"; final boolean isPodLevelException = false; @@ -249,11 +250,11 @@ public boolean applyStaticNats(final Network network, final List ipAddress, final VirtualRouter router) throws ResourceUnavailableException { if (ipAddress == null || ipAddress.isEmpty()) { - s_logger.debug("No ip association rules to be applied for network " + network.getId()); + logger.debug("No ip association rules to be applied for network " + network.getId()); return true; } - s_logger.debug("APPLYING IP RULES"); + logger.debug("APPLYING IP RULES"); final String typeString = "ip association"; final boolean isPodLevelException = false; @@ -268,22 +269,22 @@ public boolean associatePublicIP(final Network network, final List users, final List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { - s_logger.warn("Failed to add/remove VPN users: no router found for account and zone"); + logger.warn("Failed to add/remove VPN users: no router found for account and zone"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR doesn't exist for network " + network.getId(), DataCenter.class, network.getDataCenterId()); } - s_logger.debug("APPLYING BASIC VPN RULES"); + logger.debug("APPLYING BASIC VPN RULES"); final BasicVpnRules vpnRules = new BasicVpnRules(network, users); boolean agentResults = true; for (final DomainRouterVO router : routers) { if(router.getState() == State.Stopped || router.getState() == State.Stopping){ - s_logger.info("The router " + 
router.getInstanceName()+ " is in the " + router.getState() + " state. So not applying the VPN rules. Will be applied once the router gets restarted."); + logger.info("The router " + router.getInstanceName()+ " is in the " + router.getState() + " state. So not applying the VPN rules. Will be applied once the router gets restarted."); continue; } else if (router.getState() != State.Running) { - s_logger.warn("Failed to add/remove VPN users: router not in running state"); + logger.warn("Failed to add/remove VPN users: router not in running state"); throw new ResourceUnavailableException("Unable to assign ip addresses, domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } @@ -311,7 +312,7 @@ else if (router.getState() != State.Running) { public boolean savePasswordToRouter(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final VirtualRouter router) throws ResourceUnavailableException { - s_logger.debug("SAVE PASSWORD TO ROUTE RULES"); + logger.debug("SAVE PASSWORD TO ROUTE RULES"); final String typeString = "save password entry"; final boolean isPodLevelException = false; @@ -326,7 +327,7 @@ public boolean savePasswordToRouter(final Network network, final NicProfile nic, @Override public boolean saveSSHPublicKeyToRouter(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final VirtualRouter router, final String sshPublicKey) throws ResourceUnavailableException { - s_logger.debug("SAVE SSH PUB KEY TO ROUTE RULES"); + logger.debug("SAVE SSH PUB KEY TO ROUTE RULES"); final String typeString = "save SSHkey entry"; final boolean isPodLevelException = false; @@ -341,7 +342,7 @@ public boolean saveSSHPublicKeyToRouter(final Network network, final NicProfile @Override public boolean saveUserDataToRouter(final Network network, final NicProfile nic, final VirtualMachineProfile profile, final VirtualRouter router) throws ResourceUnavailableException { - s_logger.debug("SAVE 
USERDATA TO ROUTE RULES"); + logger.debug("SAVE USERDATA TO ROUTE RULES"); final String typeString = "save userdata entry"; final boolean isPodLevelException = false; @@ -357,7 +358,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin final boolean failWhenDisconnect, final RuleApplierWrapper ruleApplierWrapper) throws ResourceUnavailableException { if (router == null) { - s_logger.warn("Unable to apply " + typeString + ", virtual router doesn't exist in the network " + network.getId()); + logger.warn("Unable to apply " + typeString + ", virtual router doesn't exist in the network " + network.getId()); throw new ResourceUnavailableException("Unable to apply " + typeString, DataCenter.class, network.getDataCenterId()); } @@ -374,14 +375,14 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin boolean result = true; final String msg = "Unable to apply " + typeString + " on disconnected router "; if (router.getState() == State.Running) { - s_logger.debug("Applying " + typeString + " in network " + network); + logger.debug("Applying " + typeString + " in network " + network); if (router.isStopPending()) { if (_hostDao.findById(router.getHostId()).getState() == Status.Up) { throw new ResourceUnavailableException("Unable to process due to the stop pending router " + router.getInstanceName() + " haven't been stopped after it's host coming back!", DataCenter.class, router.getDataCenterId()); } - s_logger.debug("Router " + router.getInstanceName() + " is stop pending, so not sending apply " + typeString + " commands to the backend"); + logger.debug("Router " + router.getInstanceName() + " is stop pending, so not sending apply " + typeString + " commands to the backend"); return false; } @@ -389,7 +390,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin result = ruleApplier.accept(getVisitor(), router); connectedRouters.add(router); } catch (final AgentUnavailableException e) { - 
s_logger.warn(msg + router.getInstanceName(), e); + logger.warn(msg + router.getInstanceName(), e); disconnectedRouters.add(router); } @@ -403,9 +404,9 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending apply " + typeString + " commands to the backend"); + logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending apply " + typeString + " commands to the backend"); } else { - s_logger.warn("Unable to apply " + typeString + ", virtual router is not in the right state " + router.getState()); + logger.warn("Unable to apply " + typeString + ", virtual router is not in the right state " + router.getState()); if (isZoneBasic && isPodLevelException) { throw new ResourceUnavailableException("Unable to apply " + typeString + ", virtual router is not in the right state", Pod.class, podId); } @@ -426,8 +427,8 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin } } } else if (!disconnectedRouters.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + if (logger.isDebugEnabled()) { + logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); } if (isZoneBasic && isPodLevelException) { throw new ResourceUnavailableException(msg, Pod.class, podId); @@ -444,7 +445,7 @@ public boolean applyRules(final Network network, final VirtualRouter router, fin @Override public boolean removeDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile profile, VirtualRouter virtualRouter) throws ResourceUnavailableException { - s_logger.debug("REMOVING DHCP ENTRY RULE"); + logger.debug("REMOVING DHCP ENTRY RULE"); final String typeString = "dhcp entry"; final Long podId = 
profile.getVirtualMachine().getPodIdToDeployIn(); diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java index 42fac0aea23b..78f281f32cfe 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/BasicNetworkVisitor.java @@ -22,7 +22,6 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.stereotype.Component; @@ -74,7 +73,6 @@ @Component public class BasicNetworkVisitor extends NetworkTopologyVisitor { - private static final Logger s_logger = Logger.getLogger(BasicNetworkVisitor.class); @Autowired @Qualifier("networkHelper") @@ -157,7 +155,7 @@ public boolean visit(final FirewallRules firewall) throws ResourceUnavailableExc return _networkGeneralHelper.sendCommandsToRouter(router, cmds); } - s_logger.warn("Unable to apply rules of purpose: " + rules.get(0).getPurpose()); + logger.warn("Unable to apply rules of purpose: " + rules.get(0).getPurpose()); return false; } diff --git a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java index 96cfcd02a340..035c67457e5d 100644 --- a/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java +++ b/server/src/main/java/org/apache/cloudstack/network/topology/NetworkTopologyVisitor.java @@ -37,9 +37,13 @@ import com.cloud.network.rules.UserdataToRouterRules; import com.cloud.network.rules.VirtualNetworkApplianceFactory; import com.cloud.network.rules.VpcIpAssociationRules; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public abstract class 
NetworkTopologyVisitor { + protected Logger logger = LogManager.getLogger(getClass()); + public abstract VirtualNetworkApplianceFactory getVirtualNetworkApplianceFactory(); public abstract boolean visit(StaticNatRules nat) throws ResourceUnavailableException; diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java index 302765aa2873..02600b87f290 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java @@ -43,7 +43,6 @@ import org.apache.cloudstack.poll.BackgroundPollTask; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.alert.AlertManager; @@ -73,7 +72,6 @@ @Component public class OutOfBandManagementServiceImpl extends ManagerBase implements OutOfBandManagementService, Manager, Configurable { - public static final Logger LOG = Logger.getLogger(OutOfBandManagementServiceImpl.class); @Inject private ClusterDetailsDao clusterDetailsDao; @@ -108,7 +106,7 @@ private void initializeDriversMap() { for (final OutOfBandManagementDriver driver : outOfBandManagementDrivers) { outOfBandManagementDriversMap.put(driver.getName().toLowerCase(), driver); } - LOG.debug("Discovered out-of-band management drivers configured in the OutOfBandManagementService"); + logger.debug("Discovered out-of-band management drivers configured in the OutOfBandManagementService"); } } @@ -194,7 +192,7 @@ private void sendAuthError(final Host host, final String message) { boolean concurrentUpdateResult = hostAlertCache.asMap().replace(host.getId(), sentCount, sentCount+1L); if (concurrentUpdateResult) { final String subject = 
String.format("Out-of-band management auth-error detected for %s in cluster [id: %d] and zone [id: %d].", host, host.getClusterId(), host.getDataCenterId()); - LOG.error(subject + ": " + message); + logger.error(subject + ": " + message); alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR, host.getDataCenterId(), host.getPodId(), subject, message); } } @@ -213,7 +211,7 @@ private boolean transitionPowerState(OutOfBandManagement.PowerState.Event event, boolean result = OutOfBandManagement.PowerState.getStateMachine().transitTo(outOfBandManagementHost, event, null, outOfBandManagementDao); if (result) { final String message = String.format("Transitioned out-of-band management power state from %s to %s due to event: %s for %s", currentPowerState, newPowerState, event, host); - LOG.debug(message); + logger.debug(message); if (newPowerState == OutOfBandManagement.PowerState.Unknown) { ActionEventUtils.onActionEvent(CallContext.current().getCallingUserId(), CallContext.current().getCallingAccountId(), Domain.ROOT_DOMAIN, EventTypes.EVENT_HOST_OUTOFBAND_MANAGEMENT_POWERSTATE_TRANSITION, message, host.getId(), ApiCommandResourceType.Host.toString()); @@ -221,7 +219,7 @@ private boolean transitionPowerState(OutOfBandManagement.PowerState.Event event, } return result; } catch (NoTransitionException ignored) { - LOG.trace(String.format("Unable to transition out-of-band management power state for %s for the event: %s and current power state: %s", host, event, currentPowerState)); + logger.trace(String.format("Unable to transition out-of-band management power state for %s for the event: %s and current power state: %s", host, event, currentPowerState)); } return false; } @@ -256,7 +254,7 @@ private boolean isOutOfBandManagementEnabledForHost(Long hostId) { Host host = hostDao.findById(hostId); if (host == null || host.getResourceState() == ResourceState.Degraded) { String state = host != null ? 
String.valueOf(host.getResourceState()) : null; - LOG.debug(String.format("Host [id=%s, state=%s] was removed or placed in Degraded state by the Admin.", hostId, state)); + logger.debug(String.format("Host [id=%s, state=%s] was removed or placed in Degraded state by the Admin.", hostId, state)); return false; } @@ -393,7 +391,7 @@ public OutOfBandManagementResponse configure(final Host host, final ImmutableMap } String result = String.format("Out-of-band management successfully configured for %s.", host); - LOG.debug(result); + logger.debug(result); final OutOfBandManagementResponse response = new OutOfBandManagementResponse(outOfBandManagementDao.findByHost(host.getId())); response.setResultDescription(result); @@ -432,7 +430,7 @@ public OutOfBandManagementResponse executePowerOperation(final Host host, final sendAuthError(host, errorMessage); } if (!powerOperation.equals(OutOfBandManagement.PowerOperation.STATUS)) { - LOG.debug(errorMessage); + logger.debug(errorMessage); } throw new CloudRuntimeException(errorMessage); } @@ -476,7 +474,7 @@ public Boolean doInTransaction(TransactionStatus status) { try { driverResponse = driver.execute(changePasswordCmd); } catch (Exception e) { - LOG.error("Out-of-band management change password failed due to driver error: " + e.getMessage()); + logger.error("Out-of-band management change password failed due to driver error: " + e.getMessage()); throw new CloudRuntimeException(String.format("Failed to change out-of-band management password for %s due to driver error: %s", host, e.getMessage())); } @@ -524,7 +522,7 @@ public boolean configure(final String name, final Map params) th backgroundPollManager.submitTask(new OutOfBandManagementPowerStatePollTask()); - LOG.info("Starting out-of-band management background sync executor with thread pool-size=" + poolSize); + logger.info("Starting out-of-band management background sync executor with thread pool-size=" + poolSize); return true; } @@ -563,8 +561,8 @@ private final class 
OutOfBandManagementPowerStatePollTask extends ManagedContext @Override protected void runInContext() { try { - if (LOG.isTraceEnabled()) { - LOG.trace("Host out-of-band management power state poll task is running..."); + if (logger.isTraceEnabled()) { + logger.trace("Host out-of-band management power state poll task is running..."); } final List outOfBandManagementHosts = outOfBandManagementDao.findAllByManagementServer(ManagementServerNode.getManagementServerId()); if (outOfBandManagementHosts == null || outOfBandManagementHosts.isEmpty()) { @@ -579,14 +577,14 @@ protected void runInContext() { submitBackgroundPowerSyncTask(host); } else if (outOfBandManagementHost.getPowerState() != OutOfBandManagement.PowerState.Disabled) { if (transitionPowerStateToDisabled(Collections.singletonList(host))) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Out-of-band management was disabled in zone/cluster/host, disabled power state for %s", host)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Out-of-band management was disabled in zone/cluster/host, disabled power state for %s", host)); } } } } } catch (Throwable t) { - LOG.error("Error trying to retrieve host out-of-band management stats", t); + logger.error("Error trying to retrieve host out-of-band management stats", t); } } diff --git a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java index 8d037f2fc993..487a11c97526 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/PowerOperationTask.java @@ -19,7 +19,8 @@ import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import 
com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; @@ -27,7 +28,7 @@ import com.cloud.host.Host; public class PowerOperationTask implements Runnable { - public static final Logger LOG = Logger.getLogger(PowerOperationTask.class); + protected Logger logger = LogManager.getLogger(getClass()); final private OutOfBandManagementService service; final private Host host; @@ -49,7 +50,7 @@ public void run() { try { service.executePowerOperation(host, powerOperation, null); } catch (Exception e) { - LOG.warn(String.format("Out-of-band management background task operation=%s for host %s failed with: %s", + logger.warn(String.format("Out-of-band management background task operation=%s for host %s failed with: %s", powerOperation.name(), host.getName(), e.getMessage())); String eventMessage = String diff --git a/server/src/main/java/org/apache/cloudstack/poll/BackgroundPollManagerImpl.java b/server/src/main/java/org/apache/cloudstack/poll/BackgroundPollManagerImpl.java index f4a634032d4c..c6d4c56d4d9d 100644 --- a/server/src/main/java/org/apache/cloudstack/poll/BackgroundPollManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/poll/BackgroundPollManagerImpl.java @@ -22,7 +22,6 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.CloudRuntimeException; import com.google.common.base.Preconditions; -import org.apache.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -31,7 +30,6 @@ import java.util.concurrent.TimeUnit; public final class BackgroundPollManagerImpl extends ManagerBase implements BackgroundPollManager, Manager { - public static final Logger LOG = Logger.getLogger(BackgroundPollManagerImpl.class); private ScheduledExecutorService backgroundPollTaskScheduler; private List submittedTasks = new ArrayList<>(); @@ -57,7 +55,7 @@ public boolean start() { delay = getRoundDelay(); } backgroundPollTaskScheduler.scheduleWithFixedDelay(task, getInitialDelay(), delay, TimeUnit.MILLISECONDS); - 
LOG.debug("Scheduled background poll task: " + task.getClass().getName()); + logger.debug("Scheduled background poll task: " + task.getClass().getName()); } isConfiguredAndStarted = true; return true; @@ -77,7 +75,7 @@ public void submitTask(final BackgroundPollTask task) { if (isConfiguredAndStarted) { throw new CloudRuntimeException("Background Poll Manager cannot accept poll task as it has been configured and started."); } - LOG.debug("Background Poll Manager received task: " + task.getClass().getSimpleName()); + logger.debug("Background Poll Manager received task: " + task.getClass().getSimpleName()); submittedTasks.add(task); } } diff --git a/server/src/main/java/org/apache/cloudstack/region/RegionManagerImpl.java b/server/src/main/java/org/apache/cloudstack/region/RegionManagerImpl.java index 0878eef8e8ab..3085f655943f 100644 --- a/server/src/main/java/org/apache/cloudstack/region/RegionManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/region/RegionManagerImpl.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.admin.user.MoveUserCmd; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; @@ -49,7 +48,6 @@ @Component public class RegionManagerImpl extends ManagerBase implements RegionManager, Manager { - public static final Logger s_logger = Logger.getLogger(RegionManagerImpl.class); @Inject RegionDao _regionDao; diff --git a/server/src/main/java/org/apache/cloudstack/region/RegionServiceImpl.java b/server/src/main/java/org/apache/cloudstack/region/RegionServiceImpl.java index 5afafffc5dab..982395637e35 100644 --- a/server/src/main/java/org/apache/cloudstack/region/RegionServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/region/RegionServiceImpl.java @@ -22,7 +22,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; 
import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.account.DeleteAccountCmd; @@ -48,7 +47,6 @@ @Component public class RegionServiceImpl extends ManagerBase implements RegionService, Manager { - public static final Logger s_logger = Logger.getLogger(RegionServiceImpl.class); @Inject private RegionManager _regionMgr; diff --git a/server/src/main/java/org/apache/cloudstack/region/RegionsApiUtil.java b/server/src/main/java/org/apache/cloudstack/region/RegionsApiUtil.java index 7fbcfa0795ed..934087a59a5d 100644 --- a/server/src/main/java/org/apache/cloudstack/region/RegionsApiUtil.java +++ b/server/src/main/java/org/apache/cloudstack/region/RegionsApiUtil.java @@ -36,7 +36,8 @@ import org.apache.commons.httpclient.HttpMethod; import org.apache.commons.httpclient.NameValuePair; import org.apache.commons.httpclient.methods.GetMethod; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.thoughtworks.xstream.XStream; import com.thoughtworks.xstream.io.xml.DomDriver; @@ -50,7 +51,7 @@ * */ public class RegionsApiUtil { - public static final Logger s_logger = Logger.getLogger(RegionsApiUtil.class); + protected static Logger LOGGER = LogManager.getLogger(RegionsApiUtil.class); /** * Makes an api call using region service end_point, api command and params @@ -71,10 +72,10 @@ protected static boolean makeAPICall(Region region, String command, List params) } } } catch (UnsupportedEncodingException e) { - s_logger.error(e.getMessage()); + LOGGER.error(e.getMessage()); return null; } return paramString.toString(); @@ -282,7 +283,7 @@ private static String buildUrl(String apiParams, Region region) { return finalUrl; } catch (UnsupportedEncodingException e) { - s_logger.error(e.getMessage()); + LOGGER.error(e.getMessage()); return null; } } @@ -303,7 +304,7 @@ private static String signRequest(String request, String key) { byte[] encryptedBytes = mac.doFinal(); 
return URLEncoder.encode(Base64.encodeBase64String(encryptedBytes), "UTF-8"); } catch (Exception ex) { - s_logger.error(ex.getMessage()); + LOGGER.error(ex.getMessage()); return null; } } diff --git a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index 7c6ff0587c9a..3680c869eb19 100644 --- a/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -35,7 +35,8 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.region.Region; import org.apache.cloudstack.region.dao.RegionDao; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.routing.GlobalLoadBalancerConfigCommand; @@ -69,7 +70,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingRulesService { - private static final Logger s_logger = Logger.getLogger(GlobalLoadBalancingRulesServiceImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); @Inject AccountManager _accountMgr; @@ -159,7 +160,7 @@ public GlobalLoadBalancerRuleVO doInTransaction(TransactionStatus status) { } }); - s_logger.debug("successfully created new global load balancer rule for the account " + gslbOwner.getId()); + logger.debug("successfully created new global load balancer rule for the account " + gslbOwner.getId()); return newGslbRule; } @@ -279,11 +280,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { boolean success = false; try { - s_logger.debug("Configuring gslb rule configuration on the gslb service providers in the participating zones"); + logger.debug("Configuring gslb rule configuration on the gslb 
service providers in the participating zones"); // apply the gslb rule on to the back end gslb service providers on zones participating in gslb if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - s_logger.warn("Failed to add load balancer rules " + newLbRuleIds + " to global load balancer rule id " + gslbRuleId); + logger.warn("Failed to add load balancer rules " + newLbRuleIds + " to global load balancer rule id " + gslbRuleId); CloudRuntimeException ex = new CloudRuntimeException("Failed to add load balancer rules to GSLB rule "); throw ex; } @@ -382,11 +383,11 @@ public void doInTransactionWithoutResult(TransactionStatus status) { boolean success = false; try { - s_logger.debug("Attempting to configure global load balancer rule configuration on the gslb service providers "); + logger.debug("Attempting to configure global load balancer rule configuration on the gslb service providers "); // apply the gslb rule on to the back end gslb service providers if (!applyGlobalLoadBalancerRuleConfig(gslbRuleId, false)) { - s_logger.warn("Failed to remove load balancer rules " + lbRuleIdsToremove + " from global load balancer rule id " + gslbRuleId); + logger.warn("Failed to remove load balancer rules " + lbRuleIdsToremove + " from global load balancer rule id " + gslbRuleId); CloudRuntimeException ex = new CloudRuntimeException("Failed to remove load balancer rule ids from GSLB rule "); throw ex; } @@ -426,7 +427,7 @@ public boolean deleteGlobalLoadBalancerRule(DeleteGlobalLoadBalancerRuleCmd dele try { revokeGslbRule(gslbRuleId, caller); } catch (Exception e) { - s_logger.warn("Failed to delete GSLB rule due to" + e.getMessage()); + logger.warn("Failed to delete GSLB rule due to" + e.getMessage()); return false; } @@ -445,8 +446,8 @@ private void revokeGslbRule(final long gslbRuleId, Account caller) { _accountMgr.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, true, gslbRule); if (gslbRule.getState() == 
com.cloud.region.ha.GlobalLoadBalancerRule.State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rule Id: " + gslbRuleId + " is still in Staged state so just removing it."); + if (logger.isDebugEnabled()) { + logger.debug("Rule Id: " + gslbRuleId + " is still in Staged state so just removing it."); } _gslbRuleDao.remove(gslbRuleId); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE, gslbRule.getAccountId(), 0, gslbRule.getId(), gslbRule.getName(), @@ -541,7 +542,7 @@ public GlobalLoadBalancerRule updateGlobalLoadBalancerRule(UpdateGlobalLoadBalan _gslbRuleDao.update(gslbRule.getId(), gslbRule); try { - s_logger.debug("Updating global load balancer with id " + gslbRule.getUuid()); + logger.debug("Updating global load balancer with id " + gslbRule.getUuid()); // apply the gslb rule on to the back end gslb service providers on zones participating in gslb applyGlobalLoadBalancerRuleConfig(gslbRuleId, false); @@ -687,7 +688,7 @@ private boolean applyGlobalLoadBalancerRuleConfig(long gslbRuleId, boolean revok lookupGslbServiceProvider().applyGlobalLoadBalancerRule(zoneId.first(), zoneId.second(), gslbConfigCmd); } catch (ResourceUnavailableException | NullPointerException e) { String msg = "Failed to configure GSLB rule in the zone " + zoneId.first() + " due to " + e.getMessage(); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } } @@ -703,7 +704,7 @@ public boolean revokeAllGslbRulesForAccount(com.cloud.user.Account caller, long revokeGslbRule(gslbRule.getId(), caller); } } - s_logger.debug("Successfully cleaned up GSLB rules for account id=" + accountId); + logger.debug("Successfully cleaned up GSLB rules for account id=" + accountId); return true; } diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index a11593a86080..d7c3f1033f2e 100644 --- 
a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -45,7 +45,8 @@ import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -58,7 +59,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class SnapshotHelper { - private final Logger logger = Logger.getLogger(this.getClass()); + protected Logger logger = LogManager.getLogger(getClass()); @Inject protected SnapshotDataStoreDao snapshotDataStoreDao; diff --git a/server/src/main/java/org/apache/cloudstack/storage/NfsMountManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/NfsMountManagerImpl.java index b6105d589cd2..0d59a6e3a856 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/NfsMountManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/NfsMountManagerImpl.java @@ -38,12 +38,13 @@ import com.cloud.utils.script.Script; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.utils.identity.ManagementServerNode; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; @Component public class NfsMountManagerImpl implements NfsMountManager { - private static final Logger s_logger = Logger.getLogger(NfsMountManager.class); + protected Logger logger = LogManager.getLogger(getClass()); private StorageLayer storage; private int timeout; @@ -70,13 +71,13 @@ public String getMountPoint(String storageUrl, String nfsVersion) { try { uri = new URI(storageUrl); } catch (URISyntaxException e) { - 
s_logger.error("Invalid storage URL format ", e); + logger.error("Invalid storage URL format ", e); throw new CloudRuntimeException("Unable to create mount point due to invalid storage URL format " + storageUrl); } mountPoint = mount(uri.getHost() + ":" + uri.getPath(), MOUNT_PARENT.value(), nfsVersion); if (mountPoint == null) { - s_logger.error("Unable to create mount point for " + storageUrl); + logger.error("Unable to create mount point for " + storageUrl); throw new CloudRuntimeException("Unable to create mount point for " + storageUrl); } @@ -87,11 +88,11 @@ public String getMountPoint(String storageUrl, String nfsVersion) { private String mount(String path, String parent, String nfsVersion) { String mountPoint = setupMountPoint(parent); if (mountPoint == null) { - s_logger.warn("Unable to create a mount point"); + logger.warn("Unable to create a mount point"); return null; } - Script command = new Script(true, "mount", timeout, s_logger); + Script command = new Script(true, "mount", timeout, logger); command.add("-t", "nfs"); if (nfsVersion != null){ command.add("-o", "vers=" + nfsVersion); @@ -104,17 +105,17 @@ private String mount(String path, String parent, String nfsVersion) { command.add(mountPoint); String result = command.execute(); if (result != null) { - s_logger.warn("Unable to mount " + path + " due to " + result); + logger.warn("Unable to mount " + path + " due to " + result); deleteMountPath(mountPoint); return null; } // Change permissions for the mountpoint - Script script = new Script(true, "chmod", timeout, s_logger); + Script script = new Script(true, "chmod", timeout, logger); script.add("1777", mountPoint); result = script.execute(); if (result != null) { - s_logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); + logger.warn("Unable to set permissions for " + mountPoint + " due to " + result); } return mountPoint; } @@ -130,7 +131,7 @@ private String setupMountPoint(String parent) { break; } } - 
s_logger.error("Unable to create mount: " + mntPt); + logger.error("Unable to create mount: " + mntPt); } return mountPoint; @@ -140,29 +141,29 @@ private void umount(String localRootPath) { if (!mountExists(localRootPath)) { return; } - Script command = new Script(true, "umount", timeout, s_logger); + Script command = new Script(true, "umount", timeout, logger); command.add(localRootPath); String result = command.execute(); if (result != null) { // Fedora Core 12 errors out with any -o option executed from java String errMsg = "Unable to umount " + localRootPath + " due to " + result; - s_logger.error(errMsg); + logger.error(errMsg); throw new CloudRuntimeException(errMsg); } deleteMountPath(localRootPath); - s_logger.debug("Successfully umounted " + localRootPath); + logger.debug("Successfully umounted " + localRootPath); } private void deleteMountPath(String localRootPath) { try { Files.deleteIfExists(Paths.get(localRootPath)); } catch (IOException e) { - s_logger.warn(String.format("unable to delete mount directory %s:%s.%n", localRootPath, e.getMessage())); + logger.warn(String.format("unable to delete mount directory %s:%s.%n", localRootPath, e.getMessage())); } } private boolean mountExists(String localRootPath) { - Script script = new Script(true, "mount", timeout, s_logger); + Script script = new Script(true, "mount", timeout, logger); PathParser parser = new PathParser(localRootPath); script.execute(parser); return parser.getPaths().stream().filter(s -> s.contains(localRootPath)).findAny().map(s -> true).orElse(false); @@ -197,7 +198,7 @@ public boolean drain() { @PreDestroy public void destroy() { - s_logger.info("Clean up mounted NFS mount points used in current session."); + logger.info("Clean up mounted NFS mount points used in current session."); storageMounts.values().stream().forEach(this::umount); } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java 
b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java index 9dfc75e06f23..267d813364b8 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java @@ -42,7 +42,8 @@ import org.apache.cloudstack.storage.heuristics.presetvariables.Template; import org.apache.cloudstack.storage.heuristics.presetvariables.Volume; import org.apache.cloudstack.utils.jsinterpreter.JsInterpreter; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import javax.inject.Inject; import java.io.IOException; @@ -54,7 +55,7 @@ */ public class HeuristicRuleHelper { - protected static final Logger LOGGER = Logger.getLogger(HeuristicRuleHelper.class); + protected Logger logger = LogManager.getLogger(HeuristicRuleHelper.class); private static final Long HEURISTICS_SCRIPT_TIMEOUT = StorageManager.HEURISTICS_SCRIPT_TIMEOUT.value(); @@ -86,10 +87,10 @@ public DataStore getImageStoreIfThereIsHeuristicRule(Long zoneId, HeuristicType HeuristicVO heuristicsVO = secondaryStorageHeuristicDao.findByZoneIdAndType(zoneId, heuristicType); if (heuristicsVO == null) { - LOGGER.debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. Returning null.", zoneId, heuristicType)); + logger.debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. 
Returning null.", zoneId, heuristicType)); return null; } else { - LOGGER.debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicsVO, zoneId)); + logger.debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicsVO, zoneId)); return interpretHeuristicRule(heuristicsVO.getHeuristicRule(), heuristicType, obj, zoneId); } } @@ -270,7 +271,7 @@ public DataStore interpretHeuristicRule(String rule, HeuristicType heuristicType return dataStore; } catch (IOException ex) { String message = String.format("Error while executing script [%s].", rule); - LOGGER.error(message, ex); + logger.error(message, ex); throw new CloudRuntimeException(message, ex); } } diff --git a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java index bfd29cc04423..e6acd180f16f 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/object/BucketApiServiceImpl.java @@ -40,7 +40,6 @@ import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.ObjectStoreDao; import org.apache.cloudstack.storage.datastore.db.ObjectStoreVO; -import org.apache.log4j.Logger; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -51,7 +50,6 @@ import java.util.concurrent.TimeUnit; public class BucketApiServiceImpl extends ManagerBase implements BucketApiService, Configurable { - private final static Logger s_logger = Logger.getLogger(BucketApiServiceImpl.class); @Inject private ObjectStoreDao _objectStoreDao; @@ -108,7 +106,7 @@ public Bucket allocBucket(CreateBucketCmd cmd) { try { BucketNameUtils.validateBucketName(cmd.getBucketName()); } catch (IllegalBucketNameException e) { - s_logger.error("Invalid Bucket Name: " +cmd.getBucketName(), e); + 
logger.error("Invalid Bucket Name: " +cmd.getBucketName(), e); throw new InvalidParameterValueException("Invalid Bucket Name: "+e.getMessage()); } //ToDo check bucket exists @@ -118,11 +116,11 @@ public Bucket allocBucket(CreateBucketCmd cmd) { ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object); try { if(!objectStore.createUser(ownerId)) { - s_logger.error("Failed to create user in objectstore "+ objectStore.getName()); + logger.error("Failed to create user in objectstore "+ objectStore.getName()); return null; } } catch (CloudRuntimeException e) { - s_logger.error("Error while checking object store user.", e); + logger.error("Error while checking object store user.", e); return null; } @@ -166,7 +164,7 @@ public Bucket createBucket(CreateBucketCmd cmd) { bucket.setState(Bucket.State.Created); _bucketDao.update(bucket.getId(), bucket); } catch (Exception e) { - s_logger.debug("Failed to create bucket with name: "+bucket.getName(), e); + logger.debug("Failed to create bucket with name: "+bucket.getName(), e); if(bucketCreated) { objectStore.deleteBucket(bucket.getName()); } @@ -289,9 +287,9 @@ protected void runInContext() { } } } - s_logger.debug("Completed updating bucket usage for all object stores"); + logger.debug("Completed updating bucket usage for all object stores"); } catch (Exception e) { - s_logger.error("Error while fetching bucket usage", e); + logger.error("Error while fetching bucket usage", e); } finally { scanLock.unlock(); } diff --git a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java index 0371be864483..9e4a59031736 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/storage/template/VnfTemplateManagerImpl.java @@ -80,13 +80,10 @@ import 
org.apache.cloudstack.framework.config.Configurable; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; public class VnfTemplateManagerImpl extends ManagerBase implements VnfTemplateManager, PluggableService, Configurable { - static final Logger LOGGER = Logger.getLogger(VnfTemplateManagerImpl.class); - public static final String VNF_SECURITY_GROUP_NAME = "VNF_SecurityGroup_"; public static final String ACCESS_METHOD_SEPARATOR = ","; public static final Integer ACCESS_DEFAULT_SSH_PORT = 22; @@ -267,17 +264,17 @@ protected Map getManagementNetworkAndIp(VirtualMachineTemplate continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.StaticNat)) { - LOGGER.info(String.format("Network ID: %s does not support static nat, " + + logger.info(String.format("Network ID: %s does not support static nat, " + "skipping this network configuration for VNF appliance", network.getUuid())); continue; } if (network.getVpcId() != null) { - LOGGER.info(String.format("Network ID: %s is a VPC tier, " + + logger.info(String.format("Network ID: %s is a VPC tier, " + "skipping this network configuration for VNF appliance", network.getUuid())); continue; } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Network.Service.Firewall)) { - LOGGER.info(String.format("Network ID: %s does not support firewall, " + + logger.info(String.format("Network ID: %s does not support firewall, " + "skipping this network configuration for VNF appliance", network.getUuid())); continue; } @@ -296,10 +293,10 @@ public SecurityGroup createSecurityGroupForVnfAppliance(DataCenter zone, Virtual if (!cmd.getVnfConfigureManagement()) { return null; } - LOGGER.debug("Creating security group and rules for VNF appliance"); + logger.debug("Creating security group and rules for VNF appliance"); Set ports = getOpenPortsForVnfAppliance(template); if (ports.size() == 0) { - LOGGER.debug("No 
need to create security group and rules for VNF appliance as there is no ports to be open"); + logger.debug("No need to create security group and rules for VNF appliance as there is no ports to be open"); return null; } String securityGroupName = VNF_SECURITY_GROUP_NAME.concat(Long.toHexString(System.currentTimeMillis())); @@ -325,7 +322,7 @@ public void createIsolatedNetworkRulesForVnfAppliance(DataCenter zone, VirtualMa Set ports = getOpenPortsForVnfAppliance(template); for (Map.Entry entry : networkAndIpMap.entrySet()) { Network network = entry.getKey(); - LOGGER.debug("Creating network rules for VNF appliance on isolated network " + network.getUuid()); + logger.debug("Creating network rules for VNF appliance on isolated network " + network.getUuid()); String ip = entry.getValue(); IpAddress publicIp = networkService.allocateIP(owner, zone.getId(), network.getId(), null, null); if (publicIp == null) { @@ -366,7 +363,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws }); firewallService.applyIngressFwRules(publicIp.getId(), owner); } - LOGGER.debug("Created network rules for VNF appliance on isolated network " + network.getUuid()); + logger.debug("Created network rules for VNF appliance on isolated network " + network.getUuid()); } } } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 22aa36e6931b..2f8c7fb19bc5 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -171,7 +171,8 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import javax.inject.Inject; import 
java.util.ArrayList; @@ -187,7 +188,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager { public static final String VM_IMPORT_DEFAULT_TEMPLATE_NAME = "system-default-vm-import-dummy-template.iso"; public static final String KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME = "kvm-default-vm-import-dummy-template"; - private static final Logger LOGGER = Logger.getLogger(UnmanagedVMsManagerImpl.class); + protected Logger logger = LogManager.getLogger(UnmanagedVMsManagerImpl.class); private static final List importUnmanagedInstancesSupportedHypervisors = Arrays.asList(Hypervisor.HypervisorType.VMware, Hypervisor.HypervisorType.KVM); @@ -297,7 +298,7 @@ private VMTemplateVO createDefaultDummyVmImportTemplate(boolean isKVM) { templateDao.remove(template.getId()); template = templateDao.findByName(templateName); } catch (Exception e) { - LOGGER.error("Unable to create default dummy template for VM import", e); + logger.error("Unable to create default dummy template for VM import", e); } return template; } @@ -419,7 +420,7 @@ private List getAdditionalNameFilters(Cluster cluster) { } } } catch (Exception e) { - LOGGER.warn(String.format("Unable to find volume file name for volume ID: %s while adding filters unmanaged VMs", volumeVO.getUuid()), e); + logger.warn(String.format("Unable to find volume file name for volume ID: %s while adding filters unmanaged VMs", volumeVO.getUuid()), e); } if (!volumeFileNames.isEmpty()) { additionalNameFilter.addAll(volumeFileNames); @@ -478,7 +479,7 @@ private ServiceOfferingVO getUnmanagedInstanceServiceOffering(final UnmanagedIns try { cpuSpeed = Integer.parseInt(details.get(VmDetailConstants.CPU_SPEED)); } catch (Exception e) { - LOGGER.error(String.format("Failed to get CPU speed for importing VM [%s] due to [%s].", instance.getName(), e.getMessage()), e); + logger.error(String.format("Failed to get CPU speed for importing VM [%s] due to [%s].", instance.getName(), e.getMessage()), e); } } Map parameters = new HashMap<>(); @@ 
-572,7 +573,7 @@ private Pair> getRootAn Set callerDiskIds = dataDiskOfferingMap.keySet(); if (callerDiskIds.size() != disks.size() - 1) { String msg = String.format("VM has total %d disks for which %d disk offering mappings provided. %d disks need a disk offering for import", disks.size(), callerDiskIds.size(), disks.size() - 1); - LOGGER.error(String.format("%s. %s parameter can be used to provide disk offerings for the disks", msg, ApiConstants.DATADISK_OFFERING_LIST)); + logger.error(String.format("%s. %s parameter can be used to provide disk offerings for the disks", msg, ApiConstants.DATADISK_OFFERING_LIST)); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, msg); } List diskIdsWithoutOffering = new ArrayList<>(); @@ -733,7 +734,7 @@ private Map getUnmanagedNicNetworkMap(String instanceName, List> diskProfileStoragePoolList) { UserVm vm = userVm; if (vm == null) { - LOGGER.error(String.format("Failed to check migrations need during VM import")); + logger.error(String.format("Failed to check migrations need during VM import")); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to check migrations need during VM import")); } if (sourceHost == null || serviceOffering == null || diskProfileStoragePoolList == null) { - LOGGER.error(String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); + logger.error(String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to check migrations need during import, VM: %s", userVm.getInstanceName())); } if (!hostSupportsServiceOffering(sourceHost, serviceOffering)) { - LOGGER.debug(String.format("VM %s needs to be migrated", vm.getUuid())); + logger.debug(String.format("VM %s needs to be migrated", vm.getUuid())); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, template, 
serviceOffering, owner, null); DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList(); excludeList.addHost(sourceHost.getId()); @@ -903,7 +904,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration, cannot find deployment destination due to [%s].", vm.getInstanceName(), e.getMessage()); - LOGGER.warn(errorMsg, e); + logger.warn(errorMsg, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } @@ -923,7 +924,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ vm = userVmManager.getUserVm(vm.getId()); } catch (Exception e) { String errorMsg = String.format("VM import failed for Unmanaged VM [%s] during VM migration due to [%s].", vm.getInstanceName(), e.getMessage()); - LOGGER.error(errorMsg, e); + logger.error(errorMsg, e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } @@ -947,7 +948,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ if (poolSupportsOfferings) { continue; } - LOGGER.debug(String.format("Volume %s needs to be migrated", volumeVO.getUuid())); + logger.debug(String.format("Volume %s needs to be migrated", volumeVO.getUuid())); Pair, List> poolsPair = managementService.listStoragePoolsForSystemMigrationOfVolume(profile.getVolumeId(), null, null, null, null, false, true); if (CollectionUtils.isEmpty(poolsPair.first()) && CollectionUtils.isEmpty(poolsPair.second())) { cleanupFailedImportVM(vm); @@ -981,7 +982,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during 
volume ID: %s migration as no suitable pool found", userVm.getInstanceName(), volumeVO.getUuid())); } else { - LOGGER.debug(String.format("Found storage pool %s(%s) for migrating the volume %s to", storagePool.getName(), storagePool.getUuid(), volumeVO.getUuid())); + logger.debug(String.format("Found storage pool %s(%s) for migrating the volume %s to", storagePool.getName(), storagePool.getUuid(), volumeVO.getUuid())); } try { Volume volume = null; @@ -997,11 +998,11 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ } else { msg = String.format("Migration for volume ID: %s to destination pool ID: %s failed", volumeVO.getUuid(), storagePool.getUuid()); } - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } catch (Exception e) { - LOGGER.error(String.format("VM import failed for unmanaged vm: %s during volume migration", vm.getInstanceName()), e); + logger.error(String.format("VM import failed for unmanaged vm: %s during volume migration", vm.getInstanceName()), e); cleanupFailedImportVM(vm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm: %s during volume migration. 
%s", userVm.getInstanceName(), StringUtils.defaultString(e.getMessage()))); } @@ -1011,7 +1012,7 @@ private UserVm migrateImportedVM(HostVO sourceHost, VirtualMachineTemplate templ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOfferingVO serviceOfferingVO) { if (userVm == null || serviceOfferingVO == null) { - LOGGER.error(String.format("Failed to publish usage records during VM import because VM [%s] or ServiceOffering [%s] is null.", userVm, serviceOfferingVO)); + logger.error(String.format("Failed to publish usage records during VM import because VM [%s] or ServiceOffering [%s] is null.", userVm, serviceOfferingVO)); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "VM import failed for Unmanaged VM during publishing Usage Records."); } @@ -1028,7 +1029,7 @@ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOffer userVm.getHypervisorType().toString(), VirtualMachine.class.getName(), userVm.getUuid(), userVm.isDisplayVm()); } } catch (Exception e) { - LOGGER.error(String.format("Failed to publish usage records during VM import for unmanaged VM [%s] due to [%s].", userVm.getInstanceName(), e.getMessage()), e); + logger.error(String.format("Failed to publish usage records during VM import for unmanaged VM [%s] due to [%s].", userVm.getInstanceName(), e.getMessage()), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM import failed for unmanaged vm %s during publishing usage records", userVm.getInstanceName())); } @@ -1042,7 +1043,7 @@ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOffer UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), null, volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume()); } catch (Exception e) { - 
LOGGER.error(String.format("Failed to publish volume ID: %s usage records during VM import", volume.getUuid()), e); + logger.error(String.format("Failed to publish volume ID: %s usage records during VM import", volume.getUuid()), e); } resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.volume, volume.isDisplayVolume()); resourceLimitService.incrementResourceCount(userVm.getAccountId(), Resource.ResourceType.primary_storage, volume.isDisplayVolume(), volume.getSize()); @@ -1055,7 +1056,7 @@ private void publishVMUsageUpdateResourceCount(final UserVm userVm, ServiceOffer UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, userVm.getAccountId(), userVm.getDataCenterId(), userVm.getId(), Long.toString(nic.getId()), network.getNetworkOfferingId(), null, 1L, VirtualMachine.class.getName(), userVm.getUuid(), userVm.isDisplay()); } catch (Exception e) { - LOGGER.error(String.format("Failed to publish network usage records during VM import. %s", StringUtils.defaultString(e.getMessage()))); + logger.error(String.format("Failed to publish network usage records during VM import. 
%s", StringUtils.defaultString(e.getMessage()))); } } } @@ -1065,7 +1066,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI final ServiceOfferingVO serviceOffering, final Map dataDiskOfferingMap, final Map nicNetworkMap, final Map callerNicIpAddressMap, final Map details, final boolean migrateAllowed, final boolean forced, final boolean isImportUnmanagedFromSameHypervisor) { - LOGGER.debug(LogUtils.logGsonWithoutException("Trying to import VM [%s] with name [%s], in zone [%s], cluster [%s], and host [%s], using template [%s], service offering [%s], disks map [%s], NICs map [%s] and details [%s].", + logger.debug(LogUtils.logGsonWithoutException("Trying to import VM [%s] with name [%s], in zone [%s], cluster [%s], and host [%s], using template [%s], service offering [%s], disks map [%s], NICs map [%s] and details [%s].", unmanagedInstance, instanceName, zone, cluster, host, template, serviceOffering, dataDiskOfferingMap, nicNetworkMap, details)); UserVm userVm = null; ServiceOfferingVO validatedServiceOffering = null; @@ -1073,7 +1074,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI validatedServiceOffering = getUnmanagedInstanceServiceOffering(unmanagedInstance, serviceOffering, owner, zone, details, cluster.getHypervisorType()); } catch (Exception e) { String errorMsg = String.format("Failed to import Unmanaged VM [%s] because the service offering [%s] is not compatible due to [%s].", unmanagedInstance.getName(), serviceOffering.getUuid(), StringUtils.defaultIfEmpty(e.getMessage(), "")); - LOGGER.error(errorMsg, e); + logger.error(errorMsg, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, errorMsg); } @@ -1125,7 +1126,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI } resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.volume, unmanagedInstanceDisks.size()); } catch (ResourceAllocationException e) { - 
LOGGER.error(String.format("Volume resource allocation error for owner: %s", owner.getUuid()), e); + logger.error(String.format("Volume resource allocation error for owner: %s", owner.getUuid()), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Volume resource allocation error for owner: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } // Check NICs and supplied networks @@ -1151,7 +1152,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI cluster.getHypervisorType(), allDetails, powerState, null); } catch (InsufficientCapacityException ice) { String errorMsg = String.format("Failed to import VM [%s] due to [%s].", instanceName, ice.getMessage()); - LOGGER.error(errorMsg, ice); + logger.error(errorMsg, ice); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, errorMsg); } @@ -1186,7 +1187,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI deviceId++; } } catch (Exception e) { - LOGGER.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); + logger.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage()))); } @@ -1199,7 +1200,7 @@ private UserVm importVirtualMachineInternal(final UnmanagedInstanceTO unmanagedI nicIndex++; } } catch (Exception e) { - LOGGER.error(String.format("Failed to import NICs while importing vm: %s", instanceName), e); + logger.error(String.format("Failed to import NICs while importing vm: %s", instanceName), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import NICs while importing vm: %s. 
%s", instanceName, StringUtils.defaultString(e.getMessage()))); } @@ -1410,7 +1411,7 @@ private void checkResourceLimitForImportInstance(Account owner) { try { resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); } catch (ResourceAllocationException e) { - LOGGER.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. %s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } } @@ -1560,7 +1561,7 @@ protected UserVm importUnmanagedInstanceFromVmwareToKvm(DataCenter zone, Cluster VmwareDatacenterVO existingDC = vmwareDatacenterDao.findById(existingVcenterId); if (existingDC == null) { String err = String.format("Cannot find any existing Vmware DC with ID %s", existingVcenterId); - LOGGER.error(err); + logger.error(err); throw new CloudRuntimeException(err); } vcenter = existingDC.getVcenterHost(); @@ -1583,10 +1584,10 @@ protected UserVm importUnmanagedInstanceFromVmwareToKvm(DataCenter zone, Cluster serviceOffering, dataDiskOfferingMap, nicNetworkMap, nicIpAddressMap, details, false, forced, false); - LOGGER.debug(String.format("VM %s imported successfully", sourceVM)); + logger.debug(String.format("VM %s imported successfully", sourceVM)); return userVm; } catch (CloudRuntimeException e) { - LOGGER.error(String.format("Error importing VM: %s", e.getMessage()), e); + logger.error(String.format("Error importing VM: %s", e.getMessage()), e); ActionEventUtils.onCompletedActionEvent(userId, owner.getId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_VM_IMPORT, cmd.getEventDescription(), null, null, 0); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); @@ -1605,7 +1606,7 @@ private void checkNetworkingBeforeConvertingVmwareInstance(DataCenter zone, Acco if 
(nics.size() != networkIds.size()) { String msg = String.format("Different number of nics found on instance %s: %s vs %s nics provided", clonedInstance.getName(), nics.size(), networkIds.size()); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -1614,7 +1615,7 @@ private void checkNetworkingBeforeConvertingVmwareInstance(DataCenter zone, Acco NetworkVO network = networkDao.findById(networkId); if (network == null) { String err = String.format("Cannot find a network with id = %s", networkId); - LOGGER.error(err); + logger.error(err); throw new CloudRuntimeException(err); } Network.IpAddresses ipAddresses = null; @@ -1634,7 +1635,7 @@ private void checkUnmanagedNicAndNetworkMacAddressForImport(NetworkVO network, U if (existingNic != null && !forced) { String err = String.format("NIC with MAC address = %s exists on network with ID = %s and forced flag is disabled", nic.getMacAddress(), network.getId()); - LOGGER.error(err); + logger.error(err); throw new CloudRuntimeException(err); } } @@ -1686,10 +1687,10 @@ private void removeClonedInstance(String vcenter, String datacenterName, if (!result) { String msg = String.format("Could not properly remove the cloned instance %s from VMware datacenter %s:%s", clonedInstanceName, vcenter, datacenterName); - LOGGER.warn(msg); + logger.warn(msg); return; } - LOGGER.debug(String.format("Removed the cloned instance %s from VMWare datacenter %s:%s", + logger.debug(String.format("Removed the cloned instance %s from VMWare datacenter %s:%s", clonedInstanceName, vcenter, datacenterName)); } @@ -1708,14 +1709,14 @@ private HostVO selectInstanceConvertionKVMHostInCluster(Cluster destinationClust HostVO selectedHost = hostDao.findById(convertInstanceHostId); if (selectedHost == null) { String msg = String.format("Cannot find host with ID %s", convertInstanceHostId); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } if (selectedHost.getResourceState() != 
ResourceState.Enabled || selectedHost.getStatus() != Status.Up || selectedHost.getType() != Host.Type.Routing || selectedHost.getClusterId() != destinationCluster.getId()) { String msg = String.format("Cannot perform the conversion on the host %s as it is not a running and Enabled host", selectedHost.getName()); - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } return selectedHost; @@ -1724,7 +1725,7 @@ private HostVO selectInstanceConvertionKVMHostInCluster(Cluster destinationClust if (CollectionUtils.isEmpty(hosts)) { String err = String.format("Could not find any running %s host in cluster %s", destinationCluster.getHypervisorType(), destinationCluster.getName()); - LOGGER.error(err); + logger.error(err); throw new CloudRuntimeException(err); } List filteredHosts = hosts.stream() @@ -1733,7 +1734,7 @@ private HostVO selectInstanceConvertionKVMHostInCluster(Cluster destinationClust if (CollectionUtils.isEmpty(filteredHosts)) { String err = String.format("Could not find a %s host in cluster %s to perform the instance conversion", destinationCluster.getHypervisorType(), destinationCluster.getName()); - LOGGER.error(err); + logger.error(err); throw new CloudRuntimeException(err); } return filteredHosts.get(new Random().nextInt(filteredHosts.size())); @@ -1745,7 +1746,7 @@ private UnmanagedInstanceTO convertVmwareInstanceToKVM(String vcenter, String da Long convertInstanceHostId, Long convertStoragePoolId) { HostVO convertHost = selectInstanceConvertionKVMHostInCluster(destinationCluster, convertInstanceHostId); String vmName = clonedInstance.getName(); - LOGGER.debug(String.format("The host %s (%s) is selected to execute the conversion of the instance %s" + + logger.debug(String.format("The host %s (%s) is selected to execute the conversion of the instance %s" + " from VMware to KVM ", convertHost.getId(), convertHost.getName(), vmName)); RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(hostName, vmName, @@ -1763,14 +1764,14 
@@ private UnmanagedInstanceTO convertVmwareInstanceToKVM(String vcenter, String da } catch (AgentUnavailableException | OperationTimedoutException e) { String err = String.format("Could not send the convert instance command to host %s (%s) due to: %s", convertHost.getId(), convertHost.getName(), e.getMessage()); - LOGGER.error(err, e); + logger.error(err, e); throw new CloudRuntimeException(err); } if (!convertAnswer.getResult()) { String err = String.format("The convert process failed for instance %s from Vmware to KVM on host %s: %s", vmName, convertHost.getName(), convertAnswer.getDetails()); - LOGGER.error(err); + logger.error(err); throw new CloudRuntimeException(err); } return ((ConvertInstanceAnswer) convertAnswer).getConvertedInstance(); @@ -1788,7 +1789,7 @@ private List selectInstanceConvertionStoragePools(Cluster destinationClu } private void logFailureAndThrowException(String msg) { - LOGGER.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -1950,7 +1951,7 @@ private boolean existsVMToUnmanage(String instanceName, Long hostId) { } PrepareUnmanageVMInstanceAnswer answer = (PrepareUnmanageVMInstanceAnswer) ans; if (!answer.getResult()) { - LOGGER.error("Error verifying VM " + instanceName + " exists on host with ID = " + hostId + ": " + answer.getDetails()); + logger.error("Error verifying VM " + instanceName + " exists on host with ID = " + hostId + ": " + answer.getDetails()); } return answer.getResult(); } @@ -2003,7 +2004,7 @@ private UserVmResponse importKvmInstance(ImportVmCmd cmd) { try { resourceLimitService.checkResourceLimit(owner, Resource.ResourceType.user_vm, 1); } catch (ResourceAllocationException e) { - LOGGER.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); + logger.error(String.format("VM resource allocation error for account: %s", owner.getUuid()), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("VM resource allocation error for account: %s. 
%s", owner.getUuid(), StringUtils.defaultString(e.getMessage()))); } String displayName = cmd.getDisplayName(); @@ -2166,7 +2167,7 @@ private UserVm importExternalKvmVirtualMachine(final UnmanagedInstanceTO unmanag serviceOffering, null, hostName, Hypervisor.HypervisorType.KVM, allDetails, powerState, null); } catch (InsufficientCapacityException ice) { - LOGGER.error(String.format("Failed to import vm name: %s", instanceName), ice); + logger.error(String.format("Failed to import vm name: %s", instanceName), ice); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage()); } if (userVm == null) { @@ -2194,7 +2195,7 @@ private UserVm importExternalKvmVirtualMachine(final UnmanagedInstanceTO unmanag try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - LOGGER.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + logger.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); } @@ -2222,7 +2223,7 @@ private UserVm importExternalKvmVirtualMachine(final UnmanagedInstanceTO unmanag deviceId++; } } catch (Exception e) { - LOGGER.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); + logger.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. 
%s", instanceName, StringUtils.defaultString(e.getMessage()))); } @@ -2235,7 +2236,7 @@ private UserVm importExternalKvmVirtualMachine(final UnmanagedInstanceTO unmanag nicIndex++; } } catch (Exception e) { - LOGGER.error(String.format("Failed to import NICs while importing vm: %s", instanceName), e); + logger.error(String.format("Failed to import NICs while importing vm: %s", instanceName), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import NICs while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage()))); } @@ -2311,7 +2312,7 @@ private UserVm importKvmVirtualMachineFromDisk(final ImportSource importSource, serviceOffering, null, hostName, Hypervisor.HypervisorType.KVM, allDetails, powerState, networkNicMap); } catch (InsufficientCapacityException ice) { - LOGGER.error(String.format("Failed to import vm name: %s", instanceName), ice); + logger.error(String.format("Failed to import vm name: %s", instanceName), ice); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage()); } if (userVm == null) { @@ -2328,7 +2329,7 @@ private UserVm importKvmVirtualMachineFromDisk(final ImportSource importSource, try { dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null); } catch (Exception e) { - LOGGER.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); + logger.warn(String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName())); } @@ -2368,7 +2369,7 @@ private UserVm importKvmVirtualMachineFromDisk(final ImportSource importSource, template, deviceId, hostId, diskPath, diskProfile)); } } catch (Exception e) { - 
LOGGER.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); + logger.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e); cleanupFailedImportVM(userVm); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage()))); } @@ -2425,7 +2426,7 @@ private NetworkVO createDefaultNetworkForAccount(DataCenter zone, Account owner, throw new InvalidParameterValueException("Unable to find physical network with id: " + physicalNetworkId + " and tag: " + requiredOfferings.get(0).getTags()); } - LOGGER.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); + logger.debug("Creating network for account " + owner + " from the network offering id=" + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), owner.getAccountName() + "-network", owner.getAccountName() + "-network", null, null, null, false, null, owner, null, physicalNetwork, zone.getId(), ControlledEntity.ACLType.Account, null, null, null, null, true, null, null, null, null, null, null, null, null, null, null); diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java index 0bb2f94ba7f7..2898fd5d0f36 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMScheduleManagerImpl.java @@ -43,7 +43,6 @@ import org.apache.cloudstack.vm.schedule.dao.VMScheduleDao; import org.apache.commons.lang.time.DateUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import 
org.springframework.scheduling.support.CronExpression; import javax.inject.Inject; @@ -56,8 +55,6 @@ public class VMScheduleManagerImpl extends MutualExclusiveIdsManagerBase implements VMScheduleManager, PluggableService { - private static Logger LOGGER = Logger.getLogger(VMScheduleManagerImpl.class); - @Inject private VMScheduleDao vmScheduleDao; @Inject @@ -118,7 +115,7 @@ public VMScheduleResponse createSchedule(CreateVMScheduleCmd cmd) { description = String.format("%s - %s", action, DateUtil.getHumanReadableSchedule(cronExpression)); } else description = cmd.getDescription(); - LOGGER.warn(String.format("Using timezone [%s] for running the schedule for VM [%s], as an equivalent of [%s].", timeZoneId, vm.getUuid(), cmdTimeZone)); + logger.warn(String.format("Using timezone [%s] for running the schedule for VM [%s], as an equivalent of [%s].", timeZoneId, vm.getUuid(), cmdTimeZone)); String finalDescription = description; VMSchedule.Action finalAction = action; @@ -215,7 +212,7 @@ public VMScheduleResponse updateSchedule(UpdateVMScheduleCmd cmd) { timeZone = TimeZone.getTimeZone(cmdTimeZone); timeZoneId = timeZone.getID(); if (!timeZoneId.equals(cmdTimeZone)) { - LOGGER.warn(String.format("Using timezone [%s] for running the schedule [%s] for VM %s, as an equivalent of [%s].", + logger.warn(String.format("Using timezone [%s] for running the schedule [%s] for VM %s, as an equivalent of [%s].", timeZoneId, vmSchedule.getSchedule(), vmSchedule.getVmId(), cmdTimeZone)); } vmSchedule.setTimeZone(timeZoneId); diff --git a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java index 5d25f36ca316..139a4d0be1f4 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/schedule/VMSchedulerImpl.java @@ -43,7 +43,6 @@ import org.apache.cloudstack.vm.schedule.dao.VMScheduleDao; import 
org.apache.cloudstack.vm.schedule.dao.VMScheduledJobDao; import org.apache.commons.lang.time.DateUtils; -import org.apache.log4j.Logger; import org.springframework.scheduling.support.CronExpression; import javax.inject.Inject; @@ -61,7 +60,6 @@ import java.util.TimerTask; public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configurable { - private static Logger LOGGER = Logger.getLogger(VMSchedulerImpl.class); @Inject private VMScheduledJobDao vmScheduledJobDao; @Inject @@ -97,12 +95,12 @@ public String getConfigComponentName() { @Override public void removeScheduledJobs(List vmScheduleIds) { if (vmScheduleIds == null || vmScheduleIds.isEmpty()) { - LOGGER.debug("Removed 0 scheduled jobs"); + logger.debug("Removed 0 scheduled jobs"); return; } Date now = new Date(); int rowsRemoved = vmScheduledJobDao.expungeJobsForSchedules(vmScheduleIds, now); - LOGGER.debug(String.format("Removed %s VM scheduled jobs", rowsRemoved)); + logger.debug(String.format("Removed %s VM scheduled jobs", rowsRemoved)); } @Override @@ -114,7 +112,7 @@ public void updateScheduledJob(VMScheduleVO vmSchedule) { @Override public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { if (!vmSchedule.getEnabled()) { - LOGGER.debug(String.format("VM Schedule [id=%s] for VM [id=%s] is disabled. Not scheduling next job.", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.debug(String.format("VM Schedule [id=%s] for VM [id=%s] is disabled. Not scheduling next job.", vmSchedule.getUuid(), vmSchedule.getVmId())); return null; } @@ -124,7 +122,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { VirtualMachine vm = userVmManager.getUserVm(vmSchedule.getVmId()); if (vm == null) { - LOGGER.info(String.format("VM [id=%s] is removed. Disabling VM schedule [id=%s].", vmSchedule.getVmId(), vmSchedule.getUuid())); + logger.info(String.format("VM [id=%s] is removed. 
Disabling VM schedule [id=%s].", vmSchedule.getVmId(), vmSchedule.getUuid())); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -142,7 +140,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { zonedEndDate = ZonedDateTime.ofInstant(endDate.toInstant(), vmSchedule.getTimeZoneId()); } if (zonedEndDate != null && now.isAfter(zonedEndDate)) { - LOGGER.info(String.format("End time is less than current time. Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.info(String.format("End time is less than current time. Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -156,7 +154,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { } if (ts == null) { - LOGGER.info(String.format("No next schedule found. Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); + logger.info(String.format("No next schedule found. 
Disabling VM schedule [id=%s] for VM [id=%s].", vmSchedule.getUuid(), vmSchedule.getVmId())); vmSchedule.setEnabled(false); vmScheduleDao.persist(vmSchedule); return null; @@ -170,7 +168,7 @@ public Date scheduleNextJob(VMScheduleVO vmSchedule, Date timestamp) { String.format("Scheduled action (%s) [vmId: %s scheduleId: %s] at %s", vmSchedule.getAction(), vm.getUuid(), vmSchedule.getUuid(), scheduledDateTime), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(), true, 0); } catch (EntityExistsException exception) { - LOGGER.debug("Job is already scheduled."); + logger.debug("Job is already scheduled."); } return scheduledDateTime; } @@ -194,7 +192,7 @@ protected void runInContext() { try { poll(new Date()); } catch (final Throwable t) { - LOGGER.warn("Catch throwable in VM scheduler ", t); + logger.warn("Catch throwable in VM scheduler ", t); } } }; @@ -208,7 +206,7 @@ protected void runInContext() { public void poll(Date timestamp) { currentTimestamp = DateUtils.round(timestamp, Calendar.MINUTE); String displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, currentTimestamp); - LOGGER.debug(String.format("VM scheduler.poll is being called at %s", displayTime)); + logger.debug(String.format("VM scheduler.poll is being called at %s", displayTime)); GlobalLock scanLock = GlobalLock.getInternLock("vmScheduler.poll"); try { @@ -239,7 +237,7 @@ public void poll(Date timestamp) { try { cleanupVMScheduledJobs(); } catch (Exception e) { - LOGGER.warn("Error in cleaning up vm scheduled jobs", e); + logger.warn("Error in cleaning up vm scheduled jobs", e); } } @@ -248,7 +246,7 @@ private void scheduleNextJobs(Date timestamp) { try { scheduleNextJob(schedule, timestamp); } catch (Exception e) { - LOGGER.warn("Error in scheduling next job for schedule " + schedule.getUuid(), e); + logger.warn("Error in scheduling next job for schedule " + schedule.getUuid(), e); } } } @@ -259,7 +257,7 @@ private void scheduleNextJobs(Date timestamp) { private void 
cleanupVMScheduledJobs() { Date deleteBeforeDate = DateUtils.addDays(currentTimestamp, -1 * VMScheduledJobExpireInterval.value()); int rowsRemoved = vmScheduledJobDao.expungeJobsBefore(deleteBeforeDate); - LOGGER.info(String.format("Cleaned up %d VM scheduled job entries", rowsRemoved)); + logger.info(String.format("Cleaned up %d VM scheduled job entries", rowsRemoved)); } void executeJobs(Map jobsToExecute) { @@ -271,10 +269,10 @@ void executeJobs(Map jobsToExecute) { VMScheduledJobVO tmpVMScheduleJob = null; try { - if (LOGGER.isDebugEnabled()) { + if (logger.isDebugEnabled()) { final Date scheduledTimestamp = vmScheduledJob.getScheduledTime(); displayTime = DateUtil.displayDateInTimezone(DateUtil.GMT_TIMEZONE, scheduledTimestamp); - LOGGER.debug(String.format("Executing %s for VM id %d for schedule id: %d at %s", vmScheduledJob.getAction(), vmScheduledJob.getVmId(), vmScheduledJob.getVmScheduleId(), displayTime)); + logger.debug(String.format("Executing %s for VM id %d for schedule id: %d at %s", vmScheduledJob.getAction(), vmScheduledJob.getVmId(), vmScheduledJob.getVmScheduleId(), displayTime)); } tmpVMScheduleJob = vmScheduledJobDao.acquireInLockTable(vmScheduledJob.getId()); @@ -284,7 +282,7 @@ void executeJobs(Map jobsToExecute) { vmScheduledJobDao.update(vmScheduledJob.getId(), tmpVMScheduleJob); } } catch (final Exception e) { - LOGGER.warn(String.format("Executing scheduled job id: %s failed due to %s", vmScheduledJob.getId(), e)); + logger.warn(String.format("Executing scheduled job id: %s failed due to %s", vmScheduledJob.getId(), e)); } finally { if (tmpVMScheduleJob != null) { vmScheduledJobDao.releaseFromLockTable(vmScheduledJob.getId()); @@ -295,7 +293,7 @@ void executeJobs(Map jobsToExecute) { Long processJob(VMScheduledJob vmScheduledJob, VirtualMachine vm) { if (!Arrays.asList(VirtualMachine.State.Running, VirtualMachine.State.Stopped).contains(vm.getState())) { - LOGGER.info(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] 
because VM is invalid state: %s", vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); + logger.info(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is invalid state: %s", vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); return null; } @@ -319,7 +317,7 @@ Long processJob(VMScheduledJob vmScheduledJob, VirtualMachine vm) { return executeStartVMJob(vm, eventId); } - LOGGER.warn(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is in state: %s", + logger.warn(String.format("Skipping action (%s) for [vmId:%s scheduleId: %s] because VM is in state: %s", vmScheduledJob.getAction(), vm.getUuid(), vmScheduledJob.getVmScheduleId(), vm.getState())); return null; } @@ -331,7 +329,7 @@ private void skipJobs(Map jobsToExecute, Map vmScheduledJobs = vmScheduledJobDao.listJobsToStart(currentTimestamp); - LOGGER.debug(String.format("Got %d scheduled jobs to be executed at %s", vmScheduledJobs.size(), displayTime)); + logger.debug(String.format("Got %d scheduled jobs to be executed at %s", vmScheduledJobs.size(), displayTime)); Map jobsToExecute = new HashMap<>(); Map> jobsNotToExecute = new HashMap<>(); diff --git a/server/src/test/java/com/cloud/alert/AlertControlsUnitTest.java b/server/src/test/java/com/cloud/alert/AlertControlsUnitTest.java index 20c437ea6d29..e1ccbb00544d 100644 --- a/server/src/test/java/com/cloud/alert/AlertControlsUnitTest.java +++ b/server/src/test/java/com/cloud/alert/AlertControlsUnitTest.java @@ -21,7 +21,8 @@ import com.cloud.user.Account; import com.cloud.user.AccountManager; import junit.framework.TestCase; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -39,7 +40,7 @@ import static org.mockito.Mockito.when; public class AlertControlsUnitTest extends TestCase { - private 
static final Logger s_logger = Logger.getLogger(AlertControlsUnitTest.class); + private Logger logger = LogManager.getLogger(AlertControlsUnitTest.class); @Spy ManagementServerImpl _mgmtServer = new ManagementServerImpl(); @@ -69,10 +70,10 @@ public void tearDown() throws Exception { @Test public void testInjected() throws Exception { - s_logger.info("Starting test to archive and delete alerts"); + logger.info("Starting test to archive and delete alerts"); archiveAlerts(); deleteAlerts(); - s_logger.info("archive/delete alerts: TEST PASSED"); + logger.info("archive/delete alerts: TEST PASSED"); } protected void archiveAlerts() { diff --git a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java index dc787d73484a..ba0d3cab002c 100644 --- a/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java +++ b/server/src/test/java/com/cloud/alert/AlertManagerImplTest.java @@ -18,7 +18,7 @@ import com.cloud.alert.dao.AlertDao; import org.apache.cloudstack.utils.mailing.SMTPMailSender; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; diff --git a/server/src/test/java/com/cloud/api/dispatch/ParamGenericValidationWorkerTest.java b/server/src/test/java/com/cloud/api/dispatch/ParamGenericValidationWorkerTest.java index bfb1b2915450..23f733bc85e8 100644 --- a/server/src/test/java/com/cloud/api/dispatch/ParamGenericValidationWorkerTest.java +++ b/server/src/test/java/com/cloud/api/dispatch/ParamGenericValidationWorkerTest.java @@ -33,36 +33,39 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.Logger; import org.junit.Test; import java.util.HashMap; import java.util.Map; import java.util.UUID; -import static 
org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.Mockito; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import org.mockito.junit.MockitoJUnitRunner; +@RunWith(MockitoJUnitRunner.class) public class ParamGenericValidationWorkerTest { protected static final String FAKE_CMD_NAME = "fakecmdname"; protected static final String FAKE_CMD_ROLE_NAME = "fakecmdrolename"; + @Mock Logger loggerMock; + protected String loggerOutput; protected void driveTest(final BaseCmd cmd, final Map params) { final ParamGenericValidationWorker genValidationWorker = new ParamGenericValidationWorker(); // We create a mock logger to verify the result - ParamGenericValidationWorker.s_logger = new Logger("") { - @Override - public void warn(final Object msg) { - loggerOutput = msg.toString(); - } - }; + genValidationWorker.logger = loggerMock; // Execute genValidationWorker.handle(new DispatchTask(cmd, params)); @@ -114,8 +117,7 @@ public void testHandle() throws ResourceAllocationException { CallContext.unregister(); } - // Assert - assertEquals("There should be no errors since there are no unknown parameters for this command class", null, loggerOutput); + Mockito.verify(loggerMock, Mockito.never()).warn(Mockito.anyString()); } @Test @@ -139,9 +141,13 @@ public void testHandleWithUnknownParams() throws ResourceAllocationException { CallContext.unregister(); } + ArgumentCaptor captor = ArgumentCaptor.forClass(String.class); + + Mockito.verify(loggerMock).warn(captor.capture()); + // Assert - assertTrue("There should be error msg, since there is one unknown parameter", loggerOutput.contains(unknownParamKey)); - assertTrue("There should be error msg containing the correct command name", loggerOutput.contains(FAKE_CMD_NAME)); + assertTrue("There should be error msg, since there is one unknown parameter", 
captor.getValue().contains(unknownParamKey)); + assertTrue("There should be error msg containing the correct command name", captor.getValue().contains(FAKE_CMD_NAME)); } @Test @@ -150,9 +156,13 @@ public void testHandleWithoutAuthorization() throws ResourceAllocationException driveAuthTest(type); + ArgumentCaptor captor = ArgumentCaptor.forClass(String.class); + + Mockito.verify(loggerMock).warn(captor.capture()); + // Assert - assertTrue("There should be error msg, since there is one unauthorized parameter", loggerOutput.contains("paramWithRole")); - assertTrue("There should be error msg containing the correct command name", loggerOutput.contains(FAKE_CMD_ROLE_NAME)); + assertTrue("There should be error msg, since there is one unauthorized parameter", captor.getValue().contains("paramWithRole")); + assertTrue("There should be error msg containing the correct command name", captor.getValue().contains(FAKE_CMD_ROLE_NAME)); } @Test @@ -161,7 +171,7 @@ public void testHandleWithAuthorization() throws ResourceAllocationException { driveAuthTest(type); // Assert - assertEquals("There should be no errors since parameters have authorization", null, loggerOutput); + Mockito.verify(loggerMock, Mockito.never()).warn(Mockito.anyString()); } protected void driveAuthTest(final Account.Type type) { diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index d421c3223933..a579a8ae36ae 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -97,7 +97,8 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -135,7 +136,7 @@ public class ConfigurationManagerTest { - private static final Logger s_logger = Logger.getLogger(ConfigurationManagerTest.class); + private Logger logger = LogManager.getLogger(ConfigurationManagerTest.class); @Spy @InjectMocks @@ -272,7 +273,7 @@ public void tearDown() throws Exception { @Test public void testDedicatePublicIpRange() throws Exception { - s_logger.info("Running tests for DedicatePublicIpRange API"); + logger.info("Running tests for DedicatePublicIpRange API"); /* * TEST 1: given valid parameters DedicatePublicIpRange should succeed @@ -302,7 +303,7 @@ public void testDedicatePublicIpRange() throws Exception { @Test public void testReleasePublicIpRange() throws Exception { - s_logger.info("Running tests for DedicatePublicIpRange API"); + logger.info("Running tests for DedicatePublicIpRange API"); /* * TEST 1: given valid parameters and no allocated public ip's in the range ReleasePublicIpRange should succeed @@ -345,7 +346,7 @@ void runDedicatePublicIpRangePostiveTest() throws Exception { Vlan result = configurationMgr.dedicatePublicIpRange(dedicatePublicIpRangesCmd); Assert.assertNotNull(result); } catch (Exception e) { - s_logger.info("exception in testing runDedicatePublicIpRangePostiveTest message: " + e.toString()); + logger.info("exception in testing runDedicatePublicIpRangePostiveTest message: " + e.toString()); } finally { txn.close("runDedicatePublicIpRangePostiveTest"); } @@ -464,7 +465,7 @@ void runReleasePublicIpRangePostiveTest1() throws Exception { Boolean result = configurationMgr.releasePublicIpRange(releasePublicIpRangesCmd); Assert.assertTrue(result); } catch (Exception e) { - s_logger.info("exception in testing runReleasePublicIpRangePostiveTest1 message: " + e.toString()); + logger.info("exception in testing runReleasePublicIpRangePostiveTest1 message: " + e.toString()); } finally { 
txn.close("runReleasePublicIpRangePostiveTest1"); } @@ -498,7 +499,7 @@ void runReleasePublicIpRangePostiveTest2() throws Exception { Boolean result = configurationMgr.releasePublicIpRange(releasePublicIpRangesCmd); Assert.assertTrue(result); } catch (Exception e) { - s_logger.info("exception in testing runReleasePublicIpRangePostiveTest2 message: " + e.toString()); + logger.info("exception in testing runReleasePublicIpRangePostiveTest2 message: " + e.toString()); } finally { txn.close("runReleasePublicIpRangePostiveTest2"); } diff --git a/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java b/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java index ed23d1ecb70a..bea2096cec9c 100644 --- a/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java +++ b/server/src/test/java/com/cloud/consoleproxy/ConsoleProxyManagerTest.java @@ -31,7 +31,9 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; import com.google.gson.JsonParseException; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -55,7 +57,7 @@ public class ConsoleProxyManagerTest { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyManagerTest.class); + protected Logger logger = LogManager.getLogger(getClass()); @Mock GlobalLock globalLockMock; @@ -65,6 +67,9 @@ public class ConsoleProxyManagerTest { DataCenterDao dataCenterDaoMock; @Mock NetworkDao networkDaoMock; + + @Mock + Logger loggerMock; @Mock ConsoleProxyManagerImpl consoleProxyManagerImplMock; private AutoCloseable closeable; @@ -75,6 +80,7 @@ public void setup() throws Exception { ReflectionTestUtils.setField(consoleProxyManagerImplMock, "allocProxyLock", globalLockMock); ReflectionTestUtils.setField(consoleProxyManagerImplMock, "dataCenterDao", dataCenterDaoMock); 
ReflectionTestUtils.setField(consoleProxyManagerImplMock, "networkDao", networkDaoMock); + ReflectionTestUtils.setField(consoleProxyManagerImplMock, "logger", loggerMock); Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).expandPool(Mockito.anyLong(), Mockito.any()); Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).getDefaultNetworkForCreation(Mockito.any(DataCenter.class)); Mockito.doCallRealMethod().when(consoleProxyManagerImplMock).getDefaultNetworkForAdvancedZone(Mockito.any(DataCenter.class)); @@ -88,7 +94,7 @@ public void tearDown() throws Exception { @Test public void testNewCPVMCreation() throws Exception { - s_logger.info("Running test for new CPVM creation"); + logger.info("Running test for new CPVM creation"); // No existing CPVM Mockito.when(consoleProxyManagerImplMock.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(null); @@ -104,7 +110,7 @@ public void testNewCPVMCreation() throws Exception { @Test public void testExistingCPVMStart() throws Exception { - s_logger.info("Running test for existing CPVM start"); + logger.info("Running test for existing CPVM start"); // CPVM already exists Mockito.when(consoleProxyManagerImplMock.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(consoleProxyVOMock); @@ -116,7 +122,7 @@ public void testExistingCPVMStart() throws Exception { @Test public void testExisingCPVMStartFailure() throws Exception { - s_logger.info("Running test for existing CPVM start failure"); + logger.info("Running test for existing CPVM start failure"); // CPVM already exists Mockito.when(consoleProxyManagerImplMock.assignProxyFromStoppedPool(Mockito.anyLong())).thenReturn(consoleProxyVOMock); diff --git a/server/src/test/java/com/cloud/event/EventControlsUnitTest.java b/server/src/test/java/com/cloud/event/EventControlsUnitTest.java index 5e29f219c50e..8a968ed5eb9c 100644 --- a/server/src/test/java/com/cloud/event/EventControlsUnitTest.java +++ 
b/server/src/test/java/com/cloud/event/EventControlsUnitTest.java @@ -23,7 +23,8 @@ import junit.framework.TestCase; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -41,7 +42,7 @@ import static org.mockito.Mockito.when; public class EventControlsUnitTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(EventControlsUnitTest.class); + private Logger logger = LogManager.getLogger(EventControlsUnitTest.class); @Spy ManagementServerImpl _mgmtServer = new ManagementServerImpl(); @@ -70,10 +71,10 @@ public void tearDown() throws Exception { @Test public void testInjected() throws Exception { - s_logger.info("Starting test to archive and delete events"); + logger.info("Starting test to archive and delete events"); archiveEvents(); deleteEvents(); - s_logger.info("archive/delete events: TEST PASSED"); + logger.info("archive/delete events: TEST PASSED"); } protected void archiveEvents() { diff --git a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java index 06a078ae88c3..fcd3c373addb 100644 --- a/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java +++ b/server/src/test/java/com/cloud/ha/HighAvailabilityManagerImplTest.java @@ -34,7 +34,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContext; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -76,7 +77,7 @@ 
@RunWith(MockitoJUnitRunner.class) public class HighAvailabilityManagerImplTest { - private static final Logger s_logger = Logger.getLogger(HighAvailabilityManagerImplTest.class); + protected Logger logger = LogManager.getLogger(getClass()); @Mock HighAvailabilityDao _haDao; @Mock @@ -136,7 +137,6 @@ public static void initOnce() { processWorkMethod = HighAvailabilityManagerImpl.class.getDeclaredMethod("processWork", HaWorkVO.class); processWorkMethod.setAccessible(true); } catch (NoSuchMethodException e) { - s_logger.info("[ignored] expected NoSuchMethodException caught: " + e.getLocalizedMessage()); } } @@ -191,13 +191,13 @@ public void scheduleRestartForVmsOnHostNonEmptyVMList() { List vms = new ArrayList(); VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); Mockito.lenient().when(vm1.getHostId()).thenReturn(1l); - Mockito.when(vm1.getInstanceName()).thenReturn("i-2-3-VM"); + //Mockito.when(vm1.getInstanceName()).thenReturn("i-2-3-VM"); Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); Mockito.when(vm1.isHaEnabled()).thenReturn(true); vms.add(vm1); VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm2.getHostId()).thenReturn(1l); - Mockito.when(vm2.getInstanceName()).thenReturn("r-2-VM"); + //Mockito.when(vm2.getInstanceName()).thenReturn("r-2-VM"); Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.DomainRouter); Mockito.when(vm2.isHaEnabled()).thenReturn(true); vms.add(vm2); @@ -247,11 +247,11 @@ private void processWorkWithRetryCount(int count, Step expectedStep) { try { processWorkMethod.invoke(highAvailabilityManagerSpy, work); } catch (IllegalAccessException e) { - s_logger.info("[ignored] expected IllegalAccessException caught: " + e.getLocalizedMessage()); + logger.info("[ignored] expected IllegalAccessException caught: " + e.getLocalizedMessage()); } catch (IllegalArgumentException e) { - s_logger.info("[ignored] expected IllegalArgumentException caught: " + e.getLocalizedMessage()); + logger.info("[ignored] 
expected IllegalArgumentException caught: " + e.getLocalizedMessage()); } catch (InvocationTargetException e) { - s_logger.info("[ignored] expected InvocationTargetException caught: " + e.getLocalizedMessage()); + logger.info("[ignored] expected InvocationTargetException caught: " + e.getLocalizedMessage()); } assertTrue(work.getStep() == expectedStep); } diff --git a/server/src/test/java/com/cloud/keystore/KeystoreTest.java b/server/src/test/java/com/cloud/keystore/KeystoreTest.java index 2a0b909860ed..970892dc325e 100644 --- a/server/src/test/java/com/cloud/keystore/KeystoreTest.java +++ b/server/src/test/java/com/cloud/keystore/KeystoreTest.java @@ -20,12 +20,10 @@ import junit.framework.TestCase; import org.apache.cloudstack.api.response.AlertResponse; import org.apache.cloudstack.api.response.UserVmResponse; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; public class KeystoreTest extends TestCase { - private final static Logger s_logger = Logger.getLogger(KeystoreTest.class); private final String keyContent = "MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALV5vGlkiWwoZX4hTRplPXP8qtST\n" + "hwZhko8noeY5vf8ECwmd+vrCTw/JvnOtkx/8oYNbg/SeUt1EfOsk6gqJdBblGFBZRMcUJlIpqE9z\n" diff --git a/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java b/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java index 6564f48ff26d..1160bf2ac8e9 100644 --- a/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java +++ b/server/src/test/java/com/cloud/network/CreatePrivateNetworkTest.java @@ -48,7 +48,9 @@ import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -70,7 +72,7 @@ 
//@Ignore("Requires database to be set up") public class CreatePrivateNetworkTest { - private static final Logger s_logger = Logger.getLogger(CreatePrivateNetworkTest.class); + protected Logger logger = LogManager.getLogger(getClass()); NetworkServiceImpl networkService = new NetworkServiceImpl(); @@ -173,13 +175,13 @@ public void createInvalidlyHostedPrivateNetwork() { Assert.assertEquals("'bla' should not be accepted as scheme", true, invalid); Assert.assertEquals("'mido' should not yet be supported as scheme", true, unsupported); } catch (ResourceAllocationException e) { - s_logger.error("no resources", e); + logger.error("no resources", e); fail("no resources"); } catch (ConcurrentOperationException e) { - s_logger.error("another one is in the way", e); + logger.error("another one is in the way", e); fail("another one is in the way"); } catch (InsufficientCapacityException e) { - s_logger.error("no capacity", e); + logger.error("no capacity", e); fail("no capacity"); } finally { __txn.close("createInvalidlyHostedPrivateNetworkTest"); diff --git a/server/src/test/java/com/cloud/network/DedicateGuestVlanRangesTest.java b/server/src/test/java/com/cloud/network/DedicateGuestVlanRangesTest.java index 5f39bfbeeadb..949c68636b54 100644 --- a/server/src/test/java/com/cloud/network/DedicateGuestVlanRangesTest.java +++ b/server/src/test/java/com/cloud/network/DedicateGuestVlanRangesTest.java @@ -36,7 +36,6 @@ import org.apache.cloudstack.api.command.admin.network.ListDedicatedGuestVlanRangesCmd; import org.apache.cloudstack.api.command.admin.network.ReleaseDedicatedGuestVlanRangeCmd; import org.apache.cloudstack.context.CallContext; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -57,7 +56,6 @@ public class DedicateGuestVlanRangesTest { - private static final Logger s_logger = Logger.getLogger(DedicateGuestVlanRangesTest.class); NetworkServiceImpl networkService = new NetworkServiceImpl(); @@ -132,7 +130,6 @@ 
public void tearDown() throws Exception { @Test public void testDedicateGuestVlanRange() throws Exception { - s_logger.info("Running tests for DedicateGuestVlanRange API"); /* * TEST 1: given valid parameters DedicateGuestVlanRange should succeed @@ -168,7 +165,6 @@ public void testDedicateGuestVlanRange() throws Exception { @Test public void testReleaseDedicatedGuestVlanRange() throws Exception { - s_logger.info("Running tests for ReleaseDedicatedGuestVlanRange API"); /* * TEST 1: given valid parameters ReleaseDedicatedGuestVlanRange should succeed @@ -211,7 +207,6 @@ void runDedicateGuestVlanRangePostiveTest() throws Exception { GuestVlanRange result = networkService.dedicateGuestVlanRange(dedicateGuestVlanRangesCmd); Assert.assertNotNull(result); } catch (Exception e) { - s_logger.info("exception in testing runDedicateGuestVlanRangePostiveTest message: " + e.toString()); } finally { txn.close("runDedicateGuestRangePostiveTest"); } @@ -356,7 +351,6 @@ void runReleaseDedicatedGuestVlanRangePostiveTest() throws Exception { Boolean result = networkService.releaseDedicatedGuestVlanRange(releaseDedicatedGuestVlanRangesCmd.getId()); Assert.assertTrue(result); } catch (Exception e) { - s_logger.info("exception in testing runReleaseGuestVlanRangePostiveTest1 message: " + e.toString()); } finally { txn.close("runReleaseDedicatedGuestVlanRangePostiveTest"); } diff --git a/server/src/test/java/com/cloud/network/firewall/FirewallManagerTest.java b/server/src/test/java/com/cloud/network/firewall/FirewallManagerTest.java index f8c37cd6bf77..04ef756a3c8c 100644 --- a/server/src/test/java/com/cloud/network/firewall/FirewallManagerTest.java +++ b/server/src/test/java/com/cloud/network/firewall/FirewallManagerTest.java @@ -37,7 +37,8 @@ import com.cloud.utils.component.ComponentContext; import junit.framework.Assert; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; 
+import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Before; import org.junit.Ignore; @@ -61,7 +62,7 @@ @RunWith(MockitoJUnitRunner.class) public class FirewallManagerTest { - private static final Logger s_logger = Logger.getLogger(FirewallManagerTest.class); + private Logger logger = LogManager.getLogger(FirewallManagerTest.class); private AutoCloseable closeable; @@ -91,7 +92,7 @@ public void testInjected() { // Assert.assertTrue(firewallMgr._staticNatElements.get("VirtualRouter") instanceof StaticNatServiceProvider); // Assert.assertTrue(firewallMgr._networkAclElements.get("VpcVirtualRouter") instanceof NetworkACLServiceProvider); - s_logger.info("Done testing injection of service elements into firewall manager"); + logger.info("Done testing injection of service elements into firewall manager"); } diff --git a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java index 30085193e55e..86a1a0f43563 100644 --- a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java @@ -19,13 +19,14 @@ import com.cloud.configuration.ResourceLimit; import com.cloud.vpc.MockResourceLimitManagerImpl; import junit.framework.TestCase; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.Before; import org.junit.Test; public class ResourceLimitManagerImplTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(ResourceLimitManagerImplTest.class); + private Logger logger = LogManager.getLogger(ResourceLimitManagerImplTest.class); MockResourceLimitManagerImpl _resourceLimitService = new MockResourceLimitManagerImpl(); @@ -42,11 +43,11 @@ public void tearDown() throws Exception { @Test public void testInjected() throws 
Exception { - s_logger.info("Starting test for Resource Limit manager"); + logger.info("Starting test for Resource Limit manager"); updateResourceCount(); updateResourceLimit(); //listResourceLimits(); - s_logger.info("Resource Limit Manager: TEST PASSED"); + logger.info("Resource Limit Manager: TEST PASSED"); } protected void updateResourceCount() { diff --git a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java index 8657c07b5ef5..6e028af895ff 100644 --- a/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java +++ b/server/src/test/java/com/cloud/template/HypervisorTemplateAdapterTest.java @@ -32,7 +32,6 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateZoneDao; -import com.cloud.test.TestAppender; import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; import com.cloud.user.dao.AccountDao; @@ -55,7 +54,7 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -82,13 +81,13 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; -import java.util.regex.Pattern; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) @@ -139,6 +138,9 @@ public class HypervisorTemplateAdapterTest { @Mock StatsCollector statsCollectorMock; + @Mock + 
Logger loggerMock; + @Spy @InjectMocks HypervisorTemplateAdapter _adapter = new HypervisorTemplateAdapter(); @@ -439,14 +441,9 @@ public void isZoneAndImageStoreAvailableTestZoneIdIsNullShouldReturnFalse() { Set zoneSet = null; boolean isTemplatePrivate = false; - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.WARN, Pattern.quote(String.format("Zone ID is null, cannot allocate ISO/template in image store [%s].", dataStoreMock))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, Mockito.times(1)).warn(String.format("Zone ID is null, cannot allocate ISO/template in image store [%s].", dataStoreMock)); Assert.assertFalse(result); } @@ -461,15 +458,10 @@ public void isZoneAndImageStoreAvailableTestZoneIsNullShouldReturnFalse() { Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dataCenterVOMock); Mockito.when(dataStoreMock.getId()).thenReturn(2L); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.WARN, Pattern.quote(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", - zoneId, dataStoreMock.getId()))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, Mockito.times(1)).warn(String.format("Unable to find zone by id [%s], so skip downloading template to its image store [%s].", + zoneId, 
dataStoreMock.getId())); Assert.assertFalse(result); } @@ -485,14 +477,9 @@ public void isZoneAndImageStoreAvailableTestZoneIsDisabledShouldReturnFalse() { Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled); Mockito.when(dataStoreMock.getId()).thenReturn(2L); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.INFO, Pattern.quote(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, dataStoreMock.getId()))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, Mockito.times(1)).info(String.format("Zone [%s] is disabled. Skip downloading template to its image store [%s].", zoneId, dataStoreMock.getId())); Assert.assertFalse(result); } @@ -509,15 +496,10 @@ public void isZoneAndImageStoreAvailableTestImageStoreDoesNotHaveEnoughCapacityS Mockito.when(dataStoreMock.getId()).thenReturn(2L); Mockito.when(statsCollectorMock.imageStoreHasEnoughCapacity(any(DataStore.class))).thenReturn(false); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.INFO, Pattern.quote(String.format("Image store doesn't have enough capacity. 
Skip downloading template to this image store [%s].", - dataStoreMock.getId()))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, times(1)).info(String.format("Image store doesn't have enough capacity. Skip downloading template to this image store [%s].", + dataStoreMock.getId())); Assert.assertFalse(result); } @@ -533,15 +515,10 @@ public void isZoneAndImageStoreAvailableTestImageStoreHasEnoughCapacityAndZoneSe Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); Mockito.when(statsCollectorMock.imageStoreHasEnoughCapacity(any(DataStore.class))).thenReturn(true); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.INFO, Pattern.quote(String.format("Zone set is null; therefore, the ISO/template should be allocated in every secondary storage " + - "of zone [%s].", dataCenterVOMock))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, times(1)).info(String.format("Zone set is null; therefore, the ISO/template should be allocated in every secondary storage " + + "of zone [%s].", dataCenterVOMock)); Assert.assertTrue(result); } @@ -557,15 +534,10 @@ public void isZoneAndImageStoreAvailableTestTemplateIsPrivateAndItIsAlreadyAlloc Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); 
Mockito.when(statsCollectorMock.imageStoreHasEnoughCapacity(any(DataStore.class))).thenReturn(true); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.INFO, Pattern.quote(String.format("The template is private and it is already allocated in a secondary storage in zone [%s]; " + - "therefore, image store [%s] will be skipped.", dataCenterVOMock, dataStoreMock))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, times(1)).info(String.format("The template is private and it is already allocated in a secondary storage in zone [%s]; " + + "therefore, image store [%s] will be skipped.", dataCenterVOMock, dataStoreMock)); Assert.assertFalse(result); } @@ -581,15 +553,10 @@ public void isZoneAndImageStoreAvailableTestTemplateIsPrivateAndItIsNotAlreadyAl Mockito.when(dataCenterVOMock.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); Mockito.when(statsCollectorMock.imageStoreHasEnoughCapacity(any(DataStore.class))).thenReturn(true); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.INFO, Pattern.quote(String.format("Private template will be allocated in image store [%s] in zone [%s].", - dataStoreMock, dataCenterVOMock))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HypervisorTemplateAdapter.s_logger, testLogAppender); - boolean result = _adapter.isZoneAndImageStoreAvailable(dataStoreMock, zoneId, zoneSet, isTemplatePrivate); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, times(1)).info(String.format("Private template will be allocated in image store 
[%s] in zone [%s].", + dataStoreMock, dataCenterVOMock)); Assert.assertTrue(result); } diff --git a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java index 288211c43304..434c644f5969 100644 --- a/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/src/test/java/com/cloud/vpc/MockNetworkManagerImpl.java @@ -46,7 +46,6 @@ import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.cloudstack.api.response.AcquirePodIpCmdResponse; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.deploy.DataCenterDeployment; @@ -111,7 +110,6 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkOrches List _networkElements; private static HashMap s_providerToNetworkElementMap = new HashMap(); - private static final Logger s_logger = Logger.getLogger(MockNetworkManagerImpl.class); /* (non-Javadoc) * @see com.cloud.utils.component.Manager#start() @@ -122,7 +120,7 @@ public boolean start() { Provider implementedProvider = element.getProvider(); if (implementedProvider != null) { if (s_providerToNetworkElementMap.containsKey(implementedProvider.getName())) { - s_logger.error("Cannot start MapNetworkManager: Provider <-> NetworkElement must be a one-to-one map, " + + logger.error("Cannot start MapNetworkManager: Provider <-> NetworkElement must be a one-to-one map, " + "multiple NetworkElements found for Provider: " + implementedProvider.getName()); return false; } diff --git a/server/src/test/java/com/cloud/vpc/Site2SiteVpnTest.java b/server/src/test/java/com/cloud/vpc/Site2SiteVpnTest.java index b4168bfa10cd..f8a42df6c606 100644 --- a/server/src/test/java/com/cloud/vpc/Site2SiteVpnTest.java +++ b/server/src/test/java/com/cloud/vpc/Site2SiteVpnTest.java @@ -16,7 +16,6 @@ // under the License. 
package com.cloud.vpc; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -27,7 +26,6 @@ @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = "classpath:/VpcTestContext.xml") public class Site2SiteVpnTest { - private final static Logger s_logger = Logger.getLogger(Site2SiteVpnTest.class); // private static void addDaos(MockComponentLocator locator) { // locator.addDao("AccountDao", AccountDaoImpl.class); @@ -51,7 +49,7 @@ public void setUp() { // locator = new MockComponentLocator("management-server"); // addDaos(locator); // addManagers(locator); -// s_logger.info("Finished setUp"); +// logger.info("Finished setUp"); } @After @@ -64,11 +62,11 @@ public void testInjected() throws Exception { // new ArrayList>>(); // list.add(new Pair>("Site2SiteVpnServiceProvider", MockSite2SiteVpnServiceProvider.class)); // locator.addAdapterChain(Site2SiteVpnServiceProvider.class, list); -// s_logger.info("Finished add adapter"); +// logger.info("Finished add adapter"); // locator.makeActive(new DefaultInterceptorLibrary()); -// s_logger.info("Finished make active"); +// logger.info("Finished make active"); // Site2SiteVpnManagerImpl vpnMgr = ComponentLocator.inject(Site2SiteVpnManagerImpl.class); -// s_logger.info("Finished inject"); +// logger.info("Finished inject"); // Assert.assertTrue(vpnMgr.configure("Site2SiteVpnMgr",new HashMap()) ); // Assert.assertTrue(vpnMgr.start()); diff --git a/server/src/test/java/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java b/server/src/test/java/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java index 43bb8824fbe3..d4fcf5ed117c 100644 --- a/server/src/test/java/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java +++ b/server/src/test/java/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java @@ -26,14 +26,12 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDaoImpl; import com.cloud.utils.db.DB; -import 
org.apache.log4j.Logger; import java.lang.reflect.Field; import java.util.List; @DB() public class MockNetworkOfferingDaoImpl extends NetworkOfferingDaoImpl implements NetworkOfferingDao { - private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); /* (non-Javadoc) * @see com.cloud.offerings.dao.NetworkOfferingDao#findByUniqueName(java.lang.String) @@ -140,10 +138,10 @@ private NetworkOfferingVO setId(NetworkOfferingVO vo, long id) { f.setAccessible(true); f.setLong(voToReturn, id); } catch (NoSuchFieldException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } catch (IllegalAccessException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } diff --git a/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java b/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java index 4ef5506c513e..76403be76222 100644 --- a/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java +++ b/server/src/test/java/com/cloud/vpc/dao/MockVpcDaoImpl.java @@ -22,7 +22,6 @@ import com.cloud.network.vpc.dao.VpcDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -import org.apache.log4j.Logger; import java.lang.reflect.Field; import java.util.List; @@ -30,7 +29,6 @@ @DB() public class MockVpcDaoImpl extends GenericDaoBase implements VpcDao { - private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); /* (non-Javadoc) * @see com.cloud.network.vpc.Dao.VpcDao#getVpcCountByOfferingId(long) @@ -113,10 +111,10 @@ private VpcVO setId(VpcVO vo, long id) { f.setAccessible(true); f.setLong(voToReturn, id); } catch (NoSuchFieldException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } catch (IllegalAccessException ex) { - s_logger.warn(ex); + logger.warn(ex); return null; } diff --git a/server/src/test/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java 
b/server/src/test/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java index 50d0996d9cb1..ba66645a0c65 100644 --- a/server/src/test/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java @@ -43,7 +43,9 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.region.RegionVO; import org.apache.cloudstack.region.dao.RegionDao; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -61,7 +63,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(GlobalLoadBalancingRulesServiceImplTest.class); + private Logger logger = LogManager.getLogger(GlobalLoadBalancingRulesServiceImplTest.class); @Override @Before @@ -82,7 +84,7 @@ public void tearDown() { @Test public void testCreateGlobalLoadBalancerRule() throws Exception { - s_logger.info("Running tests for CreateGlobalLoadBalancerRule() service API"); + logger.info("Running tests for CreateGlobalLoadBalancerRule() service API"); /* * TEST 1: given valid parameters CreateGlobalLoadBalancerRule should succeed @@ -113,7 +115,7 @@ public void testCreateGlobalLoadBalancerRule() throws Exception { @Test public void testAssignToGlobalLoadBalancerRule() throws Exception { - s_logger.info("Running tests for AssignToGlobalLoadBalancerRule() service API"); + logger.info("Running tests for AssignToGlobalLoadBalancerRule() service API"); /* * TEST 1: given valid gslb rule id, valid lb rule id, and caller has access to both the rules @@ -136,7 +138,7 @@ public void testAssignToGlobalLoadBalancerRule() throws Exception { @Test public void testRemoveFromGlobalLoadBalancerRule() throws Exception { - 
s_logger.info("Running tests for RemoveFromGlobalLoadBalancerRule() service API"); + logger.info("Running tests for RemoveFromGlobalLoadBalancerRule() service API"); /* * TEST 1: given valid gslb rule id, valid lb rule id and is assigned to given gslb rule id @@ -160,7 +162,7 @@ public void testRemoveFromGlobalLoadBalancerRule() throws Exception { @Test public void testDeleteGlobalLoadBalancerRule() throws Exception { - s_logger.info("Running tests for DeleteGlobalLoadBalancerRule() service API"); + logger.info("Running tests for DeleteGlobalLoadBalancerRule() service API"); /* * TEST 1: given valid gslb rule id with assigned Lb rules, DeleteGlobalLoadBalancerRule() @@ -236,7 +238,7 @@ void runCreateGlobalLoadBalancerRulePostiveTest() throws Exception { try { gslbServiceImpl.createGlobalLoadBalancerRule(createCmd); } catch (Exception e) { - s_logger.info("exception in testing runCreateGlobalLoadBalancerRulePostiveTest message: " + e.toString()); + logger.info("exception in testing runCreateGlobalLoadBalancerRulePostiveTest message: " + e.toString()); } } @@ -556,7 +558,7 @@ void runAssignToGlobalLoadBalancerRuleTest() throws Exception { try { gslbServiceImpl.assignToGlobalLoadBalancerRule(assignCmd); } catch (Exception e) { - s_logger.info("exception in testing runAssignToGlobalLoadBalancerRuleTest message: " + e.toString()); + logger.info("exception in testing runAssignToGlobalLoadBalancerRuleTest message: " + e.toString()); } } @@ -640,7 +642,7 @@ void runAssignToGlobalLoadBalancerRuleTestSameZoneLb() throws Exception { try { gslbServiceImpl.assignToGlobalLoadBalancerRule(assignCmd); } catch (InvalidParameterValueException e) { - s_logger.info(e.getMessage()); + logger.info(e.getMessage()); Assert.assertTrue(e.getMessage().contains("Load balancer rule specified should be in unique zone")); } } @@ -924,7 +926,7 @@ void runDeleteGlobalLoadBalancerRuleTestWithNoLbRules() throws Exception { gslbServiceImpl.deleteGlobalLoadBalancerRule(deleteCmd); 
Assert.assertTrue(gslbRule.getState() == GlobalLoadBalancerRule.State.Revoke); } catch (Exception e) { - s_logger.info("exception in testing runDeleteGlobalLoadBalancerRuleTestWithNoLbRules. " + e.toString()); + logger.info("exception in testing runDeleteGlobalLoadBalancerRuleTestWithNoLbRules. " + e.toString()); } } @@ -969,7 +971,7 @@ void runDeleteGlobalLoadBalancerRuleTestWithLbRules() throws Exception { Assert.assertTrue(gslbRule.getState() == GlobalLoadBalancerRule.State.Revoke); Assert.assertTrue(gslbLmMap.isRevoke() == true); } catch (Exception e) { - s_logger.info("exception in testing runDeleteGlobalLoadBalancerRuleTestWithLbRules. " + e.toString()); + logger.info("exception in testing runDeleteGlobalLoadBalancerRuleTestWithLbRules. " + e.toString()); } } diff --git a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java index 6bf7eef28441..d7684b824e3e 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java @@ -18,7 +18,6 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; -import com.cloud.test.TestAppender; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -28,7 +27,7 @@ import org.apache.cloudstack.secstorage.heuristics.HeuristicType; import org.apache.cloudstack.storage.heuristics.presetvariables.PresetVariables; import org.apache.cloudstack.utils.jsinterpreter.JsInterpreter; -import org.apache.log4j.Level; +import org.apache.logging.log4j.Logger; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -38,8 +37,6 @@ import org.mockito.Spy; import 
org.mockito.junit.MockitoJUnitRunner; -import java.util.regex.Pattern; - @RunWith(MockitoJUnitRunner.class) public class HeuristicRuleHelperTest { @@ -64,6 +61,9 @@ public class HeuristicRuleHelperTest { @Mock DataStore dataStoreMock; + @Mock + Logger loggerMock; + @Spy @InjectMocks HeuristicRuleHelper heuristicRuleHelperSpy = new HeuristicRuleHelper(); @@ -74,15 +74,10 @@ public void getImageStoreIfThereIsHeuristicRuleTestZoneDoesNotHaveHeuristicRuleS Mockito.when(secondaryStorageHeuristicDaoMock.findByZoneIdAndType(Mockito.anyLong(), Mockito.any(HeuristicType.class))).thenReturn(null); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.DEBUG, Pattern.quote(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. Returning null.", - zoneId, HeuristicType.TEMPLATE))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HeuristicRuleHelper.LOGGER, testLogAppender); - DataStore result = heuristicRuleHelperSpy.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.TEMPLATE, null); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, Mockito.times(1)).debug(String.format("No heuristic rules found for zone with ID [%s] and heuristic type [%s]. 
Returning null.", + zoneId, HeuristicType.TEMPLATE)); Assert.assertNull(result); } @@ -95,14 +90,9 @@ public void getImageStoreIfThereIsHeuristicRuleTestZoneHasHeuristicRuleShouldCal Mockito.doReturn(null).when(heuristicRuleHelperSpy).interpretHeuristicRule(Mockito.anyString(), Mockito.any(HeuristicType.class), Mockito.isNull(), Mockito.anyLong()); - TestAppender.TestAppenderBuilder appenderBuilder = new TestAppender.TestAppenderBuilder(); - appenderBuilder.addExpectedPattern(Level.DEBUG, Pattern.quote(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicVOMock, zoneId))); - TestAppender testLogAppender = appenderBuilder.build(); - TestAppender.safeAddAppender(HeuristicRuleHelper.LOGGER, testLogAppender); - DataStore result = heuristicRuleHelperSpy.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.TEMPLATE, null); - testLogAppender.assertMessagesLogged(); + Mockito.verify(loggerMock, Mockito.times(1)).debug(String.format("Found the heuristic rule %s to apply for zone with ID [%s].", heuristicVOMock, zoneId)); Assert.assertNull(result); } diff --git a/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java b/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java index d40cd6164846..cad36b962ac2 100644 --- a/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/vm/schedule/VMSchedulerImplTest.java @@ -329,19 +329,10 @@ public void testExecuteJobs() { UserVm vm1 = Mockito.mock(UserVm.class); UserVm vm2 = Mockito.mock(UserVm.class); - Mockito.when(job1.getVmId()).thenReturn(1L); - Mockito.when(job1.getScheduledTime()).thenReturn(new Date()); - Mockito.when(job1.getAction()).thenReturn(VMSchedule.Action.START); - Mockito.when(job1.getVmScheduleId()).thenReturn(1L); Mockito.when(job2.getVmId()).thenReturn(2L); - Mockito.when(job2.getScheduledTime()).thenReturn(new Date()); - 
Mockito.when(job2.getAction()).thenReturn(VMSchedule.Action.STOP); - Mockito.when(job2.getVmScheduleId()).thenReturn(2L); - Mockito.when(userVmManager.getUserVm(1L)).thenReturn(vm1); Mockito.when(userVmManager.getUserVm(2L)).thenReturn(vm2); - Mockito.doReturn(1L).when(vmScheduler).processJob(job1, vm1); Mockito.doReturn(null).when(vmScheduler).processJob(job2, vm2); Mockito.when(vmScheduledJobDao.acquireInLockTable(job1.getId())).thenReturn(job1); diff --git a/services/console-proxy/rdpconsole/src/main/java/common/BufferedImagePixelsAdapter.java b/services/console-proxy/rdpconsole/src/main/java/common/BufferedImagePixelsAdapter.java index 336ff4435d0f..bc41647ddd3d 100644 --- a/services/console-proxy/rdpconsole/src/main/java/common/BufferedImagePixelsAdapter.java +++ b/services/console-proxy/rdpconsole/src/main/java/common/BufferedImagePixelsAdapter.java @@ -21,7 +21,8 @@ import java.awt.image.DataBufferInt; import java.util.Arrays; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import streamer.BaseElement; import streamer.ByteBuffer; @@ -29,7 +30,7 @@ import streamer.Link; public class BufferedImagePixelsAdapter extends BaseElement { - private static final Logger s_logger = Logger.getLogger(BufferedImagePixelsAdapter.class); + protected static Logger LOGGER = LogManager.getLogger(BufferedImagePixelsAdapter.class); public static final String TARGET_X = "x"; public static final String TARGET_Y = "y"; @@ -58,7 +59,7 @@ public String toString() { @Override public void handleData(ByteBuffer buf, Link link) { if (verbose) - s_logger.debug("[" + this + "] INFO: Data received: " + buf + "."); + LOGGER.debug("[" + this + "] INFO: Data received: " + buf + "."); int x = (Integer)buf.getMetadata(TARGET_X); int y = (Integer)buf.getMetadata(TARGET_Y); @@ -103,7 +104,7 @@ public void handleData(ByteBuffer buf, Link link) { try { System.arraycopy(intArray, srcLine * rectWidth, imageBuffer, x + dstLine * 
imageWidth, rectWidth); } catch (IndexOutOfBoundsException e) { - s_logger.info("[ignored] copy error",e); + LOGGER.info("[ignored] copy error",e); } } break; @@ -145,7 +146,7 @@ public static void main(String args[]) { String actualData = Arrays.toString(((DataBufferInt)canvas.getOfflineImage().getRaster().getDataBuffer()).getData()); String expectedData = Arrays.toString(pixelsLE); if (!actualData.equals(expectedData)) - s_logger.error("Actual image: " + actualData + "\nExpected image: " + expectedData + "."); + LOGGER.error("Actual image: " + actualData + "\nExpected image: " + expectedData + "."); } diff --git a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ClientInfoPDU.java b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ClientInfoPDU.java index 61960049dfd4..005727e54109 100644 --- a/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ClientInfoPDU.java +++ b/services/console-proxy/rdpconsole/src/main/java/rdpclient/rdp/ClientInfoPDU.java @@ -35,7 +35,7 @@ public class ClientInfoPDU extends OneTimeSwitch { public static final int INFO_UNICODE = 0x10; public static final int INFO_MAXIMIZESHELL = 0x20; - public static final int INFO_LOGONNOTIFY = 0x40; + public static final int INFO_LOGONNOTIFY = 0x40; public static final int INFO_ENABLEWINDOWSKEY = 0x100; public static final int INFO_MOUSE_HAS_WHEEL = 0x00020000; public static final int INFO_NOAUDIOPLAYBACK = 0x00080000; @@ -104,7 +104,7 @@ protected void onStart() { // Flags buf.writeIntLE(INFO_MOUSE | INFO_DISABLECTRLALTDEL | INFO_UNICODE | - INFO_MAXIMIZESHELL | INFO_LOGONNOTIFY | INFO_ENABLEWINDOWSKEY | + INFO_MAXIMIZESHELL | INFO_LOGONNOTIFY | INFO_ENABLEWINDOWSKEY | INFO_MOUSE_HAS_WHEEL | INFO_NOAUDIOPLAYBACK); // @@ -293,7 +293,7 @@ public static void main(String args[]) { (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00, // Flags: 0xa0173 (LE), INFO_MOUSE (0x1), INFO_DISABLECTRLALTDEL (0x2), INFO_UNICODE (0x10), - // INFO_MAXIMIZESHELL (0x20),
INFO_LOGONNOTIFY (0x40), INFO_ENABLEWINDOWSKEY (0x100), + // INFO_MAXIMIZESHELL (0x20), INFO_LOGONNOTIFY (0x40), INFO_ENABLEWINDOWSKEY (0x100), // INFO_MOUSE_HAS_WHEEL (0x00020000), INFO_NOAUDIOPLAYBACK (0x00080000), (byte) 0x73, (byte) 0x01, (byte) 0x0a, (byte) 0x00, diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java b/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java index e616165752ae..15e1a8710335 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/BaseElement.java @@ -21,11 +21,15 @@ import java.util.Map.Entry; import java.util.Set; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import streamer.debug.FakeSink; import streamer.debug.FakeSource; public class BaseElement implements Element { + protected Logger logger = LogManager.getLogger(getClass()); + protected String id; /** diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java index f596cf22cefe..3d2ad4147136 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/InputStreamSource.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.io.InputStream; -import org.apache.log4j.Logger; import streamer.debug.FakeSink; @@ -28,7 +27,6 @@ * Source element, which reads data from InputStream.
*/ public class InputStreamSource extends BaseElement { - private static final Logger s_logger = Logger.getLogger(InputStreamSource.class); protected InputStream is; protected SocketWrapperImpl socketWrapper; @@ -151,13 +149,13 @@ private void closeStream() { try { is.close(); } catch (IOException e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "io error on input stream: " + e.getLocalizedMessage()); } try { sendEventToAllPads(Event.STREAM_CLOSE, Direction.OUT); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error sending an event to all pods: " + e.getLocalizedMessage()); } } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java b/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java index c2d58c0f1ff0..6ea9620b5d24 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/OutputStreamSink.java @@ -20,12 +20,10 @@ import java.io.IOException; import java.io.OutputStream; -import org.apache.log4j.Logger; import streamer.debug.FakeSource; public class OutputStreamSink extends BaseElement { - private static final Logger s_logger = Logger.getLogger(OutputStreamSink.class); protected OutputStream os; protected SocketWrapperImpl socketWrapper; @@ -113,13 +111,13 @@ private void closeStream() { try { os.close(); } catch (IOException e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "io error on output: " + e.getLocalizedMessage()); } try { sendEventToAllPads(Event.STREAM_CLOSE, Direction.IN); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error sending output close event: " + e.getLocalizedMessage()); } } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java b/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java index 342f2c3c52eb..84ed51440d0d 100644 --- 
a/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/PipelineImpl.java @@ -22,11 +22,15 @@ import java.util.Map; import java.util.Set; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import streamer.debug.FakeSink; import streamer.debug.FakeSource; public class PipelineImpl implements Pipeline { + protected Logger logger = LogManager.getLogger(getClass()); + protected String id; protected boolean verbose = System.getProperty("streamer.Pipeline.debug", "false").equals("true"); diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/SocketWrapperImpl.java b/services/console-proxy/rdpconsole/src/main/java/streamer/SocketWrapperImpl.java index 3e05d45f1ad6..43534ac7b8a5 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/SocketWrapperImpl.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/SocketWrapperImpl.java @@ -32,7 +32,6 @@ import javax.net.ssl.SSLSocketFactory; import javax.net.ssl.TrustManager; -import org.apache.log4j.Logger; import org.apache.cloudstack.utils.security.SSLUtils; import org.apache.cloudstack.utils.security.SecureSSLSocketFactory; @@ -43,7 +42,6 @@ import streamer.ssl.TrustAllX509TrustManager; public class SocketWrapperImpl extends PipelineImpl implements SocketWrapper { - private static final Logger s_logger = Logger.getLogger(SocketWrapperImpl.class); protected InputStreamSource source; protected OutputStreamSink sink; @@ -177,26 +175,26 @@ public void shutdown() { try { handleEvent(Event.STREAM_CLOSE, Direction.IN); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error sending input close event: " + e.getLocalizedMessage()); } try { handleEvent(Event.STREAM_CLOSE, Direction.OUT); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error sending output close event: " + e.getLocalizedMessage()); } try { if 
(sslSocket != null) sslSocket.close(); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error closing ssl socket: " + e.getLocalizedMessage()); } try { socket.close(); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "error closing socket: " + e.getLocalizedMessage()); } } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java b/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java index d0e7d33934b4..bbb14bfc66ce 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/SyncLink.java @@ -16,14 +16,15 @@ // under the License. package streamer; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; /** * Link to transfer data in bounds of single thread (synchronized transfer). * Must not be used to send data to elements served in different threads. 
*/ public class SyncLink implements Link { - private static final Logger s_logger = Logger.getLogger(SyncLink.class); + protected Logger logger = LogManager.getLogger(getClass()); /** * When null packet is pulled from source element, then make slight delay to @@ -115,7 +116,7 @@ public SyncLink(String id) { @Override public void pushBack(ByteBuffer buf) { if (verbose) - s_logger.debug("[" + this + "] INFO: Buffer pushed back: " + buf + "."); + logger.debug("[" + this + "] INFO: Buffer pushed back: " + buf + "."); if (cacheBuffer != null) { ByteBuffer tmp = cacheBuffer.join(buf); @@ -154,7 +155,7 @@ public void sendData(ByteBuffer buf) { throw new RuntimeException("[" + this + "] ERROR: link is not in push mode."); if (verbose) - s_logger.debug("[" + this + "] INFO: Incoming buffer: " + buf + "."); + logger.debug("[" + this + "] INFO: Incoming buffer: " + buf + "."); if (buf == null && cacheBuffer == null) return; @@ -175,7 +176,7 @@ public void sendData(ByteBuffer buf) { while (cacheBuffer != null) { if (paused || hold) { if (verbose) - s_logger.debug("[" + this + "] INFO: Transfer is paused. Data in cache buffer: " + cacheBuffer + "."); + logger.debug("[" + this + "] INFO: Transfer is paused. Data in cache buffer: " + cacheBuffer + "."); // Wait until rest of packet will be read return; @@ -183,7 +184,7 @@ public void sendData(ByteBuffer buf) { if (expectedPacketSize > 0 && cacheBuffer.length < expectedPacketSize) { if (verbose) - s_logger.debug("[" + this + "] INFO: Transfer is suspended because available data is less than expected packet size. Expected packet size: " + logger.debug("[" + this + "] INFO: Transfer is suspended because available data is less than expected packet size. 
Expected packet size: " + expectedPacketSize + ", data in cache buffer: " + cacheBuffer + "."); // Wait until rest of packet will be read @@ -210,7 +211,7 @@ public void sendData(ByteBuffer buf) { public void sendEvent(Event event, Direction direction) { if (verbose) - s_logger.debug("[" + this + "] INFO: Event " + event + " is received."); + logger.debug("[" + this + "] INFO: Event " + event + " is received."); // Shutdown main loop (if any) when STREAM_CLOSE event is received. switch (event) { @@ -257,14 +258,14 @@ public ByteBuffer pull(boolean block) { if (paused) { if (verbose) - s_logger.debug("[" + this + "] INFO: Cannot pull, link is paused."); + logger.debug("[" + this + "] INFO: Cannot pull, link is paused."); // Make slight delay in such case, to avoid consuming 100% of CPU if (block) { try { Thread.sleep(100); } catch (InterruptedException e) { - s_logger.info("[ignored] interrupted during pull", e); + logger.info("[ignored] interrupted during pull", e); } } @@ -275,7 +276,7 @@ public ByteBuffer pull(boolean block) { // then return it instead of asking for more data from source if (cacheBuffer != null && (expectedPacketSize == 0 || (expectedPacketSize > 0 && cacheBuffer.length >= expectedPacketSize))) { if (verbose) - s_logger.debug("[" + this + "] INFO: Data pulled from cache buffer: " + cacheBuffer + "."); + logger.debug("[" + this + "] INFO: Data pulled from cache buffer: " + cacheBuffer + "."); ByteBuffer tmp = cacheBuffer; cacheBuffer = null; @@ -294,7 +295,7 @@ public ByteBuffer pull(boolean block) { // Can return something only when data was stored in buffer if (cacheBuffer != null && (expectedPacketSize == 0 || (expectedPacketSize > 0 && cacheBuffer.length >= expectedPacketSize))) { if (verbose) - s_logger.debug("[" + this + "] INFO: Data pulled from source: " + cacheBuffer + "."); + logger.debug("[" + this + "] INFO: Data pulled from source: " + cacheBuffer + "."); ByteBuffer tmp = cacheBuffer; cacheBuffer = null; @@ -370,7 +371,7 @@ public 
void run() { sendEvent(Event.LINK_SWITCH_TO_PULL_MODE, Direction.IN); if (verbose) - s_logger.debug("[" + this + "] INFO: Starting pull loop."); + logger.debug("[" + this + "] INFO: Starting pull loop."); // Pull source in loop while (!shutdown) { @@ -386,7 +387,7 @@ public void run() { } if (verbose) - s_logger.debug("[" + this + "] INFO: Pull loop finished."); + logger.debug("[" + this + "] INFO: Pull loop finished."); } @@ -401,7 +402,7 @@ protected void delay() { @Override public void setPullMode() { if (verbose) - s_logger.debug("[" + this + "] INFO: Switching to PULL mode."); + logger.debug("[" + this + "] INFO: Switching to PULL mode."); pullMode = true; } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java index 326570b71416..bbe87a0b450d 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSink.java @@ -16,7 +16,6 @@ // under the License. 
package streamer.apr; -import org.apache.log4j.Logger; import org.apache.tomcat.jni.Socket; import streamer.BaseElement; @@ -27,7 +26,6 @@ import streamer.Link; public class AprSocketSink extends BaseElement { - private static final Logger s_logger = Logger.getLogger(AprSocketSink.class); protected AprSocketWrapperImpl socketWrapper; protected Long socket; @@ -119,7 +117,7 @@ private void closeStream() { try { sendEventToAllPads(Event.STREAM_CLOSE, Direction.IN); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failing sending sink event to all pads: " + e.getLocalizedMessage()); } socketWrapper.shutdown(); diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java index f4cd7e2539ec..bab28d799cb8 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketSource.java @@ -16,7 +16,6 @@ // under the License. package streamer.apr; -import org.apache.log4j.Logger; import org.apache.tomcat.jni.Socket; import streamer.BaseElement; @@ -30,7 +29,6 @@ * Source element, which reads data from InputStream. 
*/ public class AprSocketSource extends BaseElement { - private static final Logger s_logger = Logger.getLogger(AprSocketSource.class); protected AprSocketWrapperImpl socketWrapper; protected Long socket; @@ -164,7 +162,7 @@ private void closeStream() { try { sendEventToAllPads(Event.STREAM_CLOSE, Direction.OUT); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failing sending source event to all pads: " + e.getLocalizedMessage()); } socketWrapper.shutdown(); diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketWrapperImpl.java b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketWrapperImpl.java index e8741400cedd..113a15c3caa8 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketWrapperImpl.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/apr/AprSocketWrapperImpl.java @@ -23,7 +23,6 @@ import java.net.InetSocketAddress; import java.util.HashMap; -import org.apache.log4j.Logger; import org.apache.tomcat.jni.Address; import org.apache.tomcat.jni.Error; import org.apache.tomcat.jni.Library; @@ -47,7 +46,6 @@ import sun.security.x509.X509CertImpl; public class AprSocketWrapperImpl extends PipelineImpl implements SocketWrapper { - private static final Logger s_logger = Logger.getLogger(AprSocketWrapperImpl.class); static { try { @@ -200,13 +198,13 @@ public void shutdown() { try { handleEvent(Event.STREAM_CLOSE, Direction.IN); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "handling stream close event failed on input: " + e.getLocalizedMessage()); } try { handleEvent(Event.STREAM_CLOSE, Direction.OUT); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "handling event close event failed on output: " + e.getLocalizedMessage()); } } @@ -222,7 +220,7 @@ void destroyPull() { // Socket.shutdown(socket, Socket.APR_SHUTDOWN_READWRITE); Pool.destroy(pool); } catch (Exception e) { - 
s_logger.info("[ignored]" + logger.info("[ignored]" + "failure during network cleanup: " + e.getLocalizedMessage()); } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/bco/BcoSocketWrapperImpl.java b/services/console-proxy/rdpconsole/src/main/java/streamer/bco/BcoSocketWrapperImpl.java index 39aaba9e3402..92a72c2c8d25 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/bco/BcoSocketWrapperImpl.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/bco/BcoSocketWrapperImpl.java @@ -16,7 +16,6 @@ // under the License. package streamer.bco; -import org.apache.log4j.Logger; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.bouncycastle.tls.DefaultTlsClient; import org.bouncycastle.tls.ServerOnlyTlsAuthentication; @@ -37,7 +36,6 @@ @SuppressWarnings("deprecation") public class BcoSocketWrapperImpl extends SocketWrapperImpl { - private static final Logger s_logger = Logger.getLogger(BcoSocketWrapperImpl.class); static { Security.addProvider(new BouncyCastleProvider()); @@ -99,26 +97,26 @@ public void shutdown() { try { handleEvent(Event.STREAM_CLOSE, Direction.IN); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failure handling close event for bso input stream: " + e.getLocalizedMessage()); } try { handleEvent(Event.STREAM_CLOSE, Direction.OUT); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failure handling close event for bso output stream: " + e.getLocalizedMessage()); } try { if (bcoSslSocket != null) bcoSslSocket.close(); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failure handling close event for bso socket: " + e.getLocalizedMessage()); } try { socket.close(); } catch (Exception e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failure handling close event for socket: " + e.getLocalizedMessage()); } } diff --git 
a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java index 1a0f56b9a94e..ccf56b9765d5 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/FakeSource.java @@ -16,7 +16,6 @@ // under the License. package streamer.debug; -import org.apache.log4j.Logger; import streamer.BaseElement; import streamer.ByteBuffer; @@ -27,7 +26,6 @@ import streamer.SyncLink; public class FakeSource extends BaseElement { - private static final Logger s_logger = Logger.getLogger(FakeSource.class); /** * Delay for null packets in poll method when blocking is requested, in @@ -69,7 +67,7 @@ protected void delay() { try { Thread.sleep(delay); } catch (InterruptedException e) { - s_logger.info("[ignored] interrupted while creating latency", e); + logger.info("[ignored] interrupted while creating latency", e); } } diff --git a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java index d4e48c6ee29d..70926c7a7e1f 100644 --- a/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java +++ b/services/console-proxy/rdpconsole/src/main/java/streamer/debug/MockServer.java @@ -27,10 +27,11 @@ import javax.net.ssl.SSLSocket; import javax.net.ssl.SSLSocketFactory; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class MockServer implements Runnable { - private static final Logger s_logger = Logger.getLogger(MockServer.class); + protected Logger logger = LogManager.getLogger(getClass()); private boolean shutdown = false; private ServerSocket serverSocket; @@ -134,19 +135,19 @@ public void run() { try { is.close(); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + 
"in stream close failed: " + e.getLocalizedMessage()); } try { os.close(); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "out stream close failed: " + e.getLocalizedMessage()); } try { serverSocket.close(); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "server socket close failed: " + e.getLocalizedMessage()); } } diff --git a/services/console-proxy/server/pom.xml b/services/console-proxy/server/pom.xml index 8080aa33cfa5..ce6fe5943adc 100644 --- a/services/console-proxy/server/pom.xml +++ b/services/console-proxy/server/pom.xml @@ -29,8 +29,12 @@ - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api com.google.code.gson diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/AjaxFIFOImageCache.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/AjaxFIFOImageCache.java index cd6517d0913a..5a0a29977b53 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/AjaxFIFOImageCache.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/AjaxFIFOImageCache.java @@ -24,7 +24,7 @@ import com.cloud.consoleproxy.util.Logger; public class AjaxFIFOImageCache { - private static final Logger s_logger = Logger.getLogger(AjaxFIFOImageCache.class); + protected Logger logger = Logger.getLogger(getClass()); private List fifoQueue; private Map cache; @@ -47,14 +47,14 @@ public synchronized int putImage(byte[] image) { Integer keyToRemove = fifoQueue.remove(0); cache.remove(keyToRemove); - if (s_logger.isTraceEnabled()) - s_logger.trace("Remove image from cache, key: " + keyToRemove); + if (logger.isTraceEnabled()) + logger.trace("Remove image from cache, key: " + keyToRemove); } int key = getNextKey(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Add image to cache, key: " + key); + if (logger.isTraceEnabled()) + logger.trace("Add image to cache, key: " + key); 
cache.put(key, image); fifoQueue.add(key); @@ -66,14 +66,14 @@ public synchronized byte[] getImage(int key) { key = nextKey; } if (cache.containsKey(key)) { - if (s_logger.isTraceEnabled()) - s_logger.trace("Retrieve image from cache, key: " + key); + if (logger.isTraceEnabled()) + logger.trace("Retrieve image from cache, key: " + key); return cache.get(key); } - if (s_logger.isTraceEnabled()) - s_logger.trace("Image is no long in cache, key: " + key); + if (logger.isTraceEnabled()) + logger.trace("Image is no long in cache, key: " + key); return null; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java index 2753d9fcb654..c841f76540d2 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxy.java @@ -36,7 +36,7 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.xml.DOMConfigurator; +import org.apache.logging.log4j.core.config.Configurator; import org.eclipse.jetty.websocket.api.Session; import com.cloud.consoleproxy.util.Logger; @@ -49,7 +49,7 @@ * ConsoleProxy, singleton class that manages overall activities in console proxy process. 
To make legacy code work, we still */ public class ConsoleProxy { - private static final Logger s_logger = Logger.getLogger(ConsoleProxy.class); + protected static Logger LOGGER = Logger.getLogger(ConsoleProxy.class); public static final int KEYBOARD_RAW = 0; public static final int KEYBOARD_COOKED = 1; @@ -107,7 +107,7 @@ private static void configLog4j() { File file = new File(configUrl.toURI()); System.out.println("Log4j configuration from : " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + Configurator.initialize(null, file.getAbsolutePath()); } catch (URISyntaxException e) { System.out.println("Unable to convert log4j configuration Url to URI"); } @@ -118,23 +118,23 @@ private static void configLog4j() { } private static void configProxy(Properties conf) { - s_logger.info("Configure console proxy..."); + LOGGER.info("Configure console proxy..."); for (Object key : conf.keySet()) { - s_logger.info("Property " + (String)key + ": " + conf.getProperty((String)key)); + LOGGER.info("Property " + (String)key + ": " + conf.getProperty((String)key)); if (!ArrayUtils.contains(skipProperties, key)) { - s_logger.info("Property " + (String)key + ": " + conf.getProperty((String)key)); + LOGGER.info("Property " + (String)key + ": " + conf.getProperty((String)key)); } } String s = conf.getProperty("consoleproxy.httpListenPort"); if (s != null) { httpListenPort = Integer.parseInt(s); - s_logger.info("Setting httpListenPort=" + s); + LOGGER.info("Setting httpListenPort=" + s); } s = conf.getProperty("premium"); if (s != null && s.equalsIgnoreCase("true")) { - s_logger.info("Premium setting will override settings from consoleproxy.properties, listen at port 443"); + LOGGER.info("Premium setting will override settings from consoleproxy.properties, listen at port 443"); httpListenPort = 443; factoryClzName = "com.cloud.consoleproxy.ConsoleProxySecureServerFactoryImpl"; } else { @@ -144,19 +144,19 @@ private static void 
configProxy(Properties conf) { s = conf.getProperty("consoleproxy.httpCmdListenPort"); if (s != null) { httpCmdListenPort = Integer.parseInt(s); - s_logger.info("Setting httpCmdListenPort=" + s); + LOGGER.info("Setting httpCmdListenPort=" + s); } s = conf.getProperty("consoleproxy.reconnectMaxRetry"); if (s != null) { reconnectMaxRetry = Integer.parseInt(s); - s_logger.info("Setting reconnectMaxRetry=" + reconnectMaxRetry); + LOGGER.info("Setting reconnectMaxRetry=" + reconnectMaxRetry); } s = conf.getProperty("consoleproxy.readTimeoutSeconds"); if (s != null) { readTimeoutSeconds = Integer.parseInt(s); - s_logger.info("Setting readTimeoutSeconds=" + readTimeoutSeconds); + LOGGER.info("Setting readTimeoutSeconds=" + readTimeoutSeconds); } } @@ -168,14 +168,14 @@ public static ConsoleProxyServerFactory getHttpServerFactory() { factory.init(ConsoleProxy.ksBits, ConsoleProxy.ksPassword); return factory; } catch (InstantiationException e) { - s_logger.error(e.getMessage(), e); + LOGGER.error(e.getMessage(), e); return null; } catch (IllegalAccessException e) { - s_logger.error(e.getMessage(), e); + LOGGER.error(e.getMessage(), e); return null; } } catch (ClassNotFoundException e) { - s_logger.warn("Unable to find http server factory class: " + factoryClzName); + LOGGER.warn("Unable to find http server factory class: " + factoryClzName); return new ConsoleProxyBaseServerFactoryImpl(); } } @@ -191,11 +191,11 @@ public static ConsoleProxyAuthenticationResult authenticateConsoleAccess(Console if (org.apache.commons.lang3.StringUtils.isNotBlank(param.getExtraSecurityToken())) { String extraToken = param.getExtraSecurityToken(); String clientProvidedToken = param.getClientProvidedExtraSecurityToken(); - s_logger.debug(String.format("Extra security validation for the console access, provided %s " + + LOGGER.debug(String.format("Extra security validation for the console access, provided %s " + "to validate against %s", clientProvidedToken, extraToken)); if 
(!extraToken.equals(clientProvidedToken)) { - s_logger.error("The provided extra token does not match the expected value for this console endpoint"); + LOGGER.error("The provided extra token does not match the expected value for this console endpoint"); authResult.setSuccess(false); return authResult; } @@ -203,10 +203,10 @@ public static ConsoleProxyAuthenticationResult authenticateConsoleAccess(Console String sessionUuid = param.getSessionUuid(); if (allowedSessions.contains(sessionUuid)) { - s_logger.debug("Acquiring the session " + sessionUuid + " not available for future use"); + LOGGER.debug("Acquiring the session " + sessionUuid + " not available for future use"); allowedSessions.remove(sessionUuid); } else { - s_logger.info("Session " + sessionUuid + " has already been used, cannot connect"); + LOGGER.info("Session " + sessionUuid + " has already been used, cannot connect"); authResult.setSuccess(false); return authResult; } @@ -227,11 +227,11 @@ public static ConsoleProxyAuthenticationResult authenticateConsoleAccess(Console authMethod.invoke(ConsoleProxy.context, param.getClientHostAddress(), String.valueOf(param.getClientHostPort()), param.getClientTag(), param.getClientHostPassword(), param.getTicket(), reauthentication, param.getSessionUuid()); } catch (IllegalAccessException e) { - s_logger.error("Unable to invoke authenticateConsoleAccess due to IllegalAccessException" + " for vm: " + param.getClientTag(), e); + LOGGER.error("Unable to invoke authenticateConsoleAccess due to IllegalAccessException" + " for vm: " + param.getClientTag(), e); authResult.setSuccess(false); return authResult; } catch (InvocationTargetException e) { - s_logger.error("Unable to invoke authenticateConsoleAccess due to InvocationTargetException " + " for vm: " + param.getClientTag(), e); + LOGGER.error("Unable to invoke authenticateConsoleAccess due to InvocationTargetException " + " for vm: " + param.getClientTag(), e); authResult.setSuccess(false); return authResult; } @@ 
-239,11 +239,11 @@ public static ConsoleProxyAuthenticationResult authenticateConsoleAccess(Console if (result != null && result instanceof String) { authResult = new Gson().fromJson((String)result, ConsoleProxyAuthenticationResult.class); } else { - s_logger.error("Invalid authentication return object " + result + " for vm: " + param.getClientTag() + ", decline the access"); + LOGGER.error("Invalid authentication return object " + result + " for vm: " + param.getClientTag() + ", decline the access"); authResult.setSuccess(false); } } else { - s_logger.warn("Private channel towards management server is not setup. Switch to offline mode and allow access to vm: " + param.getClientTag()); + LOGGER.warn("Private channel towards management server is not setup. Switch to offline mode and allow access to vm: " + param.getClientTag()); } return authResult; @@ -254,12 +254,12 @@ public static void reportLoadInfo(String gsonLoadInfo) { try { reportMethod.invoke(ConsoleProxy.context, gsonLoadInfo); } catch (IllegalAccessException e) { - s_logger.error("Unable to invoke reportLoadInfo due to " + e.getMessage()); + LOGGER.error("Unable to invoke reportLoadInfo due to " + e.getMessage()); } catch (InvocationTargetException e) { - s_logger.error("Unable to invoke reportLoadInfo due to " + e.getMessage()); + LOGGER.error("Unable to invoke reportLoadInfo due to " + e.getMessage()); } } else { - s_logger.warn("Private channel towards management server is not setup. Switch to offline mode and ignore load report"); + LOGGER.warn("Private channel towards management server is not setup. 
Switch to offline mode and ignore load report"); } } @@ -268,12 +268,12 @@ public static void ensureRoute(String address) { try { ensureRouteMethod.invoke(ConsoleProxy.context, address); } catch (IllegalAccessException e) { - s_logger.error("Unable to invoke ensureRoute due to " + e.getMessage()); + LOGGER.error("Unable to invoke ensureRoute due to " + e.getMessage()); } catch (InvocationTargetException e) { - s_logger.error("Unable to invoke ensureRoute due to " + e.getMessage()); + LOGGER.error("Unable to invoke ensureRoute due to " + e.getMessage()); } } else { - s_logger.warn("Unable to find ensureRoute method, console proxy agent is not up to date"); + LOGGER.warn("Unable to find ensureRoute method, console proxy agent is not up to date"); } } @@ -281,12 +281,12 @@ public static void startWithContext(Properties conf, Object context, byte[] ksBi setEncryptorPassword(password); configLog4j(); Logger.setFactory(new ConsoleProxyLoggerFactory()); - s_logger.info("Start console proxy with context"); + LOGGER.info("Start console proxy with context"); if (conf != null) { for (Object key : conf.keySet()) { if (!ArrayUtils.contains(skipProperties, key)) { - s_logger.info("Context property " + (String) key + ": " + conf.getProperty((String) key)); + LOGGER.info("Context property " + (String) key + ": " + conf.getProperty((String) key)); } } } @@ -304,13 +304,13 @@ public static void startWithContext(Properties conf, Object context, byte[] ksBi reportMethod = contextClazz.getDeclaredMethod("reportLoadInfo", String.class); ensureRouteMethod = contextClazz.getDeclaredMethod("ensureRoute", String.class); } catch (SecurityException e) { - s_logger.error("Unable to setup private channel due to SecurityException", e); + LOGGER.error("Unable to setup private channel due to SecurityException", e); } catch (NoSuchMethodException e) { - s_logger.error("Unable to setup private channel due to NoSuchMethodException", e); + LOGGER.error("Unable to setup private channel due to 
NoSuchMethodException", e); } catch (IllegalArgumentException e) { - s_logger.error("Unable to setup private channel due to IllegalArgumentException", e); + LOGGER.error("Unable to setup private channel due to IllegalArgumentException", e); } catch (ClassNotFoundException e) { - s_logger.error("Unable to setup private channel due to ClassNotFoundException", e); + LOGGER.error("Unable to setup private channel due to ClassNotFoundException", e); } // merge properties from conf file @@ -319,12 +319,12 @@ public static void startWithContext(Properties conf, Object context, byte[] ksBi if (confs == null) { final File file = PropertiesUtil.findConfigFile("consoleproxy.properties"); if (file == null) - s_logger.info("Can't load consoleproxy.properties from classpath, will use default configuration"); + LOGGER.info("Can't load consoleproxy.properties from classpath, will use default configuration"); else try { confs = new FileInputStream(file); } catch (FileNotFoundException e) { - s_logger.info("Ignoring file not found exception and using defaults"); + LOGGER.info("Ignoring file not found exception and using defaults"); } } if (confs != null) { @@ -338,13 +338,13 @@ public static void startWithContext(Properties conf, Object context, byte[] ksBi conf.put(key, props.get(key)); } } catch (Exception e) { - s_logger.error(e.toString(), e); + LOGGER.error(e.toString(), e); } } try { confs.close(); } catch (IOException e) { - s_logger.error("Failed to close consolepropxy.properties : " + e.toString(), e); + LOGGER.error("Failed to close consolepropxy.properties : " + e.toString(), e); } start(conf); @@ -357,21 +357,21 @@ public static void start(Properties conf) { ConsoleProxyServerFactory factory = getHttpServerFactory(); if (factory == null) { - s_logger.error("Unable to load console proxy server factory"); + LOGGER.error("Unable to load console proxy server factory"); System.exit(1); } if (httpListenPort != 0) { startupHttpMain(); } else { - s_logger.error("A valid HTTP 
server port is required to be specified, please check your consoleproxy.httpListenPort settings"); + LOGGER.error("A valid HTTP server port is required to be specified, please check your consoleproxy.httpListenPort settings"); System.exit(1); } if (httpCmdListenPort > 0) { startupHttpCmdPort(); } else { - s_logger.info("HTTP command port is disabled"); + LOGGER.info("HTTP command port is disabled"); } ConsoleProxyGCThread cthread = new ConsoleProxyGCThread(connectionMap, removedSessionsSet); @@ -383,7 +383,7 @@ private static void startupHttpMain() { try { ConsoleProxyServerFactory factory = getHttpServerFactory(); if (factory == null) { - s_logger.error("Unable to load HTTP server factory"); + LOGGER.error("Unable to load HTTP server factory"); System.exit(1); } @@ -399,7 +399,7 @@ private static void startupHttpMain() { noVNCServer.start(); } catch (Exception e) { - s_logger.error(e.getMessage(), e); + LOGGER.error(e.getMessage(), e); System.exit(1); } } @@ -413,13 +413,13 @@ private static ConsoleProxyNoVNCServer getNoVNCServer() { private static void startupHttpCmdPort() { try { - s_logger.info("Listening for HTTP CMDs on port " + httpCmdListenPort); + LOGGER.info("Listening for HTTP CMDs on port " + httpCmdListenPort); HttpServer cmdServer = HttpServer.create(new InetSocketAddress(httpCmdListenPort), 2); cmdServer.createContext("/cmd", new ConsoleProxyCmdHandler()); cmdServer.setExecutor(new ThreadExecutor()); // creates a default executor cmdServer.start(); } catch (Exception e) { - s_logger.error(e.getMessage(), e); + LOGGER.error(e.getMessage(), e); System.exit(1); } } @@ -432,17 +432,17 @@ public static void main(String[] argv) { InputStream confs = ConsoleProxy.class.getResourceAsStream("/conf/consoleproxy.properties"); Properties conf = new Properties(); if (confs == null) { - s_logger.info("Can't load consoleproxy.properties from classpath, will use default configuration"); + LOGGER.info("Can't load consoleproxy.properties from classpath, will use 
default configuration"); } else { try { conf.load(confs); } catch (Exception e) { - s_logger.error(e.toString(), e); + LOGGER.error(e.toString(), e); } finally { try { confs.close(); } catch (IOException ioex) { - s_logger.error(ioex.toString(), ioex); + LOGGER.error(ioex.toString(), ioex); } } } @@ -460,14 +460,14 @@ public static ConsoleProxyClient getVncViewer(ConsoleProxyClientParam param) thr viewer = getClient(param); viewer.initClient(param); connectionMap.put(clientKey, viewer); - s_logger.info("Added viewer object " + viewer); + LOGGER.info("Added viewer object " + viewer); reportLoadChange = true; } else if (!viewer.isFrontEndAlive()) { - s_logger.info("The rfb thread died, reinitializing the viewer " + viewer); + LOGGER.info("The rfb thread died, reinitializing the viewer " + viewer); viewer.initClient(param); } else if (!param.getClientHostPassword().equals(viewer.getClientHostPassword())) { - s_logger.warn("Bad sid detected(VNC port may be reused). sid in session: " + viewer.getClientHostPassword() + ", sid in request: " + + LOGGER.warn("Bad sid detected(VNC port may be reused). 
sid in session: " + viewer.getClientHostPassword() + ", sid in request: " + param.getClientHostPassword()); viewer.initClient(param); } @@ -477,8 +477,8 @@ public static ConsoleProxyClient getVncViewer(ConsoleProxyClientParam param) thr ConsoleProxyClientStatsCollector statsCollector = getStatsCollector(); String loadInfo = statsCollector.getStatsReport(); reportLoadInfo(loadInfo); - if (s_logger.isDebugEnabled()) - s_logger.debug("Report load change : " + loadInfo); + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Report load change : " + loadInfo); } return viewer; @@ -496,7 +496,7 @@ public static ConsoleProxyClient getAjaxVncViewer(ConsoleProxyClientParam param, viewer.initClient(param); connectionMap.put(clientKey, viewer); - s_logger.info("Added viewer object " + viewer); + LOGGER.info("Added viewer object " + viewer); reportLoadChange = true; } else { // protected against malicious attack by modifying URL content @@ -522,8 +522,8 @@ public static ConsoleProxyClient getAjaxVncViewer(ConsoleProxyClientParam param, ConsoleProxyClientStatsCollector statsCollector = getStatsCollector(); String loadInfo = statsCollector.getStatsReport(); reportLoadInfo(loadInfo); - if (s_logger.isDebugEnabled()) - s_logger.debug("Report load change : " + loadInfo); + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Report load change : " + loadInfo); } return viewer; } @@ -559,7 +559,7 @@ public static void authenticationExternally(ConsoleProxyClientParam param) throw ConsoleProxyAuthenticationResult authResult = authenticateConsoleAccess(param, false); if (authResult == null || !authResult.isSuccess()) { - s_logger.warn("External authenticator failed authentication request for vm " + param.getClientTag() + " with sid " + param.getClientHostPassword()); + LOGGER.warn("External authenticator failed authentication request for vm " + param.getClientTag() + " with sid " + param.getClientHostPassword()); throw new AuthenticationException("External authenticator failed request for vm " + 
param.getClientTag() + " with sid " + param.getClientHostPassword()); } @@ -609,14 +609,14 @@ public static ConsoleProxyNoVncClient getNoVncViewer(ConsoleProxyClientParam par try { authenticationExternally(param); } catch (Exception e) { - s_logger.error("Authentication failed for param: " + param); + LOGGER.error("Authentication failed for param: " + param); return null; } - s_logger.info("Initializing new novnc client and disconnecting existing session"); + LOGGER.info("Initializing new novnc client and disconnecting existing session"); try { ((ConsoleProxyNoVncClient)viewer).getSession().disconnect(); } catch (IOException e) { - s_logger.error("Exception while disconnect session of novnc viewer object: " + viewer, e); + LOGGER.error("Exception while disconnect session of novnc viewer object: " + viewer, e); } removeViewer(viewer); viewer = new ConsoleProxyNoVncClient(session); @@ -629,8 +629,8 @@ public static ConsoleProxyNoVncClient getNoVncViewer(ConsoleProxyClientParam par ConsoleProxyClientStatsCollector statsCollector = getStatsCollector(); String loadInfo = statsCollector.getStatsReport(); reportLoadInfo(loadInfo); - if (s_logger.isDebugEnabled()) - s_logger.debug("Report load change : " + loadInfo); + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Report load change : " + loadInfo); } return (ConsoleProxyNoVncClient)viewer; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java index 563843840eba..e42917db6aa0 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java @@ -35,7 +35,7 @@ import com.cloud.consoleproxy.util.Logger; public class ConsoleProxyAjaxHandler implements HttpHandler { - private static final Logger s_logger = 
Logger.getLogger(ConsoleProxyAjaxHandler.class); + protected Logger logger = Logger.getLogger(getClass()); public ConsoleProxyAjaxHandler() { } @@ -43,22 +43,22 @@ public ConsoleProxyAjaxHandler() { @Override public void handle(HttpExchange t) throws IOException { try { - if (s_logger.isTraceEnabled()) - s_logger.trace("AjaxHandler " + t.getRequestURI()); + if (logger.isTraceEnabled()) + logger.trace("AjaxHandler " + t.getRequestURI()); long startTick = System.currentTimeMillis(); doHandle(t); - if (s_logger.isTraceEnabled()) - s_logger.trace(t.getRequestURI() + " process time " + (System.currentTimeMillis() - startTick) + " ms"); + if (logger.isTraceEnabled()) + logger.trace(t.getRequestURI() + " process time " + (System.currentTimeMillis() - startTick) + " ms"); } catch (IOException e) { throw e; } catch (IllegalArgumentException e) { - s_logger.warn("Exception, ", e); + logger.warn("Exception, ", e); t.sendResponseHeaders(400, -1); // bad request } catch (Throwable e) { - s_logger.error("Unexpected exception, ", e); + logger.error("Unexpected exception, ", e); t.sendResponseHeaders(500, -1); // server error } finally { t.close(); @@ -67,8 +67,8 @@ public void handle(HttpExchange t) throws IOException { private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException { String queries = t.getRequestURI().getQuery(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Handle AJAX request: " + queries); + if (logger.isTraceEnabled()) + logger.trace("Handle AJAX request: " + queries); Map queryMap = ConsoleProxyHttpHandlerHelper.getQueryMap(queries); @@ -101,7 +101,7 @@ private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException try { port = Integer.parseInt(portStr); } catch (NumberFormatException e) { - s_logger.warn("Invalid number parameter in query string: " + portStr); + logger.warn("Invalid number parameter in query string: " + portStr); throw new IllegalArgumentException(e); } @@ -109,7 +109,7 @@ private void 
doHandle(HttpExchange t) throws Exception, IllegalArgumentException try { ajaxSessionId = Long.parseLong(ajaxSessionIdStr); } catch (NumberFormatException e) { - s_logger.warn("Invalid number parameter in query string: " + ajaxSessionIdStr); + logger.warn("Invalid number parameter in query string: " + ajaxSessionIdStr); throw new IllegalArgumentException(e); } } @@ -118,7 +118,7 @@ private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException try { event = Integer.parseInt(eventStr); } catch (NumberFormatException e) { - s_logger.warn("Invalid number parameter in query string: " + eventStr); + logger.warn("Invalid number parameter in query string: " + eventStr); throw new IllegalArgumentException(e); } } @@ -142,7 +142,7 @@ private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException viewer = ConsoleProxy.getAjaxVncViewer(param, ajaxSessionIdStr); } catch (Exception e) { - s_logger.warn("Failed to create viewer due to " + e.getMessage(), e); + logger.warn("Failed to create viewer due to " + e.getMessage(), e); String[] content = new String[] {"", "
", @@ -167,33 +167,33 @@ private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException } sendResponse(t, "text/html", "OK"); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Ajax request comes from a different session, id in request: " + ajaxSessionId + ", id in viewer: " + viewer.getAjaxSessionId()); + if (logger.isDebugEnabled()) + logger.debug("Ajax request comes from a different session, id in request: " + ajaxSessionId + ", id in viewer: " + viewer.getAjaxSessionId()); sendResponse(t, "text/html", "Invalid ajax client session id"); } } else { if (ajaxSessionId != 0 && ajaxSessionId != viewer.getAjaxSessionId()) { - s_logger.info("Ajax request comes from a different session, id in request: " + ajaxSessionId + ", id in viewer: " + viewer.getAjaxSessionId()); + logger.info("Ajax request comes from a different session, id in request: " + ajaxSessionId + ", id in viewer: " + viewer.getAjaxSessionId()); handleClientKickoff(t, viewer); } else if (ajaxSessionId == 0) { - if (s_logger.isDebugEnabled()) - s_logger.debug("Ajax request indicates a fresh client start"); + if (logger.isDebugEnabled()) + logger.debug("Ajax request indicates a fresh client start"); String title = queryMap.get("t"); String guest = queryMap.get("guest"); handleClientStart(t, viewer, title != null ? 
title : "", guest); } else { - if (s_logger.isTraceEnabled()) - s_logger.trace("Ajax request indicates client update"); + if (logger.isTraceEnabled()) + logger.trace("Ajax request indicates client update"); handleClientUpdate(t, viewer); } } } - private static String convertStreamToString(InputStream is, boolean closeStreamAfterRead) { + private String convertStreamToString(InputStream is, boolean closeStreamAfterRead) { BufferedReader reader = new BufferedReader(new InputStreamReader(is)); StringBuilder sb = new StringBuilder(); String line = null; @@ -202,7 +202,7 @@ private static String convertStreamToString(InputStream is, boolean closeStreamA sb.append(line + "\n"); } } catch (IOException e) { - s_logger.warn("Exception while reading request body: ", e); + logger.warn("Exception while reading request body: ", e); } finally { if (closeStreamAfterRead) { closeAutoCloseable(is, "error closing stream after read"); @@ -226,8 +226,8 @@ private void sendResponse(HttpExchange t, String contentType, String response) t @SuppressWarnings("deprecation") private void handleClientEventBag(ConsoleProxyClient viewer, String requestData) { - if (s_logger.isTraceEnabled()) - s_logger.trace("Handle event bag, event bag: " + requestData); + if (logger.isTraceEnabled()) + logger.trace("Handle event bag, event bag: " + requestData); int start = requestData.indexOf("="); if (start < 0) @@ -273,11 +273,11 @@ else if (start > 0) } } } catch (NumberFormatException e) { - s_logger.warn("Exception in handle client event bag: " + data + ", ", e); + logger.warn("Exception in handle client event bag: " + data + ", ", e); } catch (Exception e) { - s_logger.warn("Exception in handle client event bag: " + data + ", ", e); + logger.warn("Exception in handle client event bag: " + data + ", ", e); } catch (OutOfMemoryError e) { - s_logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); + logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); 
System.exit(1); } } @@ -300,7 +300,7 @@ private void handleClientEvent(ConsoleProxyClient viewer, int event, Map tiles, boolean init) { imgBits = getTilesMergedJpeg(tiles, tracker.getTileWidth(), tracker.getTileHeight()); if (imgBits == null) { - s_logger.warn("Unable to generate jpeg image"); + logger.warn("Unable to generate jpeg image"); } else { - if (s_logger.isTraceEnabled()) - s_logger.trace("Generated jpeg image size: " + imgBits.length); + if (logger.isTraceEnabled()) + logger.trace("Generated jpeg image size: " + imgBits.length); } int key = ajaxImageCache.putImage(imgBits); @@ -231,7 +232,7 @@ private boolean waitForViewerReady() { try { Thread.sleep(100); } catch (InterruptedException e) { - s_logger.debug("[ignored] Console proxy was interrupted while waiting for viewer to become ready."); + logger.debug("[ignored] Console proxy was interrupted while waiting for viewer to become ready."); } } return false; @@ -259,8 +260,8 @@ public String onAjaxClientStart(String title, List languages, String gue int width = tracker.getTrackWidth(); int height = tracker.getTrackHeight(); - if (s_logger.isTraceEnabled()) - s_logger.trace("Ajax client start, frame buffer w: " + width + ", " + height); + if (logger.isTraceEnabled()) + logger.trace("Ajax client start, frame buffer w: " + width + ", " + height); List tiles = tracker.scan(true); String imgUrl = prepareAjaxImage(tiles, true); @@ -344,7 +345,7 @@ public String onAjaxClientUpdate() { try { tileDirtyEvent.wait(3000); } catch (InterruptedException e) { - s_logger.debug("[ignored] Console proxy ajax update was interrupted while waiting for viewer to become ready."); + logger.debug("[ignored] Console proxy ajax update was interrupted while waiting for viewer to become ready."); } } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyCmdHandler.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyCmdHandler.java index 
6249e001818a..400eb2b99849 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyCmdHandler.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyCmdHandler.java @@ -27,26 +27,26 @@ import com.cloud.consoleproxy.util.Logger; public class ConsoleProxyCmdHandler implements HttpHandler { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyCmdHandler.class); + protected Logger logger = Logger.getLogger(getClass()); @Override public void handle(HttpExchange t) throws IOException { try { Thread.currentThread().setName("Cmd Thread " + Thread.currentThread().getId() + " " + t.getRemoteAddress()); - s_logger.info("CmdHandler " + t.getRequestURI()); + logger.info("CmdHandler " + t.getRequestURI()); doHandle(t); } catch (Exception e) { - s_logger.error(e.toString(), e); + logger.error(e.toString(), e); String response = "Not found"; t.sendResponseHeaders(404, response.length()); OutputStream os = t.getResponseBody(); os.write(response.getBytes()); os.close(); } catch (OutOfMemoryError e) { - s_logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); + logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); System.exit(1); } catch (Throwable e) { - s_logger.error(e.toString(), e); + logger.error(e.toString(), e); } finally { t.close(); } @@ -56,7 +56,7 @@ public void doHandle(HttpExchange t) throws Exception { String path = t.getRequestURI().getPath(); int i = path.indexOf("/", 1); String cmd = path.substring(i + 1); - s_logger.info("Get CMD request for " + cmd); + logger.info("Get CMD request for " + cmd); if (cmd.equals("getstatus")) { ConsoleProxyClientStatsCollector statsCollector = ConsoleProxy.getStatsCollector(); diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyGCThread.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyGCThread.java index 
16046abc7eb0..0e8f576cf6db 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyGCThread.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyGCThread.java @@ -22,7 +22,8 @@ import java.util.Map; import java.util.Set; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; /** * @@ -31,7 +32,7 @@ * management software */ public class ConsoleProxyGCThread extends Thread { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyGCThread.class); + protected Logger logger = LogManager.getLogger(ConsoleProxyGCThread.class); private final static int MAX_SESSION_IDLE_SECONDS = 180; @@ -58,7 +59,7 @@ private void cleanupLogging() { try { file.delete(); } catch (Throwable e) { - s_logger.info("[ignored]" + logger.info("[ignored]" + "failed to delete file: " + e.getLocalizedMessage()); } } @@ -76,8 +77,8 @@ public void run() { cleanupLogging(); bReportLoad = false; - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("connMap=%s, removedSessions=%s", connMap, removedSessionsSet)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("connMap=%s, removedSessions=%s", connMap, removedSessionsSet)); } Set e = connMap.keySet(); Iterator iterator = e.iterator(); @@ -101,7 +102,7 @@ public void run() { } // close the server connection - s_logger.info("Dropping " + client + " which has not been used for " + seconds_unused + " seconds"); + logger.info("Dropping " + client + " which has not been used for " + seconds_unused + " seconds"); client.closeClient(); } @@ -116,15 +117,15 @@ public void run() { removedSessionsSet.clear(); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Report load change : " + loadInfo); + if (logger.isDebugEnabled()) { + logger.debug("Report load change : " + loadInfo); } } try { Thread.sleep(5000); } catch (InterruptedException ex) { - s_logger.debug("[ignored] Console proxy was 
interrupted during GC."); + logger.debug("[ignored] Console proxy was interrupted during GC."); } } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java index ad2d944ef6ff..fb9d0794c227 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java @@ -22,7 +22,7 @@ import com.cloud.consoleproxy.util.Logger; public class ConsoleProxyHttpHandlerHelper { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyHttpHandlerHelper.class); + protected static Logger LOGGER = Logger.getLogger(ConsoleProxyHttpHandlerHelper.class); public static Map getQueryMap(String query) { String[] params = query.split("&"); @@ -39,8 +39,8 @@ public static Map getQueryMap(String query) { String value = paramTokens[1] + "=" + paramTokens[2]; map.put(name, value); } else { - if (s_logger.isDebugEnabled()) - s_logger.debug("Invalid parameter in URL found. param: " + param); + if (LOGGER.isDebugEnabled()) + LOGGER.debug("Invalid parameter in URL found. param: " + param); } } @@ -54,35 +54,35 @@ public static Map getQueryMap(String query) { guardUserInput(map); if (param != null) { if (param.getClientHostAddress() != null) { - s_logger.debug("decode token. host: " + param.getClientHostAddress()); + LOGGER.debug("decode token. host: " + param.getClientHostAddress()); map.put("host", param.getClientHostAddress()); } else { - s_logger.error("decode token. host info is not found!"); + LOGGER.error("decode token. host info is not found!"); } if (param.getClientHostPort() != 0) { - s_logger.debug("decode token. port: " + param.getClientHostPort()); + LOGGER.debug("decode token. 
port: " + param.getClientHostPort()); map.put("port", String.valueOf(param.getClientHostPort())); } else { - s_logger.error("decode token. port info is not found!"); + LOGGER.error("decode token. port info is not found!"); } if (param.getClientTag() != null) { - s_logger.debug("decode token. tag: " + param.getClientTag()); + LOGGER.debug("decode token. tag: " + param.getClientTag()); map.put("tag", param.getClientTag()); } else { - s_logger.error("decode token. tag info is not found!"); + LOGGER.error("decode token. tag info is not found!"); } if (param.getClientDisplayName() != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("decode token. displayname: " + param.getClientDisplayName()); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("decode token. displayname: " + param.getClientDisplayName()); } map.put("displayname", param.getClientDisplayName()); } else { - s_logger.error("decode token. displayname info is not found!"); + LOGGER.error("decode token. displayname info is not found!"); } if (param.getClientHostPassword() != null) { map.put("sid", param.getClientHostPassword()); } else { - s_logger.error("decode token. sid info is not found!"); + LOGGER.error("decode token. 
sid info is not found!"); } if (param.getClientTunnelUrl() != null) map.put("consoleurl", param.getClientTunnelUrl()); @@ -110,7 +110,7 @@ public static Map getQueryMap(String query) { map.put("extraSecurityToken", param.getExtraSecurityToken()); } } else { - s_logger.error("Unable to decode token"); + LOGGER.error("Unable to decode token"); } } else { // we no longer accept information from parameter other than token @@ -118,7 +118,7 @@ public static Map getQueryMap(String query) { } if (map.containsKey("extra")) { - s_logger.debug(String.format("Found extra parameter: %s for client security validation check " + + LOGGER.debug(String.format("Found extra parameter: %s for client security validation check " + "on the VNC server", map.get("extra"))); } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyLoggerFactory.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyLoggerFactory.java index 4ed3d94b6afb..74e393f64d81 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyLoggerFactory.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyLoggerFactory.java @@ -18,6 +18,7 @@ import com.cloud.consoleproxy.util.Logger; import com.cloud.consoleproxy.util.LoggerFactory; +import org.apache.logging.log4j.LogManager; public class ConsoleProxyLoggerFactory implements LoggerFactory { public ConsoleProxyLoggerFactory() { @@ -25,13 +26,13 @@ public ConsoleProxyLoggerFactory() { @Override public Logger getLogger(Class clazz) { - return new Log4jLogger(org.apache.log4j.Logger.getLogger(clazz)); + return new Log4jLogger(LogManager.getLogger(clazz)); } public static class Log4jLogger extends Logger { - private org.apache.log4j.Logger logger; + private org.apache.logging.log4j.Logger logger; - public Log4jLogger(org.apache.log4j.Logger logger) { + public Log4jLogger(org.apache.logging.log4j.Logger logger) { this.logger = logger; } diff 
--git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyMonitor.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyMonitor.java index 2cd510297d69..378072ad8045 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyMonitor.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyMonitor.java @@ -23,9 +23,9 @@ import java.util.HashMap; import java.util.Map; -import org.apache.log4j.xml.DOMConfigurator; import com.cloud.consoleproxy.util.Logger; +import org.apache.logging.log4j.core.config.Configurator; // // @@ -33,7 +33,7 @@ // itself and the shell script will re-launch console proxy // public class ConsoleProxyMonitor { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyMonitor.class); + protected Logger logger = Logger.getLogger(getClass()); private String[] _argv; private Map _argMap = new HashMap(); @@ -47,11 +47,11 @@ public ConsoleProxyMonitor(String[] argv) { for (String arg : _argv) { String[] tokens = arg.split("="); if (tokens.length == 2) { - s_logger.info("Add argument " + tokens[0] + "=" + tokens[1] + " to the argument map"); + logger.info("Add argument " + tokens[0] + "=" + tokens[1] + " to the argument map"); _argMap.put(tokens[0].trim(), tokens[1].trim()); } else { - s_logger.warn("unrecognized argument, skip adding it to argument map"); + logger.warn("unrecognized argument, skip adding it to argument map"); } } } @@ -68,12 +68,12 @@ public void run() { while (!_quit) { String cmdLine = getLaunchCommandLine(); - s_logger.info("Launch console proxy process with command line: " + cmdLine); + logger.info("Launch console proxy process with command line: " + cmdLine); try { _process = Runtime.getRuntime().exec(cmdLine); } catch (IOException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); System.exit(1); } @@ -84,11 +84,11 @@ public void run() { exitCode = 
_process.waitFor(); waitSucceeded = true; - if (s_logger.isInfoEnabled()) - s_logger.info("Console proxy process exits with code: " + exitCode); + if (logger.isInfoEnabled()) + logger.info("Console proxy process exits with code: " + exitCode); } catch (InterruptedException e) { - if (s_logger.isInfoEnabled()) - s_logger.info("InterruptedException while waiting for termination of console proxy, will retry"); + if (logger.isInfoEnabled()) + logger.info("InterruptedException while waiting for termination of console proxy, will retry"); } } } @@ -111,8 +111,8 @@ private String getLaunchCommandLine() { private void onShutdown() { if (_process != null) { - if (s_logger.isInfoEnabled()) - s_logger.info("Console proxy monitor shuts dwon, terminate console proxy process"); + if (logger.isInfoEnabled()) + logger.info("Console proxy monitor shuts dwon, terminate console proxy process"); _process.destroy(); } } @@ -136,7 +136,7 @@ private static void configLog4j() { File file = new File(configUrl.toURI()); System.out.println("Log4j configuration from : " + file.getAbsolutePath()); - DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + Configurator.initialize(null, file.getAbsolutePath()); } catch (URISyntaxException e) { System.out.println("Unable to convert log4j configuration Url to URI"); } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCHandler.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCHandler.java index 849042e7ec45..be0db7b8fb47 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCHandler.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCHandler.java @@ -40,7 +40,7 @@ public class ConsoleProxyNoVNCHandler extends WebSocketHandler { private ConsoleProxyNoVncClient viewer = null; - private static final Logger s_logger = Logger.getLogger(ConsoleProxyNoVNCHandler.class); + 
protected Logger logger = Logger.getLogger(ConsoleProxyNoVNCHandler.class); public ConsoleProxyNoVNCHandler() { super(); @@ -104,7 +104,7 @@ public void onConnect(final Session session) throws IOException, InterruptedExce try { port = Integer.parseInt(portStr); } catch (NumberFormatException e) { - s_logger.warn("Invalid number parameter in query string: " + portStr); + logger.warn("Invalid number parameter in query string: " + portStr); throw new IllegalArgumentException(e); } @@ -112,7 +112,7 @@ public void onConnect(final Session session) throws IOException, InterruptedExce try { ajaxSessionId = Long.parseLong(ajaxSessionIdStr); } catch (NumberFormatException e) { - s_logger.warn("Invalid number parameter in query string: " + ajaxSessionIdStr); + logger.warn("Invalid number parameter in query string: " + ajaxSessionIdStr); throw new IllegalArgumentException(e); } } @@ -145,7 +145,7 @@ public void onConnect(final Session session) throws IOException, InterruptedExce } viewer = ConsoleProxy.getNoVncViewer(param, ajaxSessionIdStr, session); } catch (Exception e) { - s_logger.warn("Failed to create viewer due to " + e.getMessage(), e); + logger.warn("Failed to create viewer due to " + e.getMessage(), e); return; } finally { if (viewer == null) { @@ -157,9 +157,9 @@ public void onConnect(final Session session) throws IOException, InterruptedExce private boolean checkSessionSourceIp(final Session session, final String sourceIP) throws IOException { // Verify source IP String sessionSourceIP = session.getRemoteAddress().getAddress().getHostAddress(); - s_logger.info("Get websocket connection request from remote IP : " + sessionSourceIP); + logger.info("Get websocket connection request from remote IP : " + sessionSourceIP); if (ConsoleProxy.isSourceIpCheckEnabled && (sessionSourceIP == null || ! 
sessionSourceIP.equals(sourceIP))) { - s_logger.warn("Failed to access console as the source IP to request the console is " + sourceIP); + logger.warn("Failed to access console as the source IP to request the console is " + sourceIP); session.disconnect(); return false; } @@ -180,6 +180,6 @@ public void onFrame(Frame f) throws IOException { @OnWebSocketError public void onError(Throwable cause) { - s_logger.error("Error on websocket", cause); + logger.error("Error on websocket", cause); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCServer.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCServer.java index a8e300429ae5..f65754169f64 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCServer.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVNCServer.java @@ -34,7 +34,7 @@ public class ConsoleProxyNoVNCServer { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyNoVNCServer.class); + protected static Logger LOGGER = Logger.getLogger(ConsoleProxyNoVNCServer.class); public static final int WS_PORT = 8080; public static final int WSS_PORT = 8443; private static final String VNC_CONF_FILE_LOCATION = "/root/vncport"; @@ -46,7 +46,7 @@ public static int getVNCPort() { try { portStr = Files.readString(Path.of(VNC_CONF_FILE_LOCATION)).trim(); } catch (IOException e) { - s_logger.error("Cannot read the VNC port from the file " + VNC_CONF_FILE_LOCATION + " setting it to 8080", e); + LOGGER.error("Cannot read the VNC port from the file " + VNC_CONF_FILE_LOCATION + " setting it to 8080", e); return WS_PORT; } return Integer.parseInt(portStr); @@ -85,7 +85,7 @@ public ConsoleProxyNoVNCServer(byte[] ksBits, String ksPassword) { sslConnector.setPort(WSS_PORT); server.addConnector(sslConnector); } catch (Exception e) { - s_logger.error("Unable to secure server due to exception ", e); + 
LOGGER.error("Unable to secure server due to exception ", e); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java index 27fabb53124b..38e5a3d41043 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java @@ -17,7 +17,8 @@ package com.cloud.consoleproxy; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.eclipse.jetty.websocket.api.Session; import org.eclipse.jetty.websocket.api.WebSocketException; import org.eclipse.jetty.websocket.api.extensions.Frame; @@ -32,7 +33,7 @@ import com.cloud.consoleproxy.vnc.NoVncClient; public class ConsoleProxyNoVncClient implements ConsoleProxyClient { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyNoVncClient.class); + protected Logger logger = LogManager.getLogger(getClass()); private static int nextClientId = 0; private NoVncClient client; @@ -74,7 +75,7 @@ public boolean isHostConnected() { public boolean isFrontEndAlive() { if (!connectionAlive || System.currentTimeMillis() - getClientLastFrontEndActivityTime() > ConsoleProxy.VIEWER_LINGER_SECONDS * 1000) { - s_logger.info("Front end has been idle for too long"); + logger.info("Front end has been idle for too long"); return false; } return true; @@ -117,14 +118,14 @@ public void run() { try { Thread.sleep(1); } catch (InterruptedException e) { - s_logger.error("Error on sleep for vnc over websocket", e); + logger.error("Error on sleep for vnc over websocket", e); } } else if (client.isVncOverNioSocket()) { byte[] bytesArr; int nextBytes = client.getNextBytes(); bytesArr = new byte[nextBytes]; client.readBytes(bytesArr, 
nextBytes); - s_logger.trace(String.format("Read [%s] bytes from client [%s]", nextBytes, clientId)); + logger.trace(String.format("Read [%s] bytes from client [%s]", nextBytes, clientId)); if (nextBytes > 0) { session.getRemote().sendBytes(ByteBuffer.wrap(bytesArr)); updateFrontEndActivityTime(); @@ -134,15 +135,15 @@ public void run() { } else { b = new byte[100]; readBytes = client.read(b); - s_logger.trace(String.format("Read [%s] bytes from client [%s]", readBytes, clientId)); + logger.trace(String.format("Read [%s] bytes from client [%s]", readBytes, clientId)); if (readBytes == -1 || (readBytes > 0 && !sendReadBytesToNoVNC(b, readBytes))) { connectionAlive = false; } } } - s_logger.info(String.format("Connection with client [%s] is dead.", clientId)); + logger.info(String.format("Connection with client [%s] is dead.", clientId)); } catch (IOException e) { - s_logger.error("Error on VNC client", e); + logger.error("Error on VNC client", e); } } @@ -155,7 +156,7 @@ private boolean sendReadBytesToNoVNC(byte[] b, int readBytes) { session.getRemote().sendBytes(ByteBuffer.wrap(b, 0, readBytes)); updateFrontEndActivityTime(); } catch (WebSocketException | IOException e) { - s_logger.debug("Connection exception", e); + logger.debug("Connection exception", e); return false; } return true; @@ -230,8 +231,8 @@ protected void handshakeProtocolVersion() { protected void authenticateVNCServerThroughNioSocket() { handshakePhase(); initialisationPhase(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Authenticated successfully"); + if (logger.isDebugEnabled()) { + logger.debug("Authenticated successfully"); } } @@ -261,7 +262,7 @@ private void sendMessageToVNCClient(byte[] arr, int length) { try { session.getRemote().sendBytes(ByteBuffer.wrap(arr, 0, length)); } catch (IOException e) { - s_logger.error("Error sending a message to the noVNC client", e); + logger.error("Error sending a message to the noVNC client", e); } } @@ -283,25 +284,25 @@ protected static byte[] 
rewriteServerNameInServerInit(byte[] serverInitBytes, St private void connectClientToVNCServer(String tunnelUrl, String tunnelSession, String websocketUrl) { try { if (StringUtils.isNotBlank(websocketUrl)) { - s_logger.info(String.format("Connect to VNC over websocket URL: %s", websocketUrl)); + logger.info(String.format("Connect to VNC over websocket URL: %s", websocketUrl)); client.connectToWebSocket(websocketUrl, session); } else if (tunnelUrl != null && !tunnelUrl.isEmpty() && tunnelSession != null && !tunnelSession.isEmpty()) { URI uri = new URI(tunnelUrl); - s_logger.info(String.format("Connect to VNC server via tunnel. url: %s, session: %s", + logger.info(String.format("Connect to VNC server via tunnel. url: %s, session: %s", tunnelUrl, tunnelSession)); ConsoleProxy.ensureRoute(uri.getHost()); client.connectTo(uri.getHost(), uri.getPort(), uri.getPath() + "?" + uri.getQuery(), tunnelSession, "https".equalsIgnoreCase(uri.getScheme())); } else { - s_logger.info(String.format("Connect to VNC server directly. host: %s, port: %s", + logger.info(String.format("Connect to VNC server directly. 
host: %s, port: %s", getClientHostAddress(), getClientHostPort())); ConsoleProxy.ensureRoute(getClientHostAddress()); client.connectTo(getClientHostAddress(), getClientHostPort()); } } catch (Throwable e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyPasswordBasedEncryptor.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyPasswordBasedEncryptor.java index 4fc85607b793..19f5d407c1cf 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyPasswordBasedEncryptor.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyPasswordBasedEncryptor.java @@ -17,7 +17,8 @@ package com.cloud.consoleproxy; import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.google.gson.Gson; import com.google.gson.GsonBuilder; @@ -26,7 +27,7 @@ import com.cloud.utils.crypt.Base64Encryptor; public class ConsoleProxyPasswordBasedEncryptor { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyPasswordBasedEncryptor.class); + protected Logger logger = LogManager.getLogger(getClass()); private Gson gson; diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyRdpClient.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyRdpClient.java index dc3f31b1235b..1824a13b2de5 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyRdpClient.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyRdpClient.java @@ -21,7 +21,6 @@ import java.awt.event.MouseEvent; import java.net.InetSocketAddress; -import org.apache.log4j.Logger; import rdpclient.RdpClient; import streamer.Pipeline; @@ -41,7 
+40,6 @@ public class ConsoleProxyRdpClient extends ConsoleProxyClientBase { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyRdpClient.class); private static final int SHIFT_KEY_MASK = 64; private static final int CTRL_KEY_MASK = 128; @@ -75,7 +73,7 @@ public void onClientConnected() { @Override public void onClientClose() { - s_logger.info("Received client close indication. remove viewer from map."); + logger.info("Received client close indication. remove viewer from map."); ConsoleProxy.removeViewer(this); } @@ -89,7 +87,7 @@ public boolean isHostConnected() { public boolean isFrontEndAlive() { if (_socket != null) { if (_workerDone || System.currentTimeMillis() - getClientLastFrontEndActivityTime() > ConsoleProxy.VIEWER_LINGER_SECONDS * 1000) { - s_logger.info("Front end has been idle for too long"); + logger.info("Front end has been idle for too long"); _socket.shutdown(); return false; } else { @@ -276,7 +274,7 @@ public void sizeChanged(int width, int height) { } }); - s_logger.info("connecting to instance " + instanceId + " on host " + host); + logger.info("connecting to instance " + instanceId + " on host " + host); _client = new RdpClient("client", host, domain, name, password, instanceId, _screen, _canvas, sslState); _mouseEventSource = _client.getMouseEventSource(); @@ -296,16 +294,16 @@ public void run() { try { _workerDone = false; - s_logger.info("Connecting socket to remote server and run main loop(s)"); + logger.info("Connecting socket to remote server and run main loop(s)"); _socket.connect(address); } catch (Exception e) { - s_logger.info(" error occurred in connecting to socket " + e.getMessage()); + logger.info(" error occurred in connecting to socket " + e.getMessage()); } finally { shutdown(); } _threadStopTime = System.currentTimeMillis(); - s_logger.info("Receiver thread stopped."); + logger.info("Receiver thread stopped."); _workerDone = true; } }); @@ -313,7 +311,7 @@ public void run() { _worker.start(); } catch 
(Exception e) { _workerDone = true; - s_logger.info("error occurred in initializing rdp client " + e.getMessage()); + logger.info("error occurred in initializing rdp client " + e.getMessage()); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyResourceHandler.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyResourceHandler.java index db24c951e4ba..949e632786c6 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyResourceHandler.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyResourceHandler.java @@ -31,7 +31,7 @@ import com.cloud.consoleproxy.util.Logger; public class ConsoleProxyResourceHandler implements HttpHandler { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyResourceHandler.class); + protected Logger logger = Logger.getLogger(getClass()); static Map s_mimeTypes; static { @@ -63,19 +63,19 @@ public ConsoleProxyResourceHandler() { @Override public void handle(HttpExchange t) throws IOException { try { - if (s_logger.isDebugEnabled()) - s_logger.debug("Resource Handler " + t.getRequestURI()); + if (logger.isDebugEnabled()) + logger.debug("Resource Handler " + t.getRequestURI()); long startTick = System.currentTimeMillis(); doHandle(t); - if (s_logger.isDebugEnabled()) - s_logger.debug(t.getRequestURI() + " Process time " + (System.currentTimeMillis() - startTick) + " ms"); + if (logger.isDebugEnabled()) + logger.debug(t.getRequestURI() + " Process time " + (System.currentTimeMillis() - startTick) + " ms"); } catch (IOException e) { throw e; } catch (Throwable e) { - s_logger.error("Unexpected exception, ", e); + logger.error("Unexpected exception, ", e); t.sendResponseHeaders(500, -1); // server error } finally { t.close(); @@ -86,8 +86,8 @@ public void handle(HttpExchange t) throws IOException { private void doHandle(HttpExchange t) throws Exception { String path = 
t.getRequestURI().getPath(); - if (s_logger.isInfoEnabled()) - s_logger.info("Get resource request for " + path); + if (logger.isInfoEnabled()) + logger.info("Get resource request for " + path); int i = path.indexOf("/", 1); String filepath = path.substring(i + 1); @@ -96,8 +96,8 @@ private void doHandle(HttpExchange t) throws Exception { String contentType = getContentType(extension); if (!validatePath(filepath)) { - if (s_logger.isInfoEnabled()) - s_logger.info("Resource access is forbidden, uri: " + path); + if (logger.isInfoEnabled()) + logger.info("Resource access is forbidden, uri: " + path); t.sendResponseHeaders(403, -1); // forbidden return; @@ -114,8 +114,8 @@ private void doHandle(HttpExchange t) throws Exception { hds.set("content-type", contentType); t.sendResponseHeaders(304, -1); - if (s_logger.isInfoEnabled()) - s_logger.info("Sent 304 file has not been " + "modified since " + ifModifiedSince); + if (logger.isInfoEnabled()) + logger.info("Sent 304 file has not been " + "modified since " + ifModifiedSince); return; } } @@ -127,11 +127,11 @@ private void doHandle(HttpExchange t) throws Exception { t.sendResponseHeaders(200, length); responseFileContent(t, f); - if (s_logger.isInfoEnabled()) - s_logger.info("Sent file " + path + " with content type " + contentType); + if (logger.isInfoEnabled()) + logger.info("Sent file " + path + " with content type " + contentType); } else { - if (s_logger.isInfoEnabled()) - s_logger.info("file does not exist" + path); + if (logger.isInfoEnabled()) + logger.info("file does not exist" + path); t.sendResponseHeaders(404, -1); } } @@ -158,17 +158,17 @@ private static void responseFileContent(HttpExchange t, File f) throws Exception } } - private static boolean validatePath(String path) { + private boolean validatePath(String path) { int i = path.indexOf("/"); if (i == -1) { - if (s_logger.isInfoEnabled()) - s_logger.info("Invalid resource path: can not start at resource root"); + if (logger.isInfoEnabled()) + 
logger.info("Invalid resource path: can not start at resource root"); return false; } if (path.contains("..")) { - if (s_logger.isInfoEnabled()) - s_logger.info("Invalid resource path: contains relative up-level navigation"); + if (logger.isInfoEnabled()) + logger.info("Invalid resource path: contains relative up-level navigation"); return false; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxySecureServerFactoryImpl.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxySecureServerFactoryImpl.java index df879fe9e826..a11ef7afaf2d 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxySecureServerFactoryImpl.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxySecureServerFactoryImpl.java @@ -21,7 +21,8 @@ import com.sun.net.httpserver.HttpsParameters; import com.sun.net.httpserver.HttpsServer; import org.apache.cloudstack.utils.security.SSLUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; @@ -35,7 +36,7 @@ import java.security.KeyStore; public class ConsoleProxySecureServerFactoryImpl implements ConsoleProxyServerFactory { - private static final Logger s_logger = Logger.getLogger(ConsoleProxySecureServerFactoryImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); private SSLContext sslContext = null; @@ -44,32 +45,32 @@ public ConsoleProxySecureServerFactoryImpl() { @Override public void init(byte[] ksBits, String ksPassword) { - s_logger.info("Start initializing SSL"); + logger.info("Start initializing SSL"); if (ksBits == null) { // this should not be the case - s_logger.info("No certificates passed, recheck global configuration and certificates"); + logger.info("No certificates passed, recheck global configuration and certificates"); } else { 
char[] passphrase = ksPassword != null ? ksPassword.toCharArray() : null; try { - s_logger.info("Initializing SSL from passed-in certificate"); + logger.info("Initializing SSL from passed-in certificate"); KeyStore ks = KeyStore.getInstance("JKS"); ks.load(new ByteArrayInputStream(ksBits), passphrase); KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509"); kmf.init(ks, passphrase); - s_logger.info("Key manager factory is initialized"); + logger.info("Key manager factory is initialized"); TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509"); tmf.init(ks); - s_logger.info("Trust manager factory is initialized"); + logger.info("Trust manager factory is initialized"); sslContext = SSLUtils.getSSLContext(); sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); - s_logger.info("SSL context is initialized"); + logger.info("SSL context is initialized"); } catch (Exception e) { - s_logger.error("Unable to init factory due to exception ", e); + logger.error("Unable to init factory due to exception ", e); } } @@ -98,10 +99,10 @@ public void configure(HttpsParameters params) { } }); - s_logger.info("create HTTPS server instance on port: " + port); + logger.info("create HTTPS server instance on port: " + port); return server; } catch (Exception ioe) { - s_logger.error(ioe.toString(), ioe); + logger.error(ioe.toString(), ioe); } return null; } @@ -115,10 +116,10 @@ public SSLServerSocket createSSLServerSocket(int port) throws IOException { srvSock.setEnabledProtocols(SSLUtils.getRecommendedProtocols()); srvSock.setEnabledCipherSuites(SSLUtils.getRecommendedCiphers()); - s_logger.info("create SSL server socket on port: " + port); + logger.info("create SSL server socket on port: " + port); return srvSock; } catch (Exception ioe) { - s_logger.error(ioe.toString(), ioe); + logger.error(ioe.toString(), ioe); } return null; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyThumbnailHandler.java 
b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyThumbnailHandler.java index 8f38539831a4..0103d9fa70eb 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyThumbnailHandler.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyThumbnailHandler.java @@ -35,7 +35,7 @@ import com.cloud.consoleproxy.util.Logger; public class ConsoleProxyThumbnailHandler implements HttpHandler { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyThumbnailHandler.class); + protected Logger logger = Logger.getLogger(getClass()); public ConsoleProxyThumbnailHandler() { } @@ -46,26 +46,26 @@ public void handle(HttpExchange t) throws IOException { try { Thread.currentThread().setName("JPG Thread " + Thread.currentThread().getId() + " " + t.getRemoteAddress()); - if (s_logger.isDebugEnabled()) - s_logger.debug("ScreenHandler " + t.getRequestURI()); + if (logger.isDebugEnabled()) + logger.debug("ScreenHandler " + t.getRequestURI()); long startTick = System.currentTimeMillis(); doHandle(t); - if (s_logger.isDebugEnabled()) - s_logger.debug(t.getRequestURI() + "Process time " + (System.currentTimeMillis() - startTick) + " ms"); + if (logger.isDebugEnabled()) + logger.debug(t.getRequestURI() + "Process time " + (System.currentTimeMillis() - startTick) + " ms"); } catch (IllegalArgumentException e) { String response = "Bad query string"; - s_logger.error(response + ", request URI : " + t.getRequestURI()); + logger.error(response + ", request URI : " + t.getRequestURI()); t.sendResponseHeaders(200, response.length()); OutputStream os = t.getResponseBody(); os.write(response.getBytes()); os.close(); } catch (OutOfMemoryError e) { - s_logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); + logger.error("Unrecoverable OutOfMemory Error, exit and let it be re-launched"); System.exit(1); } catch (Throwable e) { - s_logger.error("Unexpected exception 
while handing thumbnail request, ", e); + logger.error("Unexpected exception while handing thumbnail request, ", e); String queries = t.getRequestURI().getQuery(); Map queryMap = getQueryMap(queries); @@ -77,7 +77,7 @@ public void handle(HttpExchange t) throws IOException { width = Integer.parseInt(ws); height = Integer.parseInt(hs); } catch (NumberFormatException ex) { - s_logger.debug("Cannot parse width: " + ws + " or height: " + hs, ex); + logger.debug("Cannot parse width: " + ws + " or height: " + hs, ex); } width = Math.min(width, 800); height = Math.min(height, 600); @@ -94,7 +94,7 @@ public void handle(HttpExchange t) throws IOException { OutputStream os = t.getResponseBody(); os.write(bs); os.close(); - s_logger.error("Cannot get console, sent error JPG response for " + t.getRequestURI()); + logger.error("Cannot get console, sent error JPG response for " + t.getRequestURI()); return; } finally { t.close(); @@ -157,8 +157,8 @@ private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException os.write(bs); os.close(); - if (s_logger.isInfoEnabled()) - s_logger.info("Console not ready, sent dummy JPG response"); + if (logger.isInfoEnabled()) + logger.info("Console not ready, sent dummy JPG response"); return; } @@ -181,7 +181,7 @@ private void doHandle(HttpExchange t) throws Exception, IllegalArgumentException } } - public static BufferedImage generateTextImage(int w, int h, String text) { + public BufferedImage generateTextImage(int w, int h, String text) { BufferedImage img = new BufferedImage(w, h, BufferedImage.TYPE_3BYTE_BGR); Graphics2D g = img.createGraphics(); g.setColor(Color.BLACK); @@ -196,7 +196,7 @@ public static BufferedImage generateTextImage(int w, int h, String text) { startx = 0; g.drawString(text, startx, h / 2); } catch (Throwable e) { - s_logger.warn("Problem in generating text to thumnail image, return blank image"); + logger.warn("Problem in generating text to thumnail image, return blank image"); } return img; } diff 
--git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyVncClient.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyVncClient.java index 5992855ec607..921b2eb2e0fd 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyVncClient.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyVncClient.java @@ -20,7 +20,6 @@ import java.net.URI; import java.net.UnknownHostException; -import org.apache.log4j.Logger; import com.cloud.consoleproxy.vnc.FrameBufferCanvas; import com.cloud.consoleproxy.vnc.RfbConstants; @@ -32,7 +31,6 @@ * */ public class ConsoleProxyVncClient extends ConsoleProxyClientBase { - private static final Logger s_logger = Logger.getLogger(ConsoleProxyVncClient.class); private static final int SHIFT_KEY_MASK = 64; private static final int CTRL_KEY_MASK = 128; @@ -65,7 +63,7 @@ public boolean isHostConnected() { @Override public boolean isFrontEndAlive() { if (workerDone || System.currentTimeMillis() - getClientLastFrontEndActivityTime() > ConsoleProxy.VIEWER_LINGER_SECONDS * 1000) { - s_logger.info("Front end has been idle for too long"); + logger.info("Front end has been idle for too long"); return false; } return true; @@ -85,7 +83,7 @@ public void run() { try { if (tunnelUrl != null && !tunnelUrl.isEmpty() && tunnelSession != null && !tunnelSession.isEmpty()) { URI uri = new URI(tunnelUrl); - s_logger.info("Connect to VNC server via tunnel. url: " + tunnelUrl + ", session: " + tunnelSession); + logger.info("Connect to VNC server via tunnel. url: " + tunnelUrl + ", session: " + tunnelSession); ConsoleProxy.ensureRoute(uri.getHost()); client.connectTo( @@ -94,19 +92,19 @@ public void run() { tunnelSession, "https".equalsIgnoreCase(uri.getScheme()), getClientHostPassword()); } else { - s_logger.info("Connect to VNC server directly. 
host: " + getClientHostAddress() + ", port: " + getClientHostPort()); + logger.info("Connect to VNC server directly. host: " + getClientHostAddress() + ", port: " + getClientHostPort()); ConsoleProxy.ensureRoute(getClientHostAddress()); client.connectTo(getClientHostAddress(), getClientHostPort(), getClientHostPassword()); } } catch (UnknownHostException e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); } catch (IOException e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); } catch (Throwable e) { - s_logger.error("Unexpected exception", e); + logger.error("Unexpected exception", e); } - s_logger.info("Receiver thread stopped."); + logger.info("Receiver thread stopped."); workerDone = true; client.getClientListener().onClientClose(); } @@ -129,7 +127,7 @@ public void onClientConnected() { @Override public void onClientClose() { - s_logger.info("Received client close indication. remove viewer from map."); + logger.info("Received client close indication. 
remove viewer from map."); ConsoleProxy.removeViewer(this); } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/rdp/RdpBufferedImageCanvas.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/rdp/RdpBufferedImageCanvas.java index 386a198dcb63..7fd19a15d2fe 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/rdp/RdpBufferedImageCanvas.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/rdp/RdpBufferedImageCanvas.java @@ -36,7 +36,7 @@ public class RdpBufferedImageCanvas extends BufferedImageCanvas implements Frame * */ private static final long serialVersionUID = 1L; - private static final Logger s_logger = Logger.getLogger(RdpBufferedImageCanvas.class); + protected Logger logger = Logger.getLogger(RdpBufferedImageCanvas.class); private final ConsoleProxyRdpClient _rdpClient; @@ -68,7 +68,7 @@ public byte[] getFrameBufferJpeg() { try { imgBits = ImageHelper.jpegFromImage(bufferedImage); } catch (IOException e) { - s_logger.info("[ignored] read error on image", e); + logger.info("[ignored] read error on image", e); } return imgBits; @@ -94,7 +94,7 @@ public byte[] getTilesMergedJpeg(List tileList, int tileWidth, int til try { imgBits = ImageHelper.jpegFromImage(bufferedImage); } catch (IOException e) { - s_logger.info("[ignored] read error on image tiles", e); + logger.info("[ignored] read error on image tiles", e); } return imgBits; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/util/RawHTTP.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/util/RawHTTP.java index 21b624141782..bc47ca03d122 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/util/RawHTTP.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/util/RawHTTP.java @@ -48,7 +48,7 @@ * connections and import/export operations. 
*/ public final class RawHTTP { - private static final Logger s_logger = Logger.getLogger(RawHTTP.class); + protected Logger logger = Logger.getLogger(getClass()); private static final Pattern END_PATTERN = Pattern.compile("^\r\n$"); private static final Pattern HEADER_PATTERN = Pattern.compile("^([A-Z_a-z0-9-]+):\\s*(.*)\r\n$"); @@ -140,9 +140,9 @@ private Socket _getSocket() throws IOException { try { context = SSLUtils.getSSLContext("SunJSSE"); } catch (NoSuchAlgorithmException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); } catch (NoSuchProviderException e) { - s_logger.error("Unexpected exception ", e); + logger.error("Unexpected exception ", e); } if (context == null) @@ -156,12 +156,12 @@ private Socket _getSocket() throws IOException { ssl.setEnabledProtocols(SSLUtils.getSupportedProtocols(ssl.getEnabledProtocols())); /* ssl.setSSLParameters(context.getDefaultSSLParameters()); */ } catch (IOException e) { - s_logger.error("IOException: " + e.getMessage(), e); + logger.error("IOException: " + e.getMessage(), e); throw e; } catch (KeyManagementException e) { - s_logger.error("KeyManagementException: " + e.getMessage(), e); + logger.error("KeyManagementException: " + e.getMessage(), e); } catch (NoSuchAlgorithmException e) { - s_logger.error("NoSuchAlgorithmException: " + e.getMessage(), e); + logger.error("NoSuchAlgorithmException: " + e.getMessage(), e); } return ssl; } else { diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java index 8e27b4c1c1ba..9b86a8fbc66b 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/BufferedImageCanvas.java @@ -36,7 +36,7 @@ */ public class BufferedImageCanvas extends Canvas implements 
FrameBufferCanvas { private static final long serialVersionUID = 1L; - private static final Logger s_logger = Logger.getLogger(BufferedImageCanvas.class); + protected Logger logger = Logger.getLogger(BufferedImageCanvas.class); // Offline screen buffer private BufferedImage offlineImage; @@ -123,7 +123,7 @@ public byte[] getFrameBufferJpeg() { try { imgBits = ImageHelper.jpegFromImage(bufferedImage); } catch (IOException e) { - s_logger.info("[ignored] read error on image", e); + logger.info("[ignored] read error on image", e); } return imgBits; } @@ -147,7 +147,7 @@ public byte[] getTilesMergedJpeg(List tileList, int tileWidth, int til try { imgBits = ImageHelper.jpegFromImage(bufferedImage); } catch (IOException e) { - s_logger.info("[ignored] read error on image tiles", e); + logger.info("[ignored] read error on image tiles", e); } return imgBits; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/NoVncClient.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/NoVncClient.java index c2d57bcfe3fd..c5764a994c59 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/NoVncClient.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/NoVncClient.java @@ -54,7 +54,7 @@ import javax.crypto.spec.DESKeySpec; public class NoVncClient { - private static final Logger s_logger = Logger.getLogger(NoVncClient.class); + protected Logger logger = Logger.getLogger(getClass()); private Socket socket; private DataInputStream is; @@ -86,12 +86,12 @@ public void connectTo(String host, int port, String path, String session, boolea public void connectTo(String host, int port) { // Connect to server - s_logger.info(String.format("Connecting to VNC server %s:%s ...", host, port)); + logger.info(String.format("Connecting to VNC server %s:%s ...", host, port)); try { NioSocket nioSocket = new NioSocket(host, port); this.nioSocketConnection = new 
NioSocketHandlerImpl(nioSocket); } catch (Exception e) { - s_logger.error(String.format("Cannot create socket to host: %s and port %s: %s", host, port, + logger.error(String.format("Cannot create socket to host: %s and port %s: %s", host, port, e.getMessage()), e); } } @@ -150,7 +150,7 @@ public String handshake() throws IOException { if (!rfbProtocol.contains(RfbConstants.RFB_PROTOCOL_VERSION_MAJOR)) { String msg = String.format("Cannot handshake with VNC server. Unsupported protocol version: [%s]", rfbProtocol); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } @@ -175,7 +175,7 @@ public byte[] authenticateTunnel(String password) is.readFully(buf); String reason = new String(buf, RfbConstants.CHARSET); - s_logger.error("Authentication to VNC server is failed. Reason: " + reason); + logger.error("Authentication to VNC server is failed. Reason: " + reason); throw new RuntimeException("Authentication to VNC server is failed. Reason: " + reason); } @@ -185,13 +185,13 @@ public byte[] authenticateTunnel(String password) } case RfbConstants.VNC_AUTH: { - s_logger.info("VNC server requires password authentication"); + logger.info("VNC server requires password authentication"); doVncAuth(is, os, password); break; } default: - s_logger.error("Unsupported VNC protocol authorization scheme, scheme code: " + authType + "."); + logger.error("Unsupported VNC protocol authorization scheme, scheme code: " + authType + "."); throw new RuntimeException( "Unsupported VNC protocol authorization scheme, scheme code: " + authType + "."); } @@ -214,7 +214,7 @@ private void doVncAuth(DataInputStream in, DataOutputStream out, String password try { response = encodePassword(challenge, password); } catch (Exception e) { - s_logger.error("Cannot encrypt client password to send to server: " + e.getMessage()); + logger.error("Cannot encrypt client password to send to server: " + e.getMessage()); throw new RuntimeException("Cannot encrypt client password to 
send to server: " + e.getMessage()); } @@ -227,7 +227,7 @@ private void doVncAuth(DataInputStream in, DataOutputStream out, String password Pair pair = processSecurityResultType(authResult); boolean success = BooleanUtils.toBoolean(pair.first()); if (!success) { - s_logger.error(pair.second()); + logger.error(pair.second()); throw new CloudRuntimeException(pair.second()); } } @@ -270,8 +270,8 @@ private void agreeVEncryptVersion() throws IOException { int majorVEncryptVersion = nioSocketConnection.readUnsignedInteger(8); int minorVEncryptVersion = nioSocketConnection.readUnsignedInteger(8); int vEncryptVersion = (majorVEncryptVersion << 8) | minorVEncryptVersion; - if (s_logger.isDebugEnabled()) { - s_logger.debug("VEncrypt version offered by the server: " + vEncryptVersion); + if (logger.isDebugEnabled()) { + logger.debug("VEncrypt version offered by the server: " + vEncryptVersion); } nioSocketConnection.writeUnsignedInteger(8, majorVEncryptVersion); if (vEncryptVersion >= 2) { @@ -297,8 +297,8 @@ private int selectVEncryptSubtype() { nioSocketConnection.waitForBytesAvailableForReading(4); int subtype = nioSocketConnection.readUnsignedInteger(32); if (subtype == RfbConstants.V_ENCRYPT_X509_VNC) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Selected VEncrypt subtype " + subtype); + if (logger.isDebugEnabled()) { + logger.debug("Selected VEncrypt subtype " + subtype); } return subtype; } @@ -373,7 +373,7 @@ protected void writeDataNioSocketConnection(byte[] data) { public ByteBuffer handshakeProtocolVersion() { ByteBuffer verStr = ByteBuffer.allocate(12); - s_logger.debug("Reading RFB protocol version"); + logger.debug("Reading RFB protocol version"); nioSocketConnection.readBytes(verStr, 12); @@ -390,8 +390,8 @@ public void waitForNoVNCReply() { while (isWaitForNoVnc()) { cycles++; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Waited %d cycles for NoVnc", cycles)); + if (logger.isDebugEnabled()) { + 
logger.debug(String.format("Waited %d cycles for NoVnc", cycles)); } } @@ -403,8 +403,8 @@ public void waitForNoVNCReply() { */ public int handshakeSecurityType() { waitForNoVNCReply(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing security types message"); + if (logger.isDebugEnabled()) { + logger.debug("Processing security types message"); } int selectedSecurityType = RfbConstants.CONNECTION_FAILED; @@ -420,13 +420,13 @@ public int handshakeSecurityType() { for (int i = 0; i < serverOfferedSecurityTypes; i++) { int serverSecurityType = nioSocketConnection.readUnsignedInteger(8); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Server offers security type: %s", serverSecurityType)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Server offers security type: %s", serverSecurityType)); } if (supportedSecurityTypes.contains(serverSecurityType)) { selectedSecurityType = serverSecurityType; - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Selected supported security type: %s", selectedSecurityType)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Selected supported security type: %s", selectedSecurityType)); } break; } @@ -473,8 +473,8 @@ private Pair processSecurityResultType(int authResult) { } public void processSecurityResultMsg() { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing security result message"); + if (logger.isDebugEnabled()) { + logger.debug("Processing security result message"); } nioSocketConnection.waitForBytesAvailableForReading(1); @@ -485,10 +485,10 @@ public void processSecurityResultMsg() { if (success) { securityPhaseCompleted = true; } else { - s_logger.error(securityResultType.second()); + logger.error(securityResultType.second()); String reason = nioSocketConnection.readString(); String msg = String.format("%s - Reason: %s", securityResultType.second(), reason); - s_logger.error(msg); + logger.error(msg); throw new 
CloudRuntimeException(msg); } } @@ -517,13 +517,13 @@ public void processHandshakeSecurityType(int secType, String vmPassword, String for (VncSecurity security : vncSecurityStack) { security.process(this.nioSocketConnection); if (security instanceof VncTLSSecurity) { - s_logger.debug("Setting new streams with SSLEngineManger after TLS security has passed"); + logger.debug("Setting new streams with SSLEngineManger after TLS security has passed"); NioSocketSSLEngineManager sslEngineManager = ((VncTLSSecurity) security).getSSLEngineManager(); nioSocketConnection.startTLSConnection(sslEngineManager); } } } catch (IOException e) { - s_logger.error("Error processing handshake security type " + secType, e); + logger.error("Error processing handshake security type " + secType, e); } } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClient.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClient.java index e8b53a29b7b9..e5a9918d9353 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClient.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClient.java @@ -39,7 +39,7 @@ import com.cloud.consoleproxy.vnc.packet.client.MouseEventPacket; public class VncClient { - private static final Logger s_logger = Logger.getLogger(VncClient.class); + protected static Logger LOGGER = Logger.getLogger(VncClient.class); private Socket socket; private DataInputStream is; @@ -66,23 +66,23 @@ public static void main(String args[]) { try { new VncClient(host, Integer.parseInt(port), password, false, null); } catch (NumberFormatException e) { - s_logger.error("Incorrect VNC server port number: " + port + "."); + LOGGER.error("Incorrect VNC server port number: " + port + "."); System.exit(1); } catch (UnknownHostException e) { - s_logger.error("Incorrect VNC server host name: " + host + "."); + LOGGER.error("Incorrect VNC server host name: " + host + "."); 
System.exit(1); } catch (IOException e) { - s_logger.error("Cannot communicate with VNC server: " + e.getMessage()); + LOGGER.error("Cannot communicate with VNC server: " + e.getMessage()); System.exit(1); } catch (Throwable e) { - s_logger.error("An error happened: " + e.getMessage()); + LOGGER.error("An error happened: " + e.getMessage()); System.exit(1); } System.exit(0); } private static void printHelpMessage() { - /* LOG */s_logger.info("Usage: HOST PORT PASSWORD."); + /* LOGGER */LOGGER.info("Usage: HOST PORT PASSWORD."); } public VncClient(ConsoleProxyClientListener clientListener) { @@ -108,7 +108,7 @@ public void shutdown() { try { is.close(); } catch (Throwable e) { - s_logger.info("[ignored]" + LOGGER.info("[ignored]" + "failed to close resource for input: " + e.getLocalizedMessage()); } } @@ -117,7 +117,7 @@ public void shutdown() { try { os.close(); } catch (Throwable e) { - s_logger.info("[ignored]" + LOGGER.info("[ignored]" + "failed to get close resource for output: " + e.getLocalizedMessage()); } } @@ -126,7 +126,7 @@ public void shutdown() { try { socket.close(); } catch (Throwable e) { - s_logger.info("[ignored]" + LOGGER.info("[ignored]" + "failed to get close resource for socket: " + e.getLocalizedMessage()); } } @@ -151,7 +151,7 @@ public void connectTo(String host, int port, String path, String session, boolea public void connectTo(String host, int port, String password) throws UnknownHostException, IOException { // Connect to server - s_logger.info("Connecting to VNC server " + host + ":" + port + "..."); + LOGGER.info("Connecting to VNC server " + host + ":" + port + "..."); socket = new Socket(host, port); doConnect(password); } @@ -165,7 +165,7 @@ private void doConnect(String password) throws IOException { authenticate(password); initialize(); - s_logger.info("Connecting to VNC server succeeded, start session"); + LOGGER.info("Connecting to VNC server succeeded, start session"); // Run client-to-server packet sender sender = new 
VncClientPacketSender(os, screen, this); @@ -233,7 +233,7 @@ private void handshake() throws IOException { // Server should use RFB protocol 3.x if (!rfbProtocol.contains(RfbConstants.RFB_PROTOCOL_VERSION_MAJOR)) { - s_logger.error("Cannot handshake with VNC server. Unsupported protocol version: \"" + rfbProtocol + "\"."); + LOGGER.error("Cannot handshake with VNC server. Unsupported protocol version: \"" + rfbProtocol + "\"."); throw new RuntimeException("Cannot handshake with VNC server. Unsupported protocol version: \"" + rfbProtocol + "\"."); } @@ -259,7 +259,7 @@ private void authenticate(String password) throws IOException { is.readFully(buf); String reason = new String(buf, RfbConstants.CHARSET); - s_logger.error("Authentication to VNC server is failed. Reason: " + reason); + LOGGER.error("Authentication to VNC server is failed. Reason: " + reason); throw new RuntimeException("Authentication to VNC server is failed. Reason: " + reason); } @@ -269,13 +269,13 @@ private void authenticate(String password) throws IOException { } case RfbConstants.VNC_AUTH: { - s_logger.info("VNC server requires password authentication"); + LOGGER.info("VNC server requires password authentication"); doVncAuth(password); break; } default: - s_logger.error("Unsupported VNC protocol authorization scheme, scheme code: " + authType + "."); + LOGGER.error("Unsupported VNC protocol authorization scheme, scheme code: " + authType + "."); throw new RuntimeException("Unsupported VNC protocol authorization scheme, scheme code: " + authType + "."); } } @@ -294,7 +294,7 @@ private void doVncAuth(String password) throws IOException { try { response = encodePassword(challenge, password); } catch (Exception e) { - s_logger.error("Cannot encrypt client password to send to server: " + e.getMessage()); + LOGGER.error("Cannot encrypt client password to send to server: " + e.getMessage()); throw new RuntimeException("Cannot encrypt client password to send to server: " + e.getMessage()); } @@ -312,15 
+312,15 @@ private void doVncAuth(String password) throws IOException { } case RfbConstants.VNC_AUTH_TOO_MANY: - s_logger.error("Connection to VNC server failed: too many wrong attempts."); + LOGGER.error("Connection to VNC server failed: too many wrong attempts."); throw new RuntimeException("Connection to VNC server failed: too many wrong attempts."); case RfbConstants.VNC_AUTH_FAILED: - s_logger.error("Connection to VNC server failed: wrong password."); + LOGGER.error("Connection to VNC server failed: wrong password."); throw new RuntimeException("Connection to VNC server failed: wrong password."); default: - s_logger.error("Connection to VNC server failed, reason code: " + authResult); + LOGGER.error("Connection to VNC server failed, reason code: " + authResult); throw new RuntimeException("Connection to VNC server failed, reason code: " + authResult); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClientPacketSender.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClientPacketSender.java index 480aeae99681..12daca619cec 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClientPacketSender.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncClientPacketSender.java @@ -36,7 +36,7 @@ import com.cloud.consoleproxy.vnc.packet.client.SetPixelFormatPacket; public class VncClientPacketSender implements Runnable, PaintNotificationListener, KeyListener, MouseListener, MouseMotionListener, FrameBufferUpdateListener { - private static final Logger s_logger = Logger.getLogger(VncClientPacketSender.class); + protected Logger logger = Logger.getLogger(getClass()); // Queue for outgoing packets private final BlockingQueue queue = new ArrayBlockingQueue(30); @@ -75,12 +75,12 @@ public void run() { } } } catch (Throwable e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); if 
(connectionAlive) { closeConnection(); } } finally { - s_logger.info("Sending thread exit processing, shutdown connection"); + logger.info("Sending thread exit processing, shutdown connection"); vncConnection.shutdown(); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java index b98f57fcce59..effcb7b45998 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/VncServerPacketReceiver.java @@ -27,7 +27,7 @@ import com.cloud.consoleproxy.vnc.packet.server.ServerCutText; public class VncServerPacketReceiver implements Runnable { - private static final Logger s_logger = Logger.getLogger(VncServerPacketReceiver.class); + protected Logger logger = Logger.getLogger(getClass()); private final VncScreenDescription screen; private BufferedImageCanvas canvas; @@ -87,12 +87,12 @@ public void run() { } } } catch (Throwable e) { - s_logger.error("Unexpected exception: ", e); + logger.error("Unexpected exception: ", e); if (connectionAlive) { closeConnection(); } } finally { - s_logger.info("Receiving thread exit processing, shutdown connection"); + logger.info("Receiving thread exit processing, shutdown connection"); vncConnection.shutdown(); } } @@ -120,6 +120,6 @@ private void serverCutText(DataInputStream is) throws IOException { StringSelection contents = new StringSelection(clipboardContent.getContent()); Toolkit.getDefaultToolkit().getSystemClipboard().setContents(contents, null); - s_logger.info("Server clipboard buffer: " + clipboardContent.getContent()); + logger.info("Server clipboard buffer: " + clipboardContent.getContent()); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocket.java 
b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocket.java index d1779042d86f..dfc47f333779 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocket.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocket.java @@ -16,7 +16,8 @@ // under the License. package com.cloud.consoleproxy.vnc.network; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.io.IOException; import java.net.InetSocketAddress; @@ -33,7 +34,7 @@ public class NioSocket { private Selector readSelector; private static final int CONNECTION_TIMEOUT_MILLIS = 3000; - private static final Logger s_logger = Logger.getLogger(NioSocket.class); + protected Logger logger = LogManager.getLogger(getClass()); private void initializeSocket() { try { @@ -45,14 +46,14 @@ private void initializeSocket() { socketChannel.register(writeSelector, SelectionKey.OP_WRITE); socketChannel.register(readSelector, SelectionKey.OP_READ); } catch (IOException e) { - s_logger.error("Could not initialize NioSocket: " + e.getMessage(), e); + logger.error("Could not initialize NioSocket: " + e.getMessage(), e); } } private void waitForSocketSelectorConnected(Selector selector) { try { while (selector.select(CONNECTION_TIMEOUT_MILLIS) <= 0) { - s_logger.debug("Waiting for ready operations to connect to the socket"); + logger.debug("Waiting for ready operations to connect to the socket"); } Set keys = selector.selectedKeys(); for (SelectionKey selectionKey: keys) { @@ -60,12 +61,12 @@ private void waitForSocketSelectorConnected(Selector selector) { if (socketChannel.isConnectionPending()) { socketChannel.finishConnect(); } - s_logger.debug("Connected to the socket"); + logger.debug("Connected to the socket"); break; } } } catch (IOException e) { - s_logger.error(String.format("Error waiting for socket selector ready: %s", 
e.getMessage()), e); + logger.error(String.format("Error waiting for socket selector ready: %s", e.getMessage()), e); } } @@ -78,7 +79,7 @@ private void connectSocket(String host, int port) { waitForSocketSelectorConnected(selector); socketChannel.socket().setTcpNoDelay(false); } catch (IOException e) { - s_logger.error(String.format("Error creating NioSocket to %s:%s: %s", host, port, e.getMessage()), e); + logger.error(String.format("Error creating NioSocket to %s:%s: %s", host, port, e.getMessage()), e); } } @@ -93,7 +94,7 @@ protected int select(boolean read, Integer timeout) { selector.selectedKeys().clear(); return timeout == null ? selector.select() : selector.selectNow(); } catch (IOException e) { - s_logger.error(String.format("Error obtaining %s select: %s", read ? "read" : "write", e.getMessage()), e); + logger.error(String.format("Error obtaining %s select: %s", read ? "read" : "write", e.getMessage()), e); return -1; } } @@ -105,7 +106,7 @@ protected int readFromSocketChannel(ByteBuffer readBuffer, int len) { readBuffer.position(position + readBytes); return Math.max(readBytes, 0); } catch (Exception e) { - s_logger.error("Error reading from socket channel: " + e.getMessage(), e); + logger.error("Error reading from socket channel: " + e.getMessage(), e); return 0; } } @@ -116,7 +117,7 @@ protected int writeToSocketChannel(ByteBuffer buf, int len) { buf.position(buf.position() + writtenBytes); return writtenBytes; } catch (java.io.IOException e) { - s_logger.error("Error writing bytes to socket channel: " + e.getMessage(), e); + logger.error("Error writing bytes to socket channel: " + e.getMessage(), e); return 0; } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketHandlerImpl.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketHandlerImpl.java index 27414aed8aba..3aa3524ea838 100644 --- 
a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketHandlerImpl.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketHandlerImpl.java @@ -16,7 +16,9 @@ // under the License. package com.cloud.consoleproxy.vnc.network; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.nio.ByteBuffer; @@ -28,7 +30,7 @@ public class NioSocketHandlerImpl implements NioSocketHandler { private static final int DEFAULT_BUF_SIZE = 16384; - private static final Logger s_logger = Logger.getLogger(NioSocketHandlerImpl.class); + protected Logger logger = LogManager.getLogger(getClass()); public NioSocketHandlerImpl(NioSocket socket) { this.inputStream = new NioSocketInputStream(DEFAULT_BUF_SIZE, socket); @@ -53,7 +55,7 @@ public void readBytes(ByteBuffer data, int length) { @Override public void waitForBytesAvailableForReading(int bytes) { while (!inputStream.checkForSizeWithoutWait(bytes)) { - s_logger.trace("Waiting for inStream to be ready"); + logger.trace("Waiting for inStream to be ready"); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketStream.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketStream.java index 66c18f09fd39..c00ca8407baf 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketStream.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketStream.java @@ -17,7 +17,8 @@ package com.cloud.consoleproxy.vnc.network; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class NioSocketStream { @@ -28,7 +29,7 @@ public class NioSocketStream { protected int start; protected NioSocket socket; - private 
static final Logger s_logger = Logger.getLogger(NioSocketStream.class); + protected Logger logger = LogManager.getLogger(getClass()); public NioSocketStream(int bufferSize, NioSocket socket) { this.buffer = new byte[bufferSize]; @@ -46,7 +47,7 @@ protected boolean isUnsignedIntegerSizeAllowed(int sizeInBits) { protected void checkUnsignedIntegerSize(int sizeInBits) { if (!isUnsignedIntegerSizeAllowed(sizeInBits)) { String msg = "Unsupported size in bits for unsigned integer reading " + sizeInBits; - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } @@ -82,7 +83,7 @@ protected void placeUnsignedIntegerToBuffer(int bytes, int value) { protected void checkItemSizeOnBuffer(int itemSize) { if (itemSize > buffer.length) { String msg = String.format("Item size: %s exceeds the buffer size: %s", itemSize, buffer.length); - s_logger.error(msg); + logger.error(msg); throw new CloudRuntimeException(msg); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSInputStream.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSInputStream.java index 15a3e15fd05a..f57a56e8a946 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSInputStream.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSInputStream.java @@ -17,7 +17,6 @@ package com.cloud.consoleproxy.vnc.network; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.log4j.Logger; import java.io.IOException; import java.nio.ByteBuffer; @@ -26,8 +25,6 @@ public class NioSocketTLSInputStream extends NioSocketInputStream { private final NioSocketSSLEngineManager sslEngineManager; - private static final Logger s_logger = Logger.getLogger(NioSocketTLSInputStream.class); - public NioSocketTLSInputStream(NioSocketSSLEngineManager sslEngineManager, NioSocket socket) { 
super(sslEngineManager.getSession().getApplicationBufferSize(), socket); this.sslEngineManager = sslEngineManager; @@ -42,7 +39,7 @@ protected int readFromSSLEngineManager(byte[] buffer, int startPos, int length) } return readBytes; } catch (IOException e) { - s_logger.error(String.format("Error reading from SSL engine manager: %s", e.getMessage()), e); + logger.error(String.format("Error reading from SSL engine manager: %s", e.getMessage()), e); } return 0; } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSOutputStream.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSOutputStream.java index 8ee01af059e2..6024e2718e9b 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSOutputStream.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/network/NioSocketTLSOutputStream.java @@ -16,8 +16,6 @@ // under the License. 
package com.cloud.consoleproxy.vnc.network; -import org.apache.log4j.Logger; - import java.io.IOException; import java.nio.ByteBuffer; @@ -25,8 +23,6 @@ public class NioSocketTLSOutputStream extends NioSocketOutputStream { private final NioSocketSSLEngineManager sslEngineManager; - private static final Logger s_logger = Logger.getLogger(NioSocketTLSOutputStream.class); - public NioSocketTLSOutputStream(NioSocketSSLEngineManager sslEngineManager, NioSocket socket) { super(sslEngineManager.getSession().getApplicationBufferSize(), socket); this.sslEngineManager = sslEngineManager; @@ -48,7 +44,7 @@ protected int writeThroughSSLEngineManager(byte[] data, int startPos, int length try { return sslEngineManager.write(ByteBuffer.wrap(data, startPos, length)); } catch (IOException e) { - s_logger.error(String.format("Error writing though SSL engine manager: %s", e.getMessage()), e); + logger.error(String.format("Error writing though SSL engine manager: %s", e.getMessage()), e); return 0; } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/AbstractRect.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/AbstractRect.java index 5880dd563efd..2059278905b8 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/AbstractRect.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/AbstractRect.java @@ -16,8 +16,12 @@ // under the License. 
package com.cloud.consoleproxy.vnc.packet.server; +import com.cloud.consoleproxy.util.Logger; + public abstract class AbstractRect implements Rect { + protected Logger logger = Logger.getLogger(getClass()); + protected final int x; protected final int y; protected final int width; diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/RawRect.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/RawRect.java index 37f0f9e05775..7bcfc2cae17e 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/RawRect.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/RawRect.java @@ -23,11 +23,9 @@ import java.io.DataInputStream; import java.io.IOException; -import com.cloud.consoleproxy.util.Logger; import com.cloud.consoleproxy.vnc.VncScreenDescription; public class RawRect extends AbstractRect { - private static final Logger s_logger = Logger.getLogger(RawRect.class); private final int[] buf; public RawRect(VncScreenDescription screen, int x, int y, int width, int height, DataInputStream is) throws IOException { @@ -65,7 +63,7 @@ public void paint(BufferedImage image, Graphics2D graphics) { try { System.arraycopy(buf, srcLine * width, imageBuffer, x + dstLine * imageWidth, width); } catch (IndexOutOfBoundsException e) { - s_logger.info("[ignored] buffer overflow!?!", e); + logger.info("[ignored] buffer overflow!?!", e); } } break; diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/ServerCutText.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/ServerCutText.java index 044f9589b233..79ed98cccd0e 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/ServerCutText.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/packet/server/ServerCutText.java @@ 
-23,7 +23,7 @@ import com.cloud.consoleproxy.vnc.RfbConstants; public class ServerCutText { - private static final Logger s_logger = Logger.getLogger(ServerCutText.class); + protected Logger logger = Logger.getLogger(getClass()); private String content; @@ -43,7 +43,7 @@ private void readPacketData(DataInputStream is) throws IOException { content = new String(buf, RfbConstants.CHARSET); - /* LOG */s_logger.info("Clippboard content: " + content); + /* logger */logger.info("Clippboard content: " + content); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncAuthSecurity.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncAuthSecurity.java index 3a394eb63399..29c29f8ff583 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncAuthSecurity.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncAuthSecurity.java @@ -29,7 +29,7 @@ public class VncAuthSecurity implements VncSecurity { private final String vmPass; private static final int VNC_AUTH_CHALLENGE_SIZE = 16; - private static final Logger s_logger = Logger.getLogger(VncAuthSecurity.class); + protected Logger logger = Logger.getLogger(getClass()); public VncAuthSecurity(String vmPass) { this.vmPass = vmPass; @@ -37,7 +37,7 @@ public VncAuthSecurity(String vmPass) { @Override public void process(NioSocketHandler socketHandler) throws IOException { - s_logger.info("VNC server requires password authentication"); + logger.info("VNC server requires password authentication"); // Read the challenge & obtain the user's password ByteBuffer challenge = ByteBuffer.allocate(VNC_AUTH_CHALLENGE_SIZE); @@ -47,13 +47,13 @@ public void process(NioSocketHandler socketHandler) throws IOException { try { encodedPassword = NoVncClient.encodePassword(challenge.array(), vmPass); } catch (Exception e) { - s_logger.error("Cannot encrypt client password to send to server: " + 
e.getMessage()); + logger.error("Cannot encrypt client password to send to server: " + e.getMessage()); throw new CloudRuntimeException("Cannot encrypt client password to send to server: " + e.getMessage()); } // Return the response to the server socketHandler.writeBytes(ByteBuffer.wrap(encodedPassword), encodedPassword.length); socketHandler.flushWriteBuffer(); - s_logger.info("Finished VNCAuth security"); + logger.info("Finished VNCAuth security"); } } diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncTLSSecurity.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncTLSSecurity.java index c11be02a3c20..00497a378281 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncTLSSecurity.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/vnc/security/VncTLSSecurity.java @@ -31,7 +31,7 @@ public class VncTLSSecurity implements VncSecurity { - private static final Logger s_logger = Logger.getLogger(VncTLSSecurity.class); + protected Logger logger = Logger.getLogger(getClass()); private SSLContext ctx; private SSLEngine engine; @@ -71,7 +71,7 @@ private void setParam() { @Override public void process(NioSocketHandler socketHandler) { - s_logger.info("Processing VNC TLS security"); + logger.info("Processing VNC TLS security"); initGlobal(); diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/websocket/WebSocketReverseProxy.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/websocket/WebSocketReverseProxy.java index 96293fa7f71e..582fb625f2c5 100644 --- a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/websocket/WebSocketReverseProxy.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/websocket/WebSocketReverseProxy.java @@ -51,7 +51,7 @@ public class WebSocketReverseProxy extends WebSocketClient { private static final 
DefaultExtension defaultExtension = new DefaultExtension(); private static final Draft_6455 draft = new Draft_6455(Collections.singletonList(defaultExtension), Collections.singletonList(protocol)); - private static final Logger logger = Logger.getLogger(WebSocketReverseProxy.class); + protected Logger logger = Logger.getLogger(getClass()); private Session remoteSession; private void acceptAllCerts() { diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index b7b9736a742b..f4b72e53967c 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -25,7 +25,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import com.cloud.agent.api.Command; import com.cloud.configuration.Config; @@ -53,7 +52,6 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; public class PremiumSecondaryStorageManagerImpl extends SecondaryStorageManagerImpl { - private static final Logger s_logger = Logger.getLogger(PremiumSecondaryStorageManagerImpl.class); private int _capacityPerSSVM = SecondaryStorageVmManager.DEFAULT_SS_VM_CAPACITY; private int migrateCapPerSSVM = DEFAULT_MIGRATE_SS_VM_CAPACITY; @@ -125,7 +123,7 @@ public Pair scanPool(Long pool) { // this is a hacking, has nothing to do with console proxy, it is just a flag that primary storage is being under maintenance mode String restart = _configDao.getValue("consoleproxy.restart"); if (restart != null && restart.equalsIgnoreCase("false")) { - s_logger.debug("Capacity scan disabled purposefully, consoleproxy.restart = false. 
This happens when the primarystorage is in maintenance mode"); + logger.debug("Capacity scan disabled purposefully, consoleproxy.restart = false. This happens when the primarystorage is in maintenance mode"); suspendAutoLoading = true; } } @@ -133,14 +131,14 @@ public Pair scanPool(Long pool) { List alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, dataCenterId, State.Running, State.Migrating, State.Starting); if (alreadyRunning.size() == 0) { - s_logger.info("No running secondary storage vms found in datacenter id=" + dataCenterId + ", starting one"); + logger.info("No running secondary storage vms found in datacenter id=" + dataCenterId + ", starting one"); List stopped = _secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, dataCenterId, State.Stopped, State.Stopping); if (stopped.size() == 0 || !suspendAutoLoading) { List stopping = _secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, State.Stopping); if (stopping.size() > 0) { - s_logger.info("Found SSVMs that are currently at stopping state, wait until they are settled"); + logger.info("Found SSVMs that are currently at stopping state, wait until they are settled"); return new Pair(AfterScanAction.nop, null); } @@ -151,7 +149,7 @@ public Pair scanPool(Long pool) { if (!suspendAutoLoading) { // this is to avoid surprises that people may accidentally see two SSVMs being launched, capacity expanding only happens when we have at least the primary SSVM is up if (alreadyRunning.size() == 0) { - s_logger.info("Primary secondary storage is not even started, wait until next turn"); + logger.info("Primary secondary storage is not even started, wait until next turn"); return new Pair(AfterScanAction.nop, null); } @@ -172,7 +170,7 @@ private Pair scaleSSVMOnLoad(List int halfLimit = Math.round((float) (alreadyRunning.size() * migrateCapPerSSVM) / 2); currentTime = 
DateUtil.currentGMTTime().getTime(); if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { - s_logger.info("secondary storage command execution standby capactiy low (running VMs: " + alreadyRunning.size() + ", active cmds: " + activeCmds.size() + + logger.info("secondary storage command execution standby capactiy low (running VMs: " + alreadyRunning.size() + ", active cmds: " + activeCmds.size() + "), starting a new one"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); } @@ -180,7 +178,7 @@ else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimi ((Math.abs(currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > maxDataMigrationWaitTime )) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { nextSpawnTime = currentTime + maxDataMigrationWaitTime; - s_logger.debug("scaling SSVM to handle migration tasks"); + logger.debug("scaling SSVM to handle migration tasks"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index 59ac4f449382..e8158c71f854 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -52,7 +52,6 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -174,7 +173,6 @@ */ public class SecondaryStorageManagerImpl extends ManagerBase 
implements SecondaryStorageVmManager, VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter, Configurable { - private static final Logger s_logger = Logger.getLogger(SecondaryStorageManagerImpl.class); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS = 180; private static final int STARTUP_DELAY_IN_MILLISECONDS = 60000; @@ -280,7 +278,7 @@ public SecondaryStorageVmVO startSecStorageVm(long secStorageVmId) { _itMgr.advanceStart(secStorageVm.getUuid(), null, null); return _secStorageVmDao.findById(secStorageVm.getId()); } catch (ConcurrentOperationException | InsufficientCapacityException | OperationTimedoutException | ResourceUnavailableException e) { - s_logger.warn(String.format("Unable to start secondary storage VM [%s] due to [%s].", secStorageVmId, e.getMessage()), e); + logger.warn(String.format("Unable to start secondary storage VM [%s] due to [%s].", secStorageVmId, e.getMessage()), e); return null; } } @@ -302,7 +300,7 @@ public boolean generateSetupCommand(Long ssHostId) { SecondaryStorageVmVO secStorageVm = _secStorageVmDao.findByInstanceName(hostName); if (secStorageVm == null) { - s_logger.warn(String.format("Secondary storage VM [%s] does not exist.", hostName)); + logger.warn(String.format("Secondary storage VM [%s] does not exist.", hostName)); return false; } @@ -335,9 +333,9 @@ public boolean generateSetupCommand(Long ssHostId) { _imageStoreDao.update(ssStore.getId(), svo); } - s_logger.debug(String.format("Successfully programmed secondary storage [%s] in secondary storage VM [%s].", ssStore.getName(), secStorageVm.getInstanceName())); + logger.debug(String.format("Successfully programmed secondary storage [%s] in secondary storage VM [%s].", ssStore.getName(), secStorageVm.getInstanceName())); } else { - s_logger.debug(String.format("Unable to program secondary storage [%s] in secondary storage VM [%s] due to [%s].", ssStore.getName(), secStorageVm.getInstanceName(), answer == null ? 
"null answer" : answer.getDetails())); + logger.debug(String.format("Unable to program secondary storage [%s] in secondary storage VM [%s] due to [%s].", ssStore.getName(), secStorageVm.getInstanceName(), answer == null ? "null answer" : answer.getDetails())); result = false; } } @@ -355,7 +353,7 @@ public boolean generateVMSetupCommand(Long ssAHostId) { String ssvmName = ssAHost.getName(); SecondaryStorageVmVO secStorageVm = _secStorageVmDao.findByInstanceName(ssvmName); if (secStorageVm == null) { - s_logger.warn(String.format("Secondary storage VM [%s] does not exist.", ssvmName)); + logger.warn(String.format("Secondary storage VM [%s] does not exist.", ssvmName)); return false; } @@ -370,13 +368,13 @@ public boolean generateVMSetupCommand(Long ssAHostId) { Answer answer = _agentMgr.easySend(ssAHostId, setupCmd); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Successfully set HTTP auth into secondary storage VM [%s].", ssvmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Successfully set HTTP auth into secondary storage VM [%s].", ssvmName)); } return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Failed to set HTTP auth into secondary storage VM [%s] due to [%s].", ssvmName, answer == null ? "answer null" : answer.getDetails())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Failed to set HTTP auth into secondary storage VM [%s] due to [%s].", ssvmName, answer == null ? 
"answer null" : answer.getDetails())); } return false; } @@ -412,7 +410,7 @@ public boolean generateFirewallConfiguration(Long ssAHostId) { SecondaryStorageVmVO thisSecStorageVm = _secStorageVmDao.findByInstanceName(hostName); if (thisSecStorageVm == null) { - s_logger.warn(String.format("Secondary storage VM [%s] does not exist.", hostName)); + logger.warn(String.format("Secondary storage VM [%s] does not exist.", hostName)); return false; } @@ -435,12 +433,12 @@ public boolean generateFirewallConfiguration(Long ssAHostId) { hostName = ssvm.getName(); Answer answer = _agentMgr.easySend(ssvm.getId(), thiscpc); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Successfully created firewall rules into secondary storage VM [%s].", hostName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Successfully created firewall rules into secondary storage VM [%s].", hostName)); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Failed to create firewall rules into secondary storage VM [%s].", hostName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Failed to create firewall rules into secondary storage VM [%s].", hostName)); } return false; } @@ -459,12 +457,12 @@ public boolean generateFirewallConfiguration(Long ssAHostId) { Answer answer = _agentMgr.easySend(ssAHostId, allSSVMIpList); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Successfully created firewall rules into secondary storage VM [%s].", hostName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Successfully created firewall rules into secondary storage VM [%s].", hostName)); } } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Failed to create firewall rules into secondary storage VM [%s] due to [%s].", hostName, answer == null ? 
"answer null" : answer.getDetails())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Failed to create firewall rules into secondary storage VM [%s] due to [%s].", hostName, answer == null ? "answer null" : answer.getDetails())); } return false; } @@ -496,20 +494,20 @@ protected boolean isSecondaryStorageVmRequired(long dcId) { public SecondaryStorageVmVO startNew(long dataCenterId, SecondaryStorageVm.Role role) { if (!isSecondaryStorageVmRequired(dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Secondary storage VM not required in zone [%s] account to zone config.", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Secondary storage VM not required in zone [%s] account to zone config.", dataCenterId)); } return null; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Assign secondary storage VM from a newly started instance for request from data center [%s].", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Assign secondary storage VM from a newly started instance for request from data center [%s].", dataCenterId)); } Map context = createSecStorageVmInstance(dataCenterId, role); long secStorageVmId = (Long)context.get("secStorageVmId"); if (secStorageVmId == 0) { - s_logger.debug(String.format("Creating secondary storage VM instance failed on data center [%s].", dataCenterId)); + logger.debug(String.format("Creating secondary storage VM instance failed on data center [%s].", dataCenterId)); return null; } @@ -520,8 +518,8 @@ public SecondaryStorageVmVO startNew(long dataCenterId, SecondaryStorageVm.Role new SecStorageVmAlertEventArgs(SecStorageVmAlertEventArgs.SSVM_CREATED, dataCenterId, secStorageVmId, secStorageVm, null)); return secStorageVm; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Unable to allocate secondary storage VM [%s] due to it was not found on database.", secStorageVmId)); + if 
(logger.isDebugEnabled()) { + logger.debug(String.format("Unable to allocate secondary storage VM [%s] due to it was not found on database.", secStorageVmId)); } SubscriptionMgr.getInstance().notifySubscribers(ALERT_SUBJECT, this, new SecStorageVmAlertEventArgs(SecStorageVmAlertEventArgs.SSVM_CREATE_FAILURE, dataCenterId, secStorageVmId, null, "Unable to allocate storage")); @@ -605,7 +603,7 @@ protected Map createSecStorageVmInstance(long dataCenterId, Seco DataStore secStore = _dataStoreMgr.getImageStoreWithFreeCapacity(dataCenterId); if (secStore == null) { String msg = String.format("No secondary storage available in zone %s, cannot create secondary storage VM.", dataCenterId); - s_logger.warn(msg); + logger.warn(msg); throw new CloudRuntimeException(msg); } @@ -635,7 +633,7 @@ protected Map createSecStorageVmInstance(long dataCenterId, Seco networks.put(_networkMgr.setupNetwork(systemAcct, offering, plan, null, null, false).get(0), new ArrayList<>()); } } catch (ConcurrentOperationException e) { - s_logger.error(String.format("Unable to setup networks on %s due [%s].", dc.toString(), e.getMessage()), e); + logger.error(String.format("Unable to setup networks on %s due [%s].", dc.toString(), e.getMessage()), e); return new HashMap<>(); } @@ -660,7 +658,7 @@ protected Map createSecStorageVmInstance(long dataCenterId, Seco secStorageVm = _secStorageVmDao.findById(secStorageVm.getId()); } catch (InsufficientCapacityException e) { String errorMessage = String.format("Unable to allocate secondary storage VM [%s] due to [%s].", name, e.getMessage()); - s_logger.warn(errorMessage, e); + logger.warn(errorMessage, e); throw new CloudRuntimeException(errorMessage, e); } @@ -682,22 +680,22 @@ protected String connect(String ipAddress, int port) { } public SecondaryStorageVmVO assignSecStorageVmFromRunningPool(long dataCenterId, SecondaryStorageVm.Role role) { - s_logger.debug(String.format("Assign secondary storage VM from running pool for request from zone [%s].", 
dataCenterId)); + logger.debug(String.format("Assign secondary storage VM from running pool for request from zone [%s].", dataCenterId)); SecondaryStorageVmAllocator allocator = getCurrentAllocator(); assert (allocator != null); List runningList = _secStorageVmDao.getSecStorageVmListInStates(role, dataCenterId, State.Running); if (CollectionUtils.isNotEmpty(runningList)) { - s_logger.debug(String.format("Running secondary storage VM pool size [%s].", runningList.size())); + logger.debug(String.format("Running secondary storage VM pool size [%s].", runningList.size())); for (SecondaryStorageVmVO secStorageVm : runningList) { - s_logger.debug(String.format("Running secondary storage %s.", secStorageVm.toString())); + logger.debug(String.format("Running secondary storage %s.", secStorageVm.toString())); } Map loadInfo = new HashMap<>(); return allocator.allocSecondaryStorageVm(runningList, loadInfo, dataCenterId); } else { - s_logger.debug(String.format("There is no running secondary storage VM right now in the zone [%s].", dataCenterId)); + logger.debug(String.format("There is no running secondary storage VM right now in the zone [%s].", dataCenterId)); } return null; } @@ -712,11 +710,11 @@ public SecondaryStorageVmVO assignSecStorageVmFromStoppedPool(long dataCenterId, } public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { - s_logger.debug(String.format("Allocate secondary storage VM standby capacity for zone [%s].", dataCenterId)); + logger.debug(String.format("Allocate secondary storage VM standby capacity for zone [%s].", dataCenterId)); if (!isSecondaryStorageVmRequired(dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Secondary storage VM not required in zone [%s] according to zone config.", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Secondary storage VM not required in zone [%s] according to zone config.", dataCenterId)); } return; } @@ -726,8 +724,8 @@ public void 
allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { boolean secStorageVmFromStoppedPool = false; secStorageVm = assignSecStorageVmFromStoppedPool(dataCenterId, role); if (secStorageVm == null) { - if (s_logger.isInfoEnabled()) { - s_logger.info("No stopped secondary storage VM is available, need to allocate a new secondary storage VM."); + if (logger.isInfoEnabled()) { + logger.info("No stopped secondary storage VM is available, need to allocate a new secondary storage VM."); } if (_allocLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC_IN_SECONDS)) { @@ -737,14 +735,14 @@ public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { _allocLock.unlock(); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Unable to acquire synchronization lock for secondary storage VM allocation, wait for next scan."); + if (logger.isInfoEnabled()) { + logger.info("Unable to acquire synchronization lock for secondary storage VM allocation, wait for next scan."); } return; } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Found a stopped secondary storage %s, starting it.", secStorageVm.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Found a stopped secondary storage %s, starting it.", secStorageVm.toString())); } secStorageVmFromStoppedPool = true; } @@ -760,8 +758,8 @@ public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { secStorageVmLock.unlock(); } } else { - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Unable to acquire synchronization lock for starting secondary storage %s.", secStorageVm.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Unable to acquire synchronization lock for starting secondary storage %s.", secStorageVm.toString())); } return; } @@ -770,8 +768,8 @@ public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { } if (secStorageVm == null) { - if (s_logger.isInfoEnabled()) { - 
s_logger.info(String.format("Unable to start secondary storage VM [%s] for standby capacity, it will be recycled and will start a new one.", secStorageVmId)); + if (logger.isInfoEnabled()) { + logger.info(String.format("Unable to start secondary storage VM [%s] for standby capacity, it will be recycled and will start a new one.", secStorageVmId)); } if (secStorageVmFromStoppedPool) { @@ -780,8 +778,8 @@ public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { } else { SubscriptionMgr.getInstance().notifySubscribers(ALERT_SUBJECT, this, new SecStorageVmAlertEventArgs(SecStorageVmAlertEventArgs.SSVM_UP, dataCenterId, secStorageVmId, secStorageVm, null)); - if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Secondary storage %s was started.", secStorageVm.toString())); + if (logger.isInfoEnabled()) { + logger.info(String.format("Secondary storage %s was started.", secStorageVm.toString())); } } } @@ -798,8 +796,8 @@ public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { public boolean isZoneReady(Map zoneHostInfoMap, long dataCenterId) { List hosts = _hostDao.listByDataCenterId(dataCenterId); if (CollectionUtils.isEmpty(hosts)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); + if (logger.isDebugEnabled()) { + logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); } return false; } @@ -807,21 +805,21 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen if (zoneHostInfo != null && (zoneHostInfo.getFlags() & RunningHostInfoAgregator.ZoneHostInfo.ROUTING_HOST_MASK) != 0) { VMTemplateVO template = _templateDao.findSystemVMReadyTemplate(dataCenterId, HypervisorType.Any); if (template == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("System VM template is not ready at zone [%s], wait until it is ready to launch secondary storage VM.", dataCenterId)); + 
if (logger.isDebugEnabled()) { + logger.debug(String.format("System VM template is not ready at zone [%s], wait until it is ready to launch secondary storage VM.", dataCenterId)); } return false; } List stores = _dataStoreMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dataCenterId)); if (CollectionUtils.isEmpty(stores)) { - s_logger.debug(String.format("No image store added in zone [%s], wait until it is ready to launch secondary storage VM.", dataCenterId)); + logger.debug(String.format("No image store added in zone [%s], wait until it is ready to launch secondary storage VM.", dataCenterId)); return false; } if (!template.isDirectDownload() && templateMgr.getImageStore(dataCenterId, template.getId()) == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("No secondary storage available in zone [%s], wait until it is ready to launch secondary storage VM.", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("No secondary storage available in zone [%s], wait until it is ready to launch secondary storage VM.", dataCenterId)); } return false; } @@ -831,9 +829,9 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen if (CollectionUtils.isNotEmpty(storagePoolHostInfos) && storagePoolHostInfos.get(0).second() > 0) { return true; } else { - if (s_logger.isDebugEnabled()) { + if (logger.isDebugEnabled()) { String configKey = ConfigurationManagerImpl.SystemVMUseLocalStorage.key(); - s_logger.debug(String.format("Primary storage is not ready, wait until it is ready to launch secondary storage VM. {\"dataCenterId\": %s, \"%s\": \"%s\"}. " + logger.debug(String.format("Primary storage is not ready, wait until it is ready to launch secondary storage VM. {\"dataCenterId\": %s, \"%s\": \"%s\"}. 
" + "If you want to use local storage to start secondary storage VM, you need to set the configuration [%s] to \"true\".", dataCenterId, configKey, useLocalStorage, configKey)); } } @@ -858,8 +856,8 @@ private synchronized Map getZoneHostInfo() { @Override public boolean start() { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start secondary storage vm manager"); + if (logger.isInfoEnabled()) { + logger.info("Start secondary storage vm manager"); } return true; @@ -875,8 +873,8 @@ public boolean stop() { @Override public boolean configure(String name, Map params) throws ConfigurationException { - if (s_logger.isInfoEnabled()) { - s_logger.info("Start configuring secondary storage vm manager : " + name); + if (logger.isInfoEnabled()) { + logger.info("Start configuring secondary storage vm manager : " + name); } Map configs = _configDao.getConfiguration("management-server", params); @@ -887,7 +885,7 @@ public boolean configure(String name, Map params) throws Configu String ssvmUrlDomain = _configDao.getValue("secstorage.ssl.cert.domain"); if(_useSSlCopy && StringUtils.isEmpty(ssvmUrlDomain)){ - s_logger.warn("Empty secondary storage url domain, explicitly disabling SSL"); + logger.warn("Empty secondary storage url domain, explicitly disabling SSL"); _useSSlCopy = false; } @@ -913,14 +911,14 @@ public boolean configure(String name, Map params) throws Configu _serviceOffering = _offeringDao.findByUuid(ssvmSrvcOffIdStr); if (_serviceOffering == null) { try { - s_logger.debug(String.format("Unable to find a service offering by the UUID for secondary storage VM with the value [%s] set in the configuration [%s]. Trying to find by the ID.", ssvmSrvcOffIdStr, configKey)); + logger.debug(String.format("Unable to find a service offering by the UUID for secondary storage VM with the value [%s] set in the configuration [%s]. 
Trying to find by the ID.", ssvmSrvcOffIdStr, configKey)); _serviceOffering = _offeringDao.findById(Long.parseLong(ssvmSrvcOffIdStr)); if (_serviceOffering == null) { - s_logger.info(String.format("Unable to find a service offering by the UUID or ID for secondary storage VM with the value [%s] set in the configuration [%s]", ssvmSrvcOffIdStr, configKey)); + logger.info(String.format("Unable to find a service offering by the UUID or ID for secondary storage VM with the value [%s] set in the configuration [%s]", ssvmSrvcOffIdStr, configKey)); } } catch (NumberFormatException ex) { - s_logger.warn(String.format("Unable to find a service offering by the ID for secondary storage VM with the value [%s] set in the configuration [%s]. The value is not a valid integer number. Error: [%s].", ssvmSrvcOffIdStr, configKey, ex.getMessage()), ex); + logger.warn(String.format("Unable to find a service offering by the ID for secondary storage VM with the value [%s] set in the configuration [%s]. The value is not a valid integer number. Error: [%s].", ssvmSrvcOffIdStr, configKey, ex.getMessage()), ex); } } } @@ -934,7 +932,7 @@ public boolean configure(String name, Map params) throws Configu if (offerings == null || offerings.size() < 2) { String msg = "Unable to set a service offering for secondary storage VM. 
Verify if it was removed."; - s_logger.error(msg); + logger.error(msg); throw new ConfigurationException(msg); } } @@ -963,17 +961,17 @@ public boolean configure(String name, Map params) throws Configu } catch (URISyntaxException e) { errMsg = e.toString(); valid = false; - s_logger.error(String.format("Unable to configure HTTP proxy [%s] on secondary storage VM manager [%s] due to [%s].", _httpProxy, name, errMsg), e); + logger.error(String.format("Unable to configure HTTP proxy [%s] on secondary storage VM manager [%s] due to [%s].", _httpProxy, name, errMsg), e); } finally { if (!valid) { String message = String.format("Unable to configure HTTP proxy [%s] on secondary storage VM manager [%s] due to [%s].", _httpProxy, name, errMsg); - s_logger.warn(message); + logger.warn(message); throw new ConfigurationException(message); } } } - s_logger.info(String.format("Secondary storage VM manager [%s] was configured.", name)); + logger.info(String.format("Secondary storage VM manager [%s] was configured.", name)); _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; @@ -983,8 +981,8 @@ public boolean configure(String name, Map params) throws Configu public boolean stopSecStorageVm(long secStorageVmId) { SecondaryStorageVmVO secStorageVm = _secStorageVmDao.findById(secStorageVmId); if (secStorageVm == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Unable to stop secondary storage VM [%s] due to it no longer exists.", secStorageVmId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Unable to stop secondary storage VM [%s] due to it no longer exists.", secStorageVmId)); } return false; } @@ -1000,7 +998,7 @@ public boolean stopSecStorageVm(long secStorageVmId) { secStorageVmLock.unlock(); } } else { - s_logger.debug(String.format("Unable to acquire secondary storage VM [%s] lock.", secStorageVm.toString())); + logger.debug(String.format("Unable to acquire secondary storage VM [%s] lock.", 
secStorageVm.toString())); return false; } } finally { @@ -1010,7 +1008,7 @@ public boolean stopSecStorageVm(long secStorageVmId) { return true; } catch (ResourceUnavailableException e) { - s_logger.error(String.format("Unable to stop secondary storage VM [%s] due to [%s].", secStorageVm.getHostName(), e.toString()), e); + logger.error(String.format("Unable to stop secondary storage VM [%s] due to [%s].", secStorageVm.getHostName(), e.toString()), e); return false; } } @@ -1030,8 +1028,8 @@ public boolean rebootSecStorageVm(long secStorageVmId) { String secondaryStorageVmName = secStorageVm.getHostName(); if (answer != null && answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Successfully reboot secondary storage VM [%s].", secondaryStorageVmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Successfully reboot secondary storage VM [%s].", secondaryStorageVmName)); } SubscriptionMgr.getInstance().notifySubscribers(ALERT_SUBJECT, this, @@ -1039,8 +1037,8 @@ public boolean rebootSecStorageVm(long secStorageVmId) { return true; } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Unable to reboot secondary storage VM [%s] due to [%s].", secondaryStorageVmName, answer == null ? "answer null" : answer.getDetails())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Unable to reboot secondary storage VM [%s] due to [%s].", secondaryStorageVmName, answer == null ? 
"answer null" : answer.getDetails())); } return false; } @@ -1058,14 +1056,14 @@ public boolean destroySecStorageVm(long vmId) { _secStorageVmDao.remove(ssvm.getId()); HostVO host = _hostDao.findByTypeNameAndZoneId(ssvm.getDataCenterId(), ssvm.getHostName(), Host.Type.SecondaryStorageVM); if (host != null) { - s_logger.debug(String.format("Removing host entry for secondary storage VM [%s].", vmId)); + logger.debug(String.format("Removing host entry for secondary storage VM [%s].", vmId)); _hostDao.remove(host.getId()); _tmplStoreDao.expireDnldUrlsForZone(host.getDataCenterId()); _volumeStoreDao.expireDnldUrlsForZone(host.getDataCenterId()); } return true; } catch (ResourceUnavailableException e) { - s_logger.error(String.format("Unable to expunge secondary storage [%s] due to [%s].", ssvm.toString(), e.getMessage()), e); + logger.error(String.format("Unable to expunge secondary storage [%s] due to [%s].", ssvm.toString(), e.getMessage()), e); return false; } } @@ -1090,7 +1088,7 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl List secStores= _dataStoreMgr.listImageStoresWithFreeCapacity(dest.getDataCenter().getId()); if (CollectionUtils.isEmpty(secStores)) { - s_logger.warn(String.format("Unable to finalize virtual machine profile [%s] as it has no secondary storage available to satisfy storage needs for zone [%s].", profile.toString(), dest.getDataCenter().getUuid())); + logger.warn(String.format("Unable to finalize virtual machine profile [%s] as it has no secondary storage available to satisfy storage needs for zone [%s].", profile.toString(), dest.getDataCenter().getUuid())); return false; } Collections.shuffle(secStores); @@ -1118,7 +1116,7 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); if (_configDao.isPremium()) { - s_logger.debug("VMWare hypervisor was configured, informing secondary storage 
VM to load the PremiumSecondaryStorageResource."); + logger.debug("VMWare hypervisor was configured, informing secondary storage VM to load the PremiumSecondaryStorageResource."); buf.append(" resource=com.cloud.storage.resource.PremiumSecondaryStorageResource"); } else { buf.append(" resource=org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource"); @@ -1158,10 +1156,10 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl if (nic.getTrafficType() == TrafficType.Management) { String mgmt_cidr = _configDao.getValue(Config.ManagementNetwork.key()); if (NetUtils.isValidCidrList(mgmt_cidr)) { - s_logger.debug("Management server cidr list is " + mgmt_cidr); + logger.debug("Management server cidr list is " + mgmt_cidr); buf.append(" mgmtcidr=").append(mgmt_cidr); } else { - s_logger.error("Invalid management server cidr list: " + mgmt_cidr); + logger.error("Invalid management server cidr list: " + mgmt_cidr); } buf.append(" localgw=").append(dest.getPod().getGateway()); buf.append(" private.network.device=").append("eth").append(deviceId); @@ -1191,12 +1189,12 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" nfsVersion=").append(nfsVersion); buf.append(" keystore_password=").append(VirtualMachineGuru.getEncodedString(PasswordGenerator.generateRandomPassword(16))); String bootArgs = buf.toString(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Boot args for machine profile [%s]: [%s].", profile.toString(), bootArgs)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Boot args for machine profile [%s]: [%s].", profile.toString(), bootArgs)); } boolean useHttpsToUpload = BooleanUtils.toBooleanDefaultIfNull(VolumeApiService.UseHttpsToUpload.value(), true); - s_logger.debug(String.format("Setting UseHttpsToUpload config on cmdline with [%s] value.", useHttpsToUpload)); + logger.debug(String.format("Setting UseHttpsToUpload config on cmdline with [%s] 
value.", useHttpsToUpload)); buf.append(" useHttpsToUpload=").append(useHttpsToUpload); addSecondaryStorageServerAddressToBuffer(buf, secStores, vmName); @@ -1213,26 +1211,26 @@ protected void addSecondaryStorageServerAddressToBuffer(StringBuilder buffer, Li String url = dataStore.getTO().getUrl(); String[] urlArray = url.split("/"); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Found [%s] as secondary storage [%s] URL for SSVM [%s].", dataStore.getName(), url, vmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Found [%s] as secondary storage [%s] URL for SSVM [%s].", dataStore.getName(), url, vmName)); } if (ArrayUtils.getLength(urlArray) < 3) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Could not retrieve secondary storage [%s] address from URL [%s] of SSVM [%s].", dataStore.getName(), url, vmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Could not retrieve secondary storage [%s] address from URL [%s] of SSVM [%s].", dataStore.getName(), url, vmName)); } continue; } String address = urlArray[2]; - s_logger.info(String.format("Using [%s] as address of secondary storage [%s] of SSVM [%s].", address, dataStore.getName(), vmName)); + logger.info(String.format("Using [%s] as address of secondary storage [%s] of SSVM [%s].", address, dataStore.getName(), vmName)); if (!addresses.contains(address)) { addresses.add(address); } } if (addresses.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("No address found for the secondary storages: [%s] of SSVM: [%s]", StringUtils.join(dataStores.stream().map(DataStore::getName).collect(Collectors.toList()), ","), vmName)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("No address found for the secondary storages: [%s] of SSVM: [%s]", StringUtils.join(dataStores.stream().map(DataStore::getName).collect(Collectors.toList()), ","), vmName)); } return; } @@ -1277,7 +1275,7 @@ public boolean 
finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof if (controlNic == null) { if (managementNic == null) { - s_logger.warn(String.format("Management network does not exist for the secondary storage %s.", profile. toString())); + logger.warn(String.format("Management network does not exist for the secondary storage %s.", profile. toString())); return false; } controlNic = managementNic; @@ -1303,7 +1301,7 @@ protected NicProfile verifySshAccessOnManagementNicForSystemVm(VirtualMachinePro public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Commands cmds, ReservationContext context) { CheckSshAnswer answer = (CheckSshAnswer)cmds.getAnswer("checkSsh"); if (!answer.getResult()) { - s_logger.warn(String.format("Unable to connect via SSH to the VM [%s] due to [%s] ", profile.toString(), answer.getDetails())); + logger.warn(String.format("Unable to connect via SSH to the VM [%s] due to [%s] ", profile.toString(), answer.getDetails())); return false; } @@ -1316,7 +1314,7 @@ public boolean finalizeStart(VirtualMachineProfile profile, long hostId, Command _secStorageVmDao.update(secVm.getId(), secVm); } } catch (InsufficientAddressCapacityException ex) { - s_logger.error(String.format("Failed to get system IP and enable static NAT for the VM [%s] due to [%s].", profile.toString(), ex.getMessage()), ex); + logger.error(String.format("Failed to get system IP and enable static NAT for the VM [%s] due to [%s].", profile.toString(), ex.getMessage()), ex); return false; } @@ -1331,7 +1329,7 @@ public void finalizeStop(VirtualMachineProfile profile, Answer answer) { try { _rulesMgr.disableStaticNat(ip.getId(), ctx.getCallingAccount(), ctx.getCallingUserId(), true); } catch (ResourceUnavailableException ex) { - s_logger.error(String.format("Failed to disable static NAT and release system IP [%s] as a part of VM [%s] stop due to [%s].", ip, profile.toString(), ex.getMessage()), ex); + logger.error(String.format("Failed to disable static NAT and 
release system IP [%s] as a part of VM [%s] stop due to [%s].", ip, profile.toString(), ex.getMessage()), ex); } } } @@ -1364,8 +1362,8 @@ public void onScanStart() { @Override public Long[] getScannablePools() { List zoneIds = _dcDao.listEnabledNonEdgeZoneIds(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Enabled non-edge zones available for scan: %s", StringUtils.join(zoneIds, ","))); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Enabled non-edge zones available for scan: %s", StringUtils.join(zoneIds, ","))); } return zoneIds.toArray(Long[]::new); } @@ -1373,14 +1371,14 @@ public Long[] getScannablePools() { @Override public boolean isPoolReadyForScan(Long dataCenterId) { if (!isZoneReady(_zoneHostInfoMap, dataCenterId)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Zone [%s] is not ready to launch secondary storage VM.", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Zone [%s] is not ready to launch secondary storage VM.", dataCenterId)); } return false; } - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("Zone [%s] is ready to launch secondary storage VM.", dataCenterId)); + if (logger.isDebugEnabled()) { + logger.debug(String.format("Zone [%s] is ready to launch secondary storage VM.", dataCenterId)); } return true; } @@ -1394,7 +1392,7 @@ public Pair scanPool(Long dataCenterId) { List ssStores = _dataStoreMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dataCenterId)); int storeSize = (ssStores == null) ? 
0 : ssStores.size(); if (storeSize > vmSize) { - s_logger.info(String.format("No secondary storage VM found in zone [%s], starting a new one.", dataCenterId)); + logger.info(String.format("No secondary storage VM found in zone [%s], starting a new one.", dataCenterId)); return new Pair<>(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); } diff --git a/services/secondary-storage/server/pom.xml b/services/secondary-storage/server/pom.xml index dc5d01f3faac..3690899d687b 100644 --- a/services/secondary-storage/server/pom.xml +++ b/services/secondary-storage/server/pom.xml @@ -29,8 +29,12 @@ - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-core + + + org.apache.logging.log4j + log4j-api com.google.code.gson diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/HttpUploadServerHandler.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/HttpUploadServerHandler.java index aec1560f4113..9cbd8f8de9fc 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/HttpUploadServerHandler.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/HttpUploadServerHandler.java @@ -30,7 +30,8 @@ import org.apache.cloudstack.storage.template.UploadEntity; import org.apache.cloudstack.utils.imagestore.ImageStoreUtil; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.exception.InvalidParameterValueException; @@ -63,7 +64,7 @@ import io.netty.util.CharsetUtil; public class HttpUploadServerHandler extends SimpleChannelInboundHandler { - private static final Logger logger = Logger.getLogger(HttpUploadServerHandler.class.getName()); + protected Logger logger = LogManager.getLogger(getClass()); private static final HttpDataFactory factory = new 
DefaultHttpDataFactory(true); diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java index 6f189ef5f3c6..ab55f6545159 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java @@ -19,7 +19,6 @@ import java.net.URI; import java.util.concurrent.Executors; -import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.storage.template.DownloadManagerImpl; @@ -33,7 +32,6 @@ @Component public class LocalNfsSecondaryStorageResource extends NfsSecondaryStorageResource { - private static final Logger s_logger = Logger.getLogger(LocalNfsSecondaryStorageResource.class); public LocalNfsSecondaryStorageResource() { this._dlMgr = new DownloadManagerImpl(); @@ -60,7 +58,7 @@ synchronized public String getRootDir(String secUrl, String nfsVersion) { return _parent + "/" + dir; } catch (Exception e) { String msg = "GetRootDir for " + secUrl + " failed due to " + e.toString(); - s_logger.error(msg, e); + logger.error(msg, e); throw new CloudRuntimeException(msg); } } @@ -76,15 +74,15 @@ protected void mount(String localRootPath, String remoteDevice, URI uri, String attemptMount(localRootPath, remoteDevice, uri, nfsVersion); // Change permissions for the mountpoint - seems to bypass authentication - Script script = new Script(true, "chmod", _timeout, s_logger); + Script script = new Script(true, "chmod", _timeout, logger); script.add("777", localRootPath); String result = script.execute(); if (result != null) { String errMsg = "Unable to set permissions for " + localRootPath + " due to " + result; - s_logger.error(errMsg); 
+ logger.error(errMsg); throw new CloudRuntimeException(errMsg); } - s_logger.debug("Successfully set 777 permission for " + localRootPath); + logger.debug("Successfully set 777 permission for " + localRootPath); // XXX: Adding the check for creation of snapshots dir here. Might have // to move it somewhere more logical later. diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalSecondaryStorageResource.java index d953338c911d..5313cbc9970a 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalSecondaryStorageResource.java @@ -21,7 +21,6 @@ import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand; @@ -53,7 +52,6 @@ import com.cloud.utils.component.ComponentContext; public class LocalSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource { - private static final Logger s_logger = Logger.getLogger(LocalSecondaryStorageResource.class); int _timeout; String _instance; @@ -161,11 +159,11 @@ public boolean configure(String name, Map params) throws Configu } if (!_storage.mkdirs(_parent)) { - s_logger.warn("Unable to create the directory " + _parent); + logger.warn("Unable to create the directory " + _parent); throw new ConfigurationException("Unable to create the directory " + _parent); } - s_logger.info("Mount point established at " + _parent); + logger.info("Mount point established at " + _parent); params.put("template.parent", _parent); params.put(StorageLayer.InstanceConfigKey, _storage); diff --git 
a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index cc17e48fe079..af422f1ab4f1 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -94,7 +94,8 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.impl.client.DefaultHttpClient; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.joda.time.DateTime; import org.joda.time.format.ISODateTimeFormat; @@ -195,7 +196,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource { - public static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResource.class); + protected Logger logger = LogManager.getLogger(NfsSecondaryStorageResource.class); private static final String TEMPLATE_ROOT_DIR = "template/tmpl"; private static final String VOLUME_ROOT_DIR = "volumes"; @@ -281,7 +282,7 @@ public static String retrieveNfsVersionFromParams(Map params) { @Override public Answer executeRequest(Command cmd) { - s_logger.debug(LogUtils.logGsonWithoutException("Executing command %s [%s].", cmd.getClass().getSimpleName(), cmd)); + logger.debug(LogUtils.logGsonWithoutException("Executing command %s [%s].", cmd.getClass().getSimpleName(), cmd)); if (cmd instanceof DownloadProgressCommand) { return _dlMgr.handleDownloadCommand(this, (DownloadProgressCommand)cmd); } else if (cmd instanceof DownloadCommand) { @@ -347,7 +348,7 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { String nfsMountPoint = 
getRootDir(cmd.getDestStore().getUrl(), _nfsVersion); File isoFile = new File(nfsMountPoint, cmd.getIsoFile()); if(isoFile.exists()) { - s_logger.debug("config drive iso already exists"); + logger.debug("config drive iso already exists"); } Path tempDir = null; try { @@ -362,7 +363,7 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { FileUtils.deleteDirectory(tempDir.toFile()); } } catch (IOException ioe) { - s_logger.warn("Failed to delete ConfigDrive temporary directory: " + tempDir.toString(), ioe); + logger.warn("Failed to delete ConfigDrive temporary directory: " + tempDir.toString(), ioe); } } return new HandleConfigDriveIsoAnswer(cmd, NetworkElement.Location.SECONDARY, "Successfully saved config drive at secondary storage"); @@ -391,14 +392,14 @@ protected void copyLocalToNfs(File localFile, File isoFile, DataStoreTO destData if (createVolScr == null) { throw new ConfigurationException("Unable to find createvolume.sh"); } - s_logger.info("createvolume.sh found in " + createVolScr); + logger.info("createvolume.sh found in " + createVolScr); int installTimeoutPerGig = 180 * 60 * 1000; int imgSizeGigs = (int) Math.ceil(localFile.length() * 1.0d / (1024 * 1024 * 1024)); imgSizeGigs++; // add one just in case long timeout = imgSizeGigs * installTimeoutPerGig; - Script scr = new Script(createVolScr, timeout, s_logger); + Script scr = new Script(createVolScr, timeout, logger); scr.add("-s", Integer.toString(imgSizeGigs)); scr.add("-n", isoFile.getName()); scr.add("-t", getRootDir(destData.getUrl(), _nfsVersion) + "/" + isoFile.getParent()); @@ -427,65 +428,65 @@ public Answer execute(GetDatadisksCommand cmd) { String templateUrl = secondaryStorageUrl + File.separator + srcData.getPath(); String templateDetails = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(template, "uuid", "path", "name"); - s_logger.debug(String.format("Trying to get disks of template [%s], using path [%s].", templateDetails, templateUrl)); + 
logger.debug(String.format("Trying to get disks of template [%s], using path [%s].", templateDetails, templateUrl)); Pair templateInfo = decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName()); String templateRelativeFolderPath = templateInfo.first(); try { String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion); - s_logger.info(String.format("Trying to find template [%s] in secondary storage root mount point [%s].", templateDetails, secondaryMountPoint)); + logger.info(String.format("Trying to find template [%s] in secondary storage root mount point [%s].", templateDetails, secondaryMountPoint)); String srcOVAFileName = getTemplateOnSecStorageFilePath(secondaryMountPoint, templateRelativeFolderPath, templateInfo.second(), ImageFormat.OVA.getFileExtension()); String ovfFilePath = getOVFFilePath(srcOVAFileName); if (ovfFilePath == null) { - Script command = new Script("tar", 0, s_logger); + Script command = new Script("tar", 0, logger); command.add("--no-same-owner"); command.add("--no-same-permissions"); command.add("-xf", srcOVAFileName); command.setWorkDir(secondaryMountPoint + File.separator + templateRelativeFolderPath); - s_logger.info(String.format("Trying to decompress OVA file [%s] using command [%s].", srcOVAFileName, command.toString())); + logger.info(String.format("Trying to decompress OVA file [%s] using command [%s].", srcOVAFileName, command.toString())); String result = command.execute(); if (result != null) { String msg = String.format("Unable to unpack snapshot OVA file [%s] due to [%s].", srcOVAFileName, result); - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } String directory = secondaryMountPoint + File.separator + templateRelativeFolderPath; - command = new Script("chmod", 0, s_logger); + command = new Script("chmod", 0, logger); command.add("-R"); command.add("666", directory); - s_logger.debug(String.format("Trying to add, recursivelly, permission 666 to 
directory [%s] using command [%s].", directory, command.toString())); + logger.debug(String.format("Trying to add, recursivelly, permission 666 to directory [%s] using command [%s].", directory, command.toString())); result = command.execute(); if (result != null) { - s_logger.warn(String.format("Unable to set permissions 666 for directory [%s] due to [%s].", directory, result)); + logger.warn(String.format("Unable to set permissions 666 for directory [%s] due to [%s].", directory, result)); } } - Script command = new Script("cp", _timeout, s_logger); + Script command = new Script("cp", _timeout, logger); command.add(ovfFilePath); command.add(ovfFilePath + ORIGINAL_FILE_EXTENSION); - s_logger.debug(String.format("Trying to copy file from [%s] to [%s] using command [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, command.toString())); + logger.debug(String.format("Trying to copy file from [%s] to [%s] using command [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, command.toString())); String result = command.execute(); if (result != null) { String msg = String.format("Unable to copy original OVF file [%s] to [%s] due to [%s].", ovfFilePath, ovfFilePath + ORIGINAL_FILE_EXTENSION, result); - s_logger.error(msg); + logger.error(msg); } - s_logger.debug(String.format("Reading OVF file [%s] to retrive the number of disks present in OVA file.", ovfFilePath)); + logger.debug(String.format("Reading OVF file [%s] to retrive the number of disks present in OVA file.", ovfFilePath)); OVFHelper ovfHelper = new OVFHelper(); List disks = ovfHelper.getOVFVolumeInfoFromFile(ovfFilePath, configurationId); - s_logger.debug(LogUtils.logGsonWithoutException("Found %s disks reading OVF file [%s] and using configuration id [%s]. The disks specifications are [%s].", + logger.debug(LogUtils.logGsonWithoutException("Found %s disks reading OVF file [%s] and using configuration id [%s]. 
The disks specifications are [%s].", disks.size(), ovfFilePath, configurationId, disks)); return new GetDatadisksAnswer(disks); } catch (Exception e) { String msg = String.format("Failed to get disks from template [%s] due to [%s].", templateDetails, e.getMessage()); - s_logger.error(msg, e); + logger.error(msg, e); return new GetDatadisksAnswer(msg); } } @@ -506,8 +507,8 @@ public Answer execute(CreateDatadiskTemplateCommand cmd) { long templateId = dataDiskTemplate.getId(); String templateUniqueName = dataDiskTemplate.getUniqueName(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("no cmd? %s", cmd.stringRepresentation())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("no cmd? %s", cmd.stringRepresentation())); } String origDisk = cmd.getPath(); long virtualSize = dataDiskTemplate.getSize(); @@ -520,34 +521,34 @@ public Answer execute(CreateDatadiskTemplateCommand cmd) { if (!cmd.getBootable()) { // Create folder to hold datadisk template synchronized (newTmplDir.intern()) { - Script command = new Script("mkdir", _timeout, s_logger); + Script command = new Script("mkdir", _timeout, logger); command.add("-p"); command.add(newTmplDirAbsolute); String result = command.execute(); if (result != null) { String msg = "Unable to prepare template directory: " + newTmplDir + ", storage: " + secondaryStorageUrl + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } // Move Datadisk VMDK from parent template folder to Datadisk template folder synchronized (origDisk.intern()) { - Script command = new Script("mv", _timeout, s_logger); + Script command = new Script("mv", _timeout, logger); command.add(origDisk); command.add(newTmplDirAbsolute); String result = command.execute(); if (result != null) { String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } - 
command = new Script("cp", _timeout, s_logger); + command = new Script("cp", _timeout, logger); command.add(ovfFilePath + ORIGINAL_FILE_EXTENSION); command.add(newTmplDirAbsolute); result = command.execute(); if (result != null) { String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result; - s_logger.error(msg); + logger.error(msg); throw new Exception(msg); } } @@ -572,7 +573,7 @@ public Answer execute(CreateDatadiskTemplateCommand cmd) { diskTemplate.setPhysicalSize(physicalSize); } catch (Exception e) { String msg = "Create Datadisk template failed due to " + e.getMessage(); - s_logger.error(msg, e); + logger.error(msg, e); return new CreateDatadiskTemplateAnswer(msg); } return new CreateDatadiskTemplateAnswer(diskTemplate); @@ -591,18 +592,18 @@ public Answer execute(MoveVolumeCommand cmd) { Path destPath = Paths.get(rootDir + cmd.getDestPath()); try { - s_logger.debug(String.format("Trying to create missing directories (if any) to move volume [%s].", volumeToString)); + logger.debug(String.format("Trying to create missing directories (if any) to move volume [%s].", volumeToString)); Files.createDirectories(destPath.getParent()); - s_logger.debug(String.format("Trying to move volume [%s] to [%s].", volumeToString, destPath)); + logger.debug(String.format("Trying to move volume [%s] to [%s].", volumeToString, destPath)); Files.move(srcPath, destPath); String msg = String.format("Moved volume [%s] from [%s] to [%s].", volumeToString, srcPath, destPath); - s_logger.debug(msg); + logger.debug(msg); return new Answer(cmd, true, msg); } catch (IOException ioException) { - s_logger.error(String.format("Failed to move volume [%s] from [%s] to [%s] due to [%s].", volumeToString, srcPath, destPath, ioException.getMessage()), + logger.error(String.format("Failed to move volume [%s] from [%s] to [%s] due to [%s].", volumeToString, srcPath, destPath, ioException.getMessage()), ioException); return new Answer(cmd, 
ioException); } @@ -612,8 +613,8 @@ public Answer execute(MoveVolumeCommand cmd) { * return Pair of