signature = "sIgNaTuRe" # vprob.net.connectivity.lost.category = "error" vprob.net.connectivity.lost.description = "Lost Network Connectivity" vprob.net.connectivity.lost.formatOnVm = "" vprob.net.connectivity.lost.formatOnHost = "" vprob.net.connectivity.lost.formatOnComputeResource = "" vprob.net.connectivity.lost.formatOnDatacenter = "" vprob.net.connectivity.lost.fullFormat = "Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}." # vprob.net.redundancy.lost.category = "warning" vprob.net.redundancy.lost.description = "Lost Network Redundancy" vprob.net.redundancy.lost.formatOnVm = "" vprob.net.redundancy.lost.formatOnHost = "" vprob.net.redundancy.lost.formatOnComputeResource = "" vprob.net.redundancy.lost.formatOnDatacenter = "" vprob.net.redundancy.lost.fullFormat = "Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}." # vprob.net.redundancy.degraded.category = "warning" vprob.net.redundancy.degraded.description = "Network Redundancy Degraded" vprob.net.redundancy.degraded.formatOnVm = "" vprob.net.redundancy.degraded.formatOnHost = "" vprob.net.redundancy.degraded.formatOnComputeResource = "" vprob.net.redundancy.degraded.formatOnDatacenter = "" vprob.net.redundancy.degraded.fullFormat = "Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. {3} uplinks still up. Affected portgroups:{4}." # vprob.storage.connectivity.lost.category = "error" vprob.storage.connectivity.lost.description = "Lost Storage Connectivity" vprob.storage.connectivity.lost.formatOnVm = "" vprob.storage.connectivity.lost.formatOnHost = "" vprob.storage.connectivity.lost.formatOnComputeResource = "" vprob.storage.connectivity.lost.formatOnDatacenter = "" vprob.storage.connectivity.lost.fullFormat = "Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}." 
# vprob.storage.redundancy.lost.category = "warning" vprob.storage.redundancy.lost.description = "Lost Storage Path Redundancy" vprob.storage.redundancy.lost.formatOnVm = "" vprob.storage.redundancy.lost.formatOnHost = "" vprob.storage.redundancy.lost.formatOnComputeResource = "" vprob.storage.redundancy.lost.formatOnDatacenter = "" vprob.storage.redundancy.lost.fullFormat = "Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}." # vprob.storage.redundancy.degraded.category = "warning" vprob.storage.redundancy.degraded.description = "Degraded Storage Path Redundancy" vprob.storage.redundancy.degraded.formatOnVm = "" vprob.storage.redundancy.degraded.formatOnHost = "" vprob.storage.redundancy.degraded.formatOnComputeResource = "" vprob.storage.redundancy.degraded.formatOnDatacenter = "" vprob.storage.redundancy.degraded.fullFormat = "Path redundancy to storage device {1} degraded. Path {2} is down. {3} remaining active paths. Affected datastores: {4}." # vprob.vob.vsan.pdl.offline.category = "error" vprob.vob.vsan.pdl.offline.description = "vSAN device has gone offline." vprob.vob.vsan.pdl.offline.formatOnVm = "" vprob.vob.vsan.pdl.offline.formatOnHost = "vSAN device {1} has gone offline." vprob.vob.vsan.pdl.offline.formatOnComputeResource = "" vprob.vob.vsan.pdl.offline.formatOnDatacenter = "" vprob.vob.vsan.pdl.offline.fullFormat = "vSAN device {1} has gone offline." # esx.problem.storage.apd.start.category = "warning" esx.problem.storage.apd.start.description = "All paths are down" esx.problem.storage.apd.start.formatOnVm = "" esx.problem.storage.apd.start.formatOnHost = "" esx.problem.storage.apd.start.formatOnComputeResource = "" esx.problem.storage.apd.start.formatOnDatacenter = "" esx.problem.storage.apd.start.fullFormat = "Device or filesystem with identifier {1} has entered the All Paths Down state." 
# esx.clear.storage.apd.exit.category = "info" esx.clear.storage.apd.exit.description = "Exited the All Paths Down state" esx.clear.storage.apd.exit.formatOnVm = "" esx.clear.storage.apd.exit.formatOnHost = "" esx.clear.storage.apd.exit.formatOnComputeResource = "" esx.clear.storage.apd.exit.formatOnDatacenter = "" esx.clear.storage.apd.exit.fullFormat = "Device or filesystem with identifier {1} has exited the All Paths Down state." # esx.problem.storage.apd.timeout.category = "warning" esx.problem.storage.apd.timeout.description = "All Paths Down timed out, I/Os will be fast failed" esx.problem.storage.apd.timeout.formatOnVm = "" esx.problem.storage.apd.timeout.formatOnHost = "" esx.problem.storage.apd.timeout.formatOnComputeResource = "" esx.problem.storage.apd.timeout.formatOnDatacenter = "" esx.problem.storage.apd.timeout.fullFormat = "Device or filesystem with identifier {1} has entered the All Paths Down Timeout state after being in the All Paths Down state for {2} seconds. I/Os will now be fast failed." # esx.problem.storage.connectivity.pathstatechanges.category = "info" esx.problem.storage.connectivity.pathstatechanges.description = "Frequent State Changes of Storage Path" esx.problem.storage.connectivity.pathstatechanges.formatOnVm = "" esx.problem.storage.connectivity.pathstatechanges.formatOnHost = "" esx.problem.storage.connectivity.pathstatechanges.formatOnComputeResource = "" esx.problem.storage.connectivity.pathstatechanges.formatOnDatacenter = "" esx.problem.storage.connectivity.pathstatechanges.fullFormat = "Frequent path state changes are occurring for path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}" 
# esx.problem.storage.connectivity.pathpor.category = "warning" esx.problem.storage.connectivity.pathpor.description = "Frequent PowerOn Reset Unit Attention of Storage Path" esx.problem.storage.connectivity.pathpor.formatOnVm = "" esx.problem.storage.connectivity.pathpor.formatOnHost = "" esx.problem.storage.connectivity.pathpor.formatOnComputeResource = "" esx.problem.storage.connectivity.pathpor.formatOnDatacenter = "" esx.problem.storage.connectivity.pathpor.fullFormat = "Frequent PowerOn Reset Unit Attentions are occurring on path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}" # esx.problem.storage.connectivity.devicepor.category = "warning" esx.problem.storage.connectivity.devicepor.description = "Frequent PowerOn Reset Unit Attention of Storage Path" esx.problem.storage.connectivity.devicepor.formatOnVm = "" esx.problem.storage.connectivity.devicepor.formatOnHost = "" esx.problem.storage.connectivity.devicepor.formatOnComputeResource = "" esx.problem.storage.connectivity.devicepor.formatOnDatacenter = "" esx.problem.storage.connectivity.devicepor.fullFormat = "Frequent PowerOn Reset Unit Attentions are occurring on device {1}. This might indicate a storage problem. Affected datastores: {2}" # esx.problem.scsi.device.limitreached.category = "error" esx.problem.scsi.device.limitreached.description = "Maximum number of storage devices" esx.problem.scsi.device.limitreached.formatOnVm = "" esx.problem.scsi.device.limitreached.formatOnHost = "" esx.problem.scsi.device.limitreached.formatOnComputeResource = "" esx.problem.scsi.device.limitreached.formatOnDatacenter = "" esx.problem.scsi.device.limitreached.fullFormat = "The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created." 
# esx.problem.scsi.scsipath.limitreached.category = "error" esx.problem.scsi.scsipath.limitreached.description = "Maximum number of storage paths" esx.problem.scsi.scsipath.limitreached.formatOnVm = "" esx.problem.scsi.scsipath.limitreached.formatOnHost = "" esx.problem.scsi.scsipath.limitreached.formatOnComputeResource = "" esx.problem.scsi.scsipath.limitreached.formatOnDatacenter = "" esx.problem.scsi.scsipath.limitreached.fullFormat = "The maximum number of supported paths of {1} has been reached. Path {2} could not be added." # esx.problem.scsi.scsipath.badpath.unsafepe.category = "error" esx.problem.scsi.scsipath.badpath.unsafepe.description = "Cannot safely determine vVol PE" esx.problem.scsi.scsipath.badpath.unsafepe.formatOnVm = "" esx.problem.scsi.scsipath.badpath.unsafepe.formatOnHost = "" esx.problem.scsi.scsipath.badpath.unsafepe.formatOnComputeResource = "" esx.problem.scsi.scsipath.badpath.unsafepe.formatOnDatacenter = "" esx.problem.scsi.scsipath.badpath.unsafepe.fullFormat = "Sanity check failed for path {1}. Could not safely determine if the path is to a vVol PE. Path dropped." # esx.problem.scsi.scsipath.badpath.unreachpe.category = "error" esx.problem.scsi.scsipath.badpath.unreachpe.description = "vVol PE path going out of vVol-incapable adapter" esx.problem.scsi.scsipath.badpath.unreachpe.formatOnVm = "" esx.problem.scsi.scsipath.badpath.unreachpe.formatOnHost = "" esx.problem.scsi.scsipath.badpath.unreachpe.formatOnComputeResource = "" esx.problem.scsi.scsipath.badpath.unreachpe.formatOnDatacenter = "" esx.problem.scsi.scsipath.badpath.unreachpe.fullFormat = "Sanity check failed for path {1}. The path is to a vVol PE, but it goes out of adapter {2} which is not PE capable. Path dropped." 
# vprob.net.e1000.tso6.notsupported.category = "error" vprob.net.e1000.tso6.notsupported.description = "No IPv6 TSO support" vprob.net.e1000.tso6.notsupported.formatOnVm = "" vprob.net.e1000.tso6.notsupported.formatOnHost = "" vprob.net.e1000.tso6.notsupported.formatOnComputeResource = "" vprob.net.e1000.tso6.notsupported.formatOnDatacenter = "" vprob.net.e1000.tso6.notsupported.fullFormat = "Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter." # esx.problem.migrate.vmotion.default.heap.create.failed.category = "warning" esx.problem.migrate.vmotion.default.heap.create.failed.description = "Failed to create default migration heap" esx.problem.migrate.vmotion.default.heap.create.failed.formatOnVm = "" esx.problem.migrate.vmotion.default.heap.create.failed.formatOnHost = "" esx.problem.migrate.vmotion.default.heap.create.failed.formatOnComputeResource = "" esx.problem.migrate.vmotion.default.heap.create.failed.formatOnDatacenter = "" esx.problem.migrate.vmotion.default.heap.create.failed.fullFormat = "Failed to create default migration heap. This might be the result of severe host memory pressure or virtual address space exhaustion. Migration might still be possible, but will be unreliable in cases of extreme host memory pressure." 
# esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.category = "error" esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.description = "Error with migration listen socket" esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.formatOnVm = "" esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.formatOnHost = "" esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.formatOnComputeResource = "" esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.formatOnDatacenter = "" esx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown.fullFormat = "The ESXi host's vMotion network server encountered an error while monitoring incoming network connections. Shutting down listener socket. vMotion might not be possible with this host until vMotion is manually re-enabled. Failure status: {1}" # vprob.net.migrate.bindtovmk.category = "warning" vprob.net.migrate.bindtovmk.description = "Invalid vmknic specified in /Migrate/Vmknic" vprob.net.migrate.bindtovmk.formatOnVm = "" vprob.net.migrate.bindtovmk.formatOnHost = "" vprob.net.migrate.bindtovmk.formatOnComputeResource = "" vprob.net.migrate.bindtovmk.formatOnDatacenter = "" vprob.net.migrate.bindtovmk.fullFormat = "The ESX advanced config option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Please update the config option with a valid vmknic or, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank." 
# esx.problem.net.migrate.unsupported.latency.category = "warning" esx.problem.net.migrate.unsupported.latency.description = "Unsupported vMotion network latency detected" esx.problem.net.migrate.unsupported.latency.formatOnVm = "" esx.problem.net.migrate.unsupported.latency.formatOnHost = "" esx.problem.net.migrate.unsupported.latency.formatOnComputeResource = "" esx.problem.net.migrate.unsupported.latency.formatOnDatacenter = "" esx.problem.net.migrate.unsupported.latency.fullFormat = "ESXi has detected {1}ms round-trip vMotion network latency between host {2} and {3}. High latency vMotion networks are supported only if both ESXi hosts have been configured for vMotion latency tolerance." # esx.problem.net.portset.port.full.category = "error" esx.problem.net.portset.port.full.description = "Failed to apply for free ports" esx.problem.net.portset.port.full.formatOnVm = "" esx.problem.net.portset.port.full.formatOnHost = "" esx.problem.net.portset.port.full.formatOnComputeResource = "" esx.problem.net.portset.port.full.formatOnDatacenter = "" esx.problem.net.portset.port.full.fullFormat = "Portset {1} has reached the maximum number of ports ({2}). Cannot apply for any more free ports." # esx.problem.net.portset.unsupported.psclass.category = "warning" esx.problem.net.portset.unsupported.psclass.description = "Try to register an unsupported portset class" esx.problem.net.portset.unsupported.psclass.formatOnVm = "" esx.problem.net.portset.unsupported.psclass.formatOnHost = "" esx.problem.net.portset.unsupported.psclass.formatOnComputeResource = "" esx.problem.net.portset.unsupported.psclass.formatOnDatacenter = "" esx.problem.net.portset.unsupported.psclass.fullFormat = "{1} is not a VMware supported portset class, the relevant module must be unloaded." 
# vprob.net.proxyswitch.port.unavailable.category = "warning" vprob.net.proxyswitch.port.unavailable.description = "Virtual NIC connection to switch failed" vprob.net.proxyswitch.port.unavailable.formatOnVm = "" vprob.net.proxyswitch.port.unavailable.formatOnHost = "" vprob.net.proxyswitch.port.unavailable.formatOnComputeResource = "" vprob.net.proxyswitch.port.unavailable.formatOnDatacenter = "" vprob.net.proxyswitch.port.unavailable.fullFormat = "Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch." # esx.problem.net.proxyswitch.port.unavailable.category = "warning" esx.problem.net.proxyswitch.port.unavailable.description = "Virtual NIC connection to switch failed" esx.problem.net.proxyswitch.port.unavailable.formatOnVm = "" esx.problem.net.proxyswitch.port.unavailable.formatOnHost = "" esx.problem.net.proxyswitch.port.unavailable.formatOnComputeResource = "" esx.problem.net.proxyswitch.port.unavailable.formatOnDatacenter = "" esx.problem.net.proxyswitch.port.unavailable.fullFormat = "Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch." # esx.problem.net.uplink.mtu.failed.category = "warning" esx.problem.net.uplink.mtu.failed.description = "Failed to set MTU on an uplink" esx.problem.net.uplink.mtu.failed.formatOnVm = "" esx.problem.net.uplink.mtu.failed.formatOnHost = "" esx.problem.net.uplink.mtu.failed.formatOnComputeResource = "" esx.problem.net.uplink.mtu.failed.formatOnDatacenter = "" esx.problem.net.uplink.mtu.failed.fullFormat = "VMkernel failed to set the MTU value {1} on the uplink {2}." 
# esx.problem.net.vmknic.ip.duplicate.category = "warning" esx.problem.net.vmknic.ip.duplicate.description = "A duplicate IP address was detected on a vmknic interface" esx.problem.net.vmknic.ip.duplicate.formatOnVm = "" esx.problem.net.vmknic.ip.duplicate.formatOnHost = "" esx.problem.net.vmknic.ip.duplicate.formatOnComputeResource = "" esx.problem.net.vmknic.ip.duplicate.formatOnDatacenter = "" esx.problem.net.vmknic.ip.duplicate.fullFormat = "A duplicate IP address was detected for {1} on the interface {2}. The current owner is {3}." # esx.audit.net.firewall.config.changed.category = "info" esx.audit.net.firewall.config.changed.description = "Firewall configuration has changed." esx.audit.net.firewall.config.changed.formatOnVm = "" esx.audit.net.firewall.config.changed.formatOnHost = "" esx.audit.net.firewall.config.changed.formatOnComputeResource = "" esx.audit.net.firewall.config.changed.formatOnDatacenter = "" esx.audit.net.firewall.config.changed.fullFormat = "Firewall configuration has changed. Operation '{1}' for rule set {2} succeeded." # esx.problem.net.firewall.config.failed.category = "error" esx.problem.net.firewall.config.failed.description = "Firewall configuration operation failed. The changes were not applied." esx.problem.net.firewall.config.failed.formatOnVm = "" esx.problem.net.firewall.config.failed.formatOnHost = "" esx.problem.net.firewall.config.failed.formatOnComputeResource = "" esx.problem.net.firewall.config.failed.formatOnDatacenter = "" esx.problem.net.firewall.config.failed.fullFormat = "Firewall configuration operation '{1}' failed. The changes were not applied to rule set {2}." # esx.audit.net.firewall.port.hooked.category = "info" esx.audit.net.firewall.port.hooked.description = "Port is now protected by Firewall." 
esx.audit.net.firewall.port.hooked.formatOnVm = "" esx.audit.net.firewall.port.hooked.formatOnHost = "" esx.audit.net.firewall.port.hooked.formatOnComputeResource = "" esx.audit.net.firewall.port.hooked.formatOnDatacenter = "" esx.audit.net.firewall.port.hooked.fullFormat = "Port {1} is now protected by Firewall." # esx.problem.net.firewall.port.hookfailed.category = "error" esx.problem.net.firewall.port.hookfailed.description = "Adding port to Firewall failed." esx.problem.net.firewall.port.hookfailed.formatOnVm = "" esx.problem.net.firewall.port.hookfailed.formatOnHost = "" esx.problem.net.firewall.port.hookfailed.formatOnComputeResource = "" esx.problem.net.firewall.port.hookfailed.formatOnDatacenter = "" esx.problem.net.firewall.port.hookfailed.fullFormat = "Adding port {1} to Firewall failed." # esx.audit.net.firewall.port.removed.category = "warning" esx.audit.net.firewall.port.removed.description = "Port is no longer protected with Firewall." esx.audit.net.firewall.port.removed.formatOnVm = "" esx.audit.net.firewall.port.removed.formatOnHost = "" esx.audit.net.firewall.port.removed.formatOnComputeResource = "" esx.audit.net.firewall.port.removed.formatOnDatacenter = "" esx.audit.net.firewall.port.removed.fullFormat = "Port {1} is no longer protected with Firewall." # esx.audit.net.firewall.disabled.category = "warning" esx.audit.net.firewall.disabled.description = "Firewall has been disabled." esx.audit.net.firewall.disabled.formatOnVm = "" esx.audit.net.firewall.disabled.formatOnHost = "" esx.audit.net.firewall.disabled.formatOnComputeResource = "" esx.audit.net.firewall.disabled.formatOnDatacenter = "" esx.audit.net.firewall.disabled.fullFormat = "Firewall has been disabled." # esx.audit.net.firewall.enabled.category = "info" esx.audit.net.firewall.enabled.description = "Firewall has been enabled for port." 
esx.audit.net.firewall.enabled.formatOnVm = "" esx.audit.net.firewall.enabled.formatOnHost = "" esx.audit.net.firewall.enabled.formatOnComputeResource = "" esx.audit.net.firewall.enabled.formatOnDatacenter = "" esx.audit.net.firewall.enabled.fullFormat = "Firewall has been enabled for port {1}." # esx.problem.net.fence.port.badfenceid.category = "error" esx.problem.net.fence.port.badfenceid.description = "Invalid fenceId configuration on dvPort" esx.problem.net.fence.port.badfenceid.formatOnVm = "" esx.problem.net.fence.port.badfenceid.formatOnHost = "" esx.problem.net.fence.port.badfenceid.formatOnComputeResource = "" esx.problem.net.fence.port.badfenceid.formatOnDatacenter = "" esx.problem.net.fence.port.badfenceid.fullFormat = "VMkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: invalid fenceId." # esx.problem.net.fence.switch.unavailable.category = "error" esx.problem.net.fence.switch.unavailable.description = "Switch fence property is not set" esx.problem.net.fence.switch.unavailable.formatOnVm = "" esx.problem.net.fence.switch.unavailable.formatOnHost = "" esx.problem.net.fence.switch.unavailable.formatOnComputeResource = "" esx.problem.net.fence.switch.unavailable.formatOnDatacenter = "" esx.problem.net.fence.switch.unavailable.fullFormat = "Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: dvSwitch fence property is not set." # esx.problem.net.fence.resource.limited.category = "error" esx.problem.net.fence.resource.limited.description = "Maximum number of fence networks or ports" esx.problem.net.fence.resource.limited.formatOnVm = "" esx.problem.net.fence.resource.limited.formatOnHost = "" esx.problem.net.fence.resource.limited.formatOnComputeResource = "" esx.problem.net.fence.resource.limited.formatOnDatacenter = "" esx.problem.net.fence.resource.limited.fullFormat = "Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: maximum number of fence networks or ports have been reached." 
# esx.audit.net.lacp.disable.category = "info" esx.audit.net.lacp.disable.description = "LACP disabled" esx.audit.net.lacp.disable.formatOnVm = "" esx.audit.net.lacp.disable.formatOnHost = "" esx.audit.net.lacp.disable.formatOnComputeResource = "" esx.audit.net.lacp.disable.formatOnDatacenter = "" esx.audit.net.lacp.disable.fullFormat = "LACP for VDS {1} is disabled." # esx.audit.net.lacp.enable.category = "info" esx.audit.net.lacp.enable.description = "LACP enabled" esx.audit.net.lacp.enable.formatOnVm = "" esx.audit.net.lacp.enable.formatOnHost = "" esx.audit.net.lacp.enable.formatOnComputeResource = "" esx.audit.net.lacp.enable.formatOnDatacenter = "" esx.audit.net.lacp.enable.fullFormat = "LACP for VDS {1} is enabled." # esx.problem.net.lacp.policy.incompatible.category = "error" esx.problem.net.lacp.policy.incompatible.description = "Current teaming policy is incompatible" esx.problem.net.lacp.policy.incompatible.formatOnVm = "" esx.problem.net.lacp.policy.incompatible.formatOnHost = "" esx.problem.net.lacp.policy.incompatible.formatOnComputeResource = "" esx.problem.net.lacp.policy.incompatible.formatOnDatacenter = "" esx.problem.net.lacp.policy.incompatible.fullFormat = "LACP error: Current teaming policy on VDS {1} is incompatible, supported is IP hash only." # esx.problem.net.lacp.policy.linkstatus.category = "error" esx.problem.net.lacp.policy.linkstatus.description = "Current teaming policy is incompatible" esx.problem.net.lacp.policy.linkstatus.formatOnVm = "" esx.problem.net.lacp.policy.linkstatus.formatOnHost = "" esx.problem.net.lacp.policy.linkstatus.formatOnComputeResource = "" esx.problem.net.lacp.policy.linkstatus.formatOnDatacenter = "" esx.problem.net.lacp.policy.linkstatus.fullFormat = "LACP error: Current teaming policy on VDS {1} is incompatible, supported link failover detection is link status only." 
# esx.problem.net.lacp.uplink.inactive.category = "error" esx.problem.net.lacp.uplink.inactive.description = "All uplinks must be active" esx.problem.net.lacp.uplink.inactive.formatOnVm = "" esx.problem.net.lacp.uplink.inactive.formatOnHost = "" esx.problem.net.lacp.uplink.inactive.formatOnComputeResource = "" esx.problem.net.lacp.uplink.inactive.formatOnDatacenter = "" esx.problem.net.lacp.uplink.inactive.fullFormat = "LACP error: All uplinks on VDS {1} must be active." # esx.problem.net.lacp.uplink.fail.speed.category = "error" esx.problem.net.lacp.uplink.fail.speed.description = "uplink speed is different" esx.problem.net.lacp.uplink.fail.speed.formatOnVm = "" esx.problem.net.lacp.uplink.fail.speed.formatOnHost = "" esx.problem.net.lacp.uplink.fail.speed.formatOnComputeResource = "" esx.problem.net.lacp.uplink.fail.speed.formatOnDatacenter = "" esx.problem.net.lacp.uplink.fail.speed.fullFormat = "LACP error: Speed across all uplink ports must be same, VDS {1} uplink {2} has different speed." # esx.problem.net.lacp.uplink.fail.duplex.category = "error" esx.problem.net.lacp.uplink.fail.duplex.description = "uplink duplex mode is different" esx.problem.net.lacp.uplink.fail.duplex.formatOnVm = "" esx.problem.net.lacp.uplink.fail.duplex.formatOnHost = "" esx.problem.net.lacp.uplink.fail.duplex.formatOnComputeResource = "" esx.problem.net.lacp.uplink.fail.duplex.formatOnDatacenter = "" esx.problem.net.lacp.uplink.fail.duplex.fullFormat = "LACP error: Duplex mode across all uplink ports must be full, VDS {1} uplink {2} has different mode." 
# esx.problem.net.lacp.uplink.transition.down.category = "warning" esx.problem.net.lacp.uplink.transition.down.description = "uplink transition down" esx.problem.net.lacp.uplink.transition.down.formatOnVm = "" esx.problem.net.lacp.uplink.transition.down.formatOnHost = "" esx.problem.net.lacp.uplink.transition.down.formatOnComputeResource = "" esx.problem.net.lacp.uplink.transition.down.formatOnDatacenter = "" esx.problem.net.lacp.uplink.transition.down.fullFormat = "LACP warning: uplink {1} on VDS {2} is moved out of link aggregation group." # esx.clear.net.lacp.uplink.transition.up.category = "info" esx.clear.net.lacp.uplink.transition.up.description = "uplink transition up" esx.clear.net.lacp.uplink.transition.up.formatOnVm = "" esx.clear.net.lacp.uplink.transition.up.formatOnHost = "" esx.clear.net.lacp.uplink.transition.up.formatOnComputeResource = "" esx.clear.net.lacp.uplink.transition.up.formatOnDatacenter = "" esx.clear.net.lacp.uplink.transition.up.fullFormat = "LACP info: uplink {1} on VDS {2} is moved into link aggregation group." # esx.problem.net.lacp.uplink.blocked.category = "warning" esx.problem.net.lacp.uplink.blocked.description = "uplink is blocked" esx.problem.net.lacp.uplink.blocked.formatOnVm = "" esx.problem.net.lacp.uplink.blocked.formatOnHost = "" esx.problem.net.lacp.uplink.blocked.formatOnComputeResource = "" esx.problem.net.lacp.uplink.blocked.formatOnDatacenter = "" esx.problem.net.lacp.uplink.blocked.fullFormat = "LACP warning: uplink {1} on VDS {2} is blocked." # esx.clear.net.lacp.uplink.unblocked.category = "info" esx.clear.net.lacp.uplink.unblocked.description = "uplink is unblocked" esx.clear.net.lacp.uplink.unblocked.formatOnVm = "" esx.clear.net.lacp.uplink.unblocked.formatOnHost = "" esx.clear.net.lacp.uplink.unblocked.formatOnComputeResource = "" esx.clear.net.lacp.uplink.unblocked.formatOnDatacenter = "" esx.clear.net.lacp.uplink.unblocked.fullFormat = "LACP info: uplink {1} on VDS {2} is unblocked." 
# esx.problem.net.lacp.peer.noresponse.category = "error" esx.problem.net.lacp.peer.noresponse.description = "No peer response" esx.problem.net.lacp.peer.noresponse.formatOnVm = "" esx.problem.net.lacp.peer.noresponse.formatOnHost = "" esx.problem.net.lacp.peer.noresponse.formatOnComputeResource = "" esx.problem.net.lacp.peer.noresponse.formatOnDatacenter = "" esx.problem.net.lacp.peer.noresponse.fullFormat = "LACP error: No peer response on uplink {1} for VDS {2}." # esx.problem.net.lacp.peer.noresponse.2.category = "error" esx.problem.net.lacp.peer.noresponse.2.description = "No peer response" esx.problem.net.lacp.peer.noresponse.2.formatOnVm = "" esx.problem.net.lacp.peer.noresponse.2.formatOnHost = "" esx.problem.net.lacp.peer.noresponse.2.formatOnComputeResource = "" esx.problem.net.lacp.peer.noresponse.2.formatOnDatacenter = "" esx.problem.net.lacp.peer.noresponse.2.fullFormat = "LACP error: No peer response on VDS {1}." # esx.problem.net.lacp.uplink.disconnected.category = "warning" esx.problem.net.lacp.uplink.disconnected.description = "uplink is disconnected" esx.problem.net.lacp.uplink.disconnected.formatOnVm = "" esx.problem.net.lacp.uplink.disconnected.formatOnHost = "" esx.problem.net.lacp.uplink.disconnected.formatOnComputeResource = "" esx.problem.net.lacp.uplink.disconnected.formatOnDatacenter = "" esx.problem.net.lacp.uplink.disconnected.fullFormat = "LACP warning: uplink {1} on VDS {2} got disconnected." # esx.audit.net.lacp.uplink.connected.category = "info" esx.audit.net.lacp.uplink.connected.description = "uplink is connected" esx.audit.net.lacp.uplink.connected.formatOnVm = "" esx.audit.net.lacp.uplink.connected.formatOnHost = "" esx.audit.net.lacp.uplink.connected.formatOnComputeResource = "" esx.audit.net.lacp.uplink.connected.formatOnDatacenter = "" esx.audit.net.lacp.uplink.connected.fullFormat = "LACP info: uplink {1} on VDS {2} got connected." 
# esx.problem.net.lacp.lag.transition.down.category = "warning" esx.problem.net.lacp.lag.transition.down.description = "lag transition down" esx.problem.net.lacp.lag.transition.down.formatOnVm = "" esx.problem.net.lacp.lag.transition.down.formatOnHost = "" esx.problem.net.lacp.lag.transition.down.formatOnComputeResource = "" esx.problem.net.lacp.lag.transition.down.formatOnDatacenter = "" esx.problem.net.lacp.lag.transition.down.fullFormat = "LACP warning: LAG {1} on VDS {2} is down." # esx.clear.net.lacp.lag.transition.up.category = "info" esx.clear.net.lacp.lag.transition.up.description = "lag transition up" esx.clear.net.lacp.lag.transition.up.formatOnVm = "" esx.clear.net.lacp.lag.transition.up.formatOnHost = "" esx.clear.net.lacp.lag.transition.up.formatOnComputeResource = "" esx.clear.net.lacp.lag.transition.up.formatOnDatacenter = "" esx.clear.net.lacp.lag.transition.up.fullFormat = "LACP info: LAG {1} on VDS {2} is up." # esx.problem.net.portset.port.vlan.invalidid.category = "error" esx.problem.net.portset.port.vlan.invalidid.description = "Vlan ID of the port is invalid" esx.problem.net.portset.port.vlan.invalidid.formatOnVm = "" esx.problem.net.portset.port.vlan.invalidid.formatOnHost = "" esx.problem.net.portset.port.vlan.invalidid.formatOnComputeResource = "" esx.problem.net.portset.port.vlan.invalidid.formatOnDatacenter = "" esx.problem.net.portset.port.vlan.invalidid.fullFormat = "{1} VLANID {2} is invalid. VLAN ID must be between 0 and 4095." # esx.problem.net.gateway.set.failed.category = "error" esx.problem.net.gateway.set.failed.description = "Failed to set gateway" esx.problem.net.gateway.set.failed.formatOnVm = "" esx.problem.net.gateway.set.failed.formatOnHost = "" esx.problem.net.gateway.set.failed.formatOnComputeResource = "" esx.problem.net.gateway.set.failed.formatOnDatacenter = "" esx.problem.net.gateway.set.failed.fullFormat = "Cannot connect to the specified gateway {1}. Failed to set it." 
# esx.problem.net.heap.belowthreshold.category = "warning" esx.problem.net.heap.belowthreshold.description = "Network memory pool threshold" esx.problem.net.heap.belowthreshold.formatOnVm = "" esx.problem.net.heap.belowthreshold.formatOnHost = "" esx.problem.net.heap.belowthreshold.formatOnComputeResource = "" esx.problem.net.heap.belowthreshold.formatOnDatacenter = "" esx.problem.net.heap.belowthreshold.fullFormat = "{1} free size dropped below {2} percent." # esx.problem.cpu.mce.invalid.category = "error" esx.problem.cpu.mce.invalid.description = "MCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware." esx.problem.cpu.mce.invalid.formatOnVm = "" esx.problem.cpu.mce.invalid.formatOnHost = "" esx.problem.cpu.mce.invalid.formatOnComputeResource = "" esx.problem.cpu.mce.invalid.formatOnDatacenter = "" esx.problem.cpu.mce.invalid.fullFormat = "MCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware." # esx.problem.cpu.page.correctederrors.high.category = "info" esx.problem.cpu.page.correctederrors.high.description = "High number of corrected errors on a page." esx.problem.cpu.page.correctederrors.high.formatOnVm = "" esx.problem.cpu.page.correctederrors.high.formatOnHost = "" esx.problem.cpu.page.correctederrors.high.formatOnComputeResource = "" esx.problem.cpu.page.correctederrors.high.formatOnDatacenter = "" esx.problem.cpu.page.correctederrors.high.fullFormat = "High number of corrected errors on host physical page number {1}" # esx.problem.cpu.amd.mce.dram.disabled.category = "error" esx.problem.cpu.amd.mce.dram.disabled.description = "DRAM ECC not enabled. Please enable it in BIOS." 
esx.problem.cpu.amd.mce.dram.disabled.formatOnVm = "" esx.problem.cpu.amd.mce.dram.disabled.formatOnHost = "" esx.problem.cpu.amd.mce.dram.disabled.formatOnComputeResource = "" esx.problem.cpu.amd.mce.dram.disabled.formatOnDatacenter = "" esx.problem.cpu.amd.mce.dram.disabled.fullFormat = "DRAM ECC not enabled. Please enable it in BIOS." # esx.problem.cpu.intel.ioapic.listing.error.category = "error" esx.problem.cpu.intel.ioapic.listing.error.description = "Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform." esx.problem.cpu.intel.ioapic.listing.error.formatOnVm = "" esx.problem.cpu.intel.ioapic.listing.error.formatOnHost = "" esx.problem.cpu.intel.ioapic.listing.error.formatOnComputeResource = "" esx.problem.cpu.intel.ioapic.listing.error.formatOnDatacenter = "" esx.problem.cpu.intel.ioapic.listing.error.fullFormat = "Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform." # esx.problem.cpu.smp.ht.partner.missing.category = "error" esx.problem.cpu.smp.ht.partner.missing.description = "Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}." esx.problem.cpu.smp.ht.partner.missing.formatOnVm = "" esx.problem.cpu.smp.ht.partner.missing.formatOnHost = "" esx.problem.cpu.smp.ht.partner.missing.formatOnComputeResource = "" esx.problem.cpu.smp.ht.partner.missing.formatOnDatacenter = "" esx.problem.cpu.smp.ht.partner.missing.fullFormat = "Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}." # esx.problem.cpu.smp.ht.invalid.category = "error" esx.problem.cpu.smp.ht.invalid.description = "Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}."
esx.problem.cpu.smp.ht.invalid.formatOnVm = "" esx.problem.cpu.smp.ht.invalid.formatOnHost = "" esx.problem.cpu.smp.ht.invalid.formatOnComputeResource = "" esx.problem.cpu.smp.ht.invalid.formatOnDatacenter = "" esx.problem.cpu.smp.ht.invalid.fullFormat = "Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}." # esx.problem.cpu.smp.ht.numpcpus.max.category = "error" esx.problem.cpu.smp.ht.numpcpus.max.description = "Found {1} PCPUs, but only using {2} of them due to specified limit." esx.problem.cpu.smp.ht.numpcpus.max.formatOnVm = "" esx.problem.cpu.smp.ht.numpcpus.max.formatOnHost = "" esx.problem.cpu.smp.ht.numpcpus.max.formatOnComputeResource = "" esx.problem.cpu.smp.ht.numpcpus.max.formatOnDatacenter = "" esx.problem.cpu.smp.ht.numpcpus.max.fullFormat = "Found {1} PCPUs, but only using {2} of them due to specified limit." # esx.problem.hardware.acpi.interrupt.routing.device.invalid.category = "error" esx.problem.hardware.acpi.interrupt.routing.device.invalid.description = "Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug." esx.problem.hardware.acpi.interrupt.routing.device.invalid.formatOnVm = "" esx.problem.hardware.acpi.interrupt.routing.device.invalid.formatOnHost = "" esx.problem.hardware.acpi.interrupt.routing.device.invalid.formatOnComputeResource = "" esx.problem.hardware.acpi.interrupt.routing.device.invalid.formatOnDatacenter = "" esx.problem.hardware.acpi.interrupt.routing.device.invalid.fullFormat = "Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug." # esx.problem.hardware.acpi.interrupt.routing.pin.invalid.category = "error" esx.problem.hardware.acpi.interrupt.routing.pin.invalid.description = "Skipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug." 
esx.problem.hardware.acpi.interrupt.routing.pin.invalid.formatOnVm = "" esx.problem.hardware.acpi.interrupt.routing.pin.invalid.formatOnHost = "" esx.problem.hardware.acpi.interrupt.routing.pin.invalid.formatOnComputeResource = "" esx.problem.hardware.acpi.interrupt.routing.pin.invalid.formatOnDatacenter = "" esx.problem.hardware.acpi.interrupt.routing.pin.invalid.fullFormat = "Skipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug." # esx.problem.hardware.nvd.health.alarms.es.lifetime.error.category = "error" esx.problem.hardware.nvd.health.alarms.es.lifetime.error.description = "NVDIMM: Energy Source Lifetime Error tripped." esx.problem.hardware.nvd.health.alarms.es.lifetime.error.formatOnVm = "" esx.problem.hardware.nvd.health.alarms.es.lifetime.error.formatOnHost = "" esx.problem.hardware.nvd.health.alarms.es.lifetime.error.formatOnComputeResource = "" esx.problem.hardware.nvd.health.alarms.es.lifetime.error.formatOnDatacenter = "" esx.problem.hardware.nvd.health.alarms.es.lifetime.error.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Error tripped." # esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.category = "warning" esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.description = "NVDIMM: Energy Source Lifetime Warning tripped." esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.formatOnVm = "" esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.formatOnHost = "" esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.formatOnComputeResource = "" esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.formatOnDatacenter = "" esx.audit.hardware.nvd.health.alarms.es.lifetime.warning.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Warning tripped." # esx.problem.hardware.nvd.health.alarms.es.temperature.error.category = "error" esx.problem.hardware.nvd.health.alarms.es.temperature.error.description = "NVDIMM: Energy Source Temperature Error tripped." 
esx.problem.hardware.nvd.health.alarms.es.temperature.error.formatOnVm = "" esx.problem.hardware.nvd.health.alarms.es.temperature.error.formatOnHost = "" esx.problem.hardware.nvd.health.alarms.es.temperature.error.formatOnComputeResource = "" esx.problem.hardware.nvd.health.alarms.es.temperature.error.formatOnDatacenter = "" esx.problem.hardware.nvd.health.alarms.es.temperature.error.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Error tripped." # esx.audit.hardware.nvd.health.alarms.es.temperature.warning.category = "warning" esx.audit.hardware.nvd.health.alarms.es.temperature.warning.description = "NVDIMM: Energy Source Temperature Warning tripped." esx.audit.hardware.nvd.health.alarms.es.temperature.warning.formatOnVm = "" esx.audit.hardware.nvd.health.alarms.es.temperature.warning.formatOnHost = "" esx.audit.hardware.nvd.health.alarms.es.temperature.warning.formatOnComputeResource = "" esx.audit.hardware.nvd.health.alarms.es.temperature.warning.formatOnDatacenter = "" esx.audit.hardware.nvd.health.alarms.es.temperature.warning.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Warning tripped." # esx.problem.hardware.nvd.health.alarms.lifetime.error.category = "error" esx.problem.hardware.nvd.health.alarms.lifetime.error.description = "NVDIMM: Lifetime Error tripped." esx.problem.hardware.nvd.health.alarms.lifetime.error.formatOnVm = "" esx.problem.hardware.nvd.health.alarms.lifetime.error.formatOnHost = "" esx.problem.hardware.nvd.health.alarms.lifetime.error.formatOnComputeResource = "" esx.problem.hardware.nvd.health.alarms.lifetime.error.formatOnDatacenter = "" esx.problem.hardware.nvd.health.alarms.lifetime.error.fullFormat = "NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Error tripped." # esx.audit.hardware.nvd.health.alarms.lifetime.warning.category = "warning" esx.audit.hardware.nvd.health.alarms.lifetime.warning.description = "NVDIMM: Lifetime Warning tripped." 
esx.audit.hardware.nvd.health.alarms.lifetime.warning.formatOnVm = "" esx.audit.hardware.nvd.health.alarms.lifetime.warning.formatOnHost = "" esx.audit.hardware.nvd.health.alarms.lifetime.warning.formatOnComputeResource = "" esx.audit.hardware.nvd.health.alarms.lifetime.warning.formatOnDatacenter = "" esx.audit.hardware.nvd.health.alarms.lifetime.warning.fullFormat = "NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Warning tripped." # esx.audit.hardware.nvd.health.alarms.spareblocks.category = "warning" esx.audit.hardware.nvd.health.alarms.spareblocks.description = "NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit." esx.audit.hardware.nvd.health.alarms.spareblocks.formatOnVm = "" esx.audit.hardware.nvd.health.alarms.spareblocks.formatOnHost = "" esx.audit.hardware.nvd.health.alarms.spareblocks.formatOnComputeResource = "" esx.audit.hardware.nvd.health.alarms.spareblocks.formatOnDatacenter = "" esx.audit.hardware.nvd.health.alarms.spareblocks.fullFormat = "NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit." # esx.audit.hardware.nvd.health.alarms.temperature.category = "warning" esx.audit.hardware.nvd.health.alarms.temperature.description = "NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit." esx.audit.hardware.nvd.health.alarms.temperature.formatOnVm = "" esx.audit.hardware.nvd.health.alarms.temperature.formatOnHost = "" esx.audit.hardware.nvd.health.alarms.temperature.formatOnComputeResource = "" esx.audit.hardware.nvd.health.alarms.temperature.formatOnDatacenter = "" esx.audit.hardware.nvd.health.alarms.temperature.fullFormat = "NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit." 
# esx.problem.hardware.nvd.health.lastshutdownstatus.category = "error" esx.problem.hardware.nvd.health.lastshutdownstatus.description = "NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device." esx.problem.hardware.nvd.health.lastshutdownstatus.formatOnVm = "" esx.problem.hardware.nvd.health.lastshutdownstatus.formatOnHost = "" esx.problem.hardware.nvd.health.lastshutdownstatus.formatOnComputeResource = "" esx.problem.hardware.nvd.health.lastshutdownstatus.formatOnDatacenter = "" esx.problem.hardware.nvd.health.lastshutdownstatus.fullFormat = "NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device." # esx.audit.hardware.nvd.health.life.pctused.category = "warning" esx.audit.hardware.nvd.health.life.pctused.description = "NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4})." esx.audit.hardware.nvd.health.life.pctused.formatOnVm = "" esx.audit.hardware.nvd.health.life.pctused.formatOnHost = "" esx.audit.hardware.nvd.health.life.pctused.formatOnComputeResource = "" esx.audit.hardware.nvd.health.life.pctused.formatOnDatacenter = "" esx.audit.hardware.nvd.health.life.pctused.fullFormat = "NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4})." # esx.audit.hardware.nvd.health.module.ce.category = "info" esx.audit.hardware.nvd.health.module.ce.description = "NVDIMM Count of DRAM correctable ECC errors above threshold." 
esx.audit.hardware.nvd.health.module.ce.formatOnVm = "" esx.audit.hardware.nvd.health.module.ce.formatOnHost = "" esx.audit.hardware.nvd.health.module.ce.formatOnComputeResource = "" esx.audit.hardware.nvd.health.module.ce.formatOnDatacenter = "" esx.audit.hardware.nvd.health.module.ce.fullFormat = "NVDIMM (handle {1}, idString {2}): Count of DRAM correctable ECC errors above threshold." # esx.problem.hardware.nvd.health.module.config.error.category = "error" esx.problem.hardware.nvd.health.module.config.error.description = "NVDIMM Configuration error detected." esx.problem.hardware.nvd.health.module.config.error.formatOnVm = "" esx.problem.hardware.nvd.health.module.config.error.formatOnHost = "" esx.problem.hardware.nvd.health.module.config.error.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.config.error.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.config.error.fullFormat = "NVDIMM (handle {1}, idString {2}): Configuration error detected." # esx.problem.hardware.nvd.health.module.ctlr.fail.category = "error" esx.problem.hardware.nvd.health.module.ctlr.fail.description = "NVDIMM Controller failure detected." esx.problem.hardware.nvd.health.module.ctlr.fail.formatOnVm = "" esx.problem.hardware.nvd.health.module.ctlr.fail.formatOnHost = "" esx.problem.hardware.nvd.health.module.ctlr.fail.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.ctlr.fail.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.ctlr.fail.fullFormat = "NVDIMM (handle {1}, idString {2}): Controller failure detected. Access to the device and its capabilities are lost." # esx.problem.hardware.nvd.health.module.ctlr.fw.error.category = "error" esx.problem.hardware.nvd.health.module.ctlr.fw.error.description = "NVDIMM Controller firmware error detected." 
esx.problem.hardware.nvd.health.module.ctlr.fw.error.formatOnVm = "" esx.problem.hardware.nvd.health.module.ctlr.fw.error.formatOnHost = "" esx.problem.hardware.nvd.health.module.ctlr.fw.error.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.ctlr.fw.error.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.ctlr.fw.error.fullFormat = "NVDIMM (handle {1}, idString {2}): Controller firmware error detected." # esx.clear.hardware.nvd.health.module.es.charged.category = "info" esx.clear.hardware.nvd.health.module.es.charged.description = "NVDIMM Energy Source is sufficiently charged." esx.clear.hardware.nvd.health.module.es.charged.formatOnVm = "" esx.clear.hardware.nvd.health.module.es.charged.formatOnHost = "" esx.clear.hardware.nvd.health.module.es.charged.formatOnComputeResource = "" esx.clear.hardware.nvd.health.module.es.charged.formatOnDatacenter = "" esx.clear.hardware.nvd.health.module.es.charged.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source is sufficiently charged." # esx.problem.hardware.nvd.health.module.es.charging.category = "warning" esx.problem.hardware.nvd.health.module.es.charging.description = "NVDIMM Energy Source still charging." esx.problem.hardware.nvd.health.module.es.charging.formatOnVm = "" esx.problem.hardware.nvd.health.module.es.charging.formatOnHost = "" esx.problem.hardware.nvd.health.module.es.charging.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.es.charging.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.es.charging.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source still charging but does not have sufficient charge to support a backup. Persistency is temporarily lost for the device." # esx.problem.hardware.nvd.health.module.es.fail.category = "error" esx.problem.hardware.nvd.health.module.es.fail.description = "NVDIMM Energy Source failure detected." 
esx.problem.hardware.nvd.health.module.es.fail.formatOnVm = "" esx.problem.hardware.nvd.health.module.es.fail.formatOnHost = "" esx.problem.hardware.nvd.health.module.es.fail.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.es.fail.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.es.fail.fullFormat = "NVDIMM (handle {1}, idString {2}): Energy Source failure detected. Persistency is lost for the device." # esx.problem.hardware.nvd.health.module.ops.arm.fail.category = "warning" esx.problem.hardware.nvd.health.module.ops.arm.fail.description = "NVDIMM Previous ARM operation failed." esx.problem.hardware.nvd.health.module.ops.arm.fail.formatOnVm = "" esx.problem.hardware.nvd.health.module.ops.arm.fail.formatOnHost = "" esx.problem.hardware.nvd.health.module.ops.arm.fail.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.ops.arm.fail.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.ops.arm.fail.fullFormat = "NVDIMM (handle {1}, idString {2}): Previous ARM operation failed." # esx.problem.hardware.nvd.health.module.ops.erase.fail.category = "warning" esx.problem.hardware.nvd.health.module.ops.erase.fail.description = "NVDIMM Previous ERASE operation failed." esx.problem.hardware.nvd.health.module.ops.erase.fail.formatOnVm = "" esx.problem.hardware.nvd.health.module.ops.erase.fail.formatOnHost = "" esx.problem.hardware.nvd.health.module.ops.erase.fail.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.ops.erase.fail.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.ops.erase.fail.fullFormat = "NVDIMM (handle {1}, idString {2}): Previous ERASE operation failed." # esx.problem.hardware.nvd.health.module.ops.restore.fail.category = "error" esx.problem.hardware.nvd.health.module.ops.restore.fail.description = "NVDIMM Last RESTORE operation failed." 
esx.problem.hardware.nvd.health.module.ops.restore.fail.formatOnVm = "" esx.problem.hardware.nvd.health.module.ops.restore.fail.formatOnHost = "" esx.problem.hardware.nvd.health.module.ops.restore.fail.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.ops.restore.fail.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.ops.restore.fail.fullFormat = "NVDIMM (handle {1}, idString {2}): Last RESTORE operation failed." # esx.problem.hardware.nvd.health.module.ops.save.fail.category = "error" esx.problem.hardware.nvd.health.module.ops.save.fail.description = "NVDIMM Previous SAVE operation failed." esx.problem.hardware.nvd.health.module.ops.save.fail.formatOnVm = "" esx.problem.hardware.nvd.health.module.ops.save.fail.formatOnHost = "" esx.problem.hardware.nvd.health.module.ops.save.fail.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.ops.save.fail.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.ops.save.fail.fullFormat = "NVDIMM (handle {1}, idString {2}): Previous SAVE operation failed." # esx.problem.hardware.nvd.health.module.uce.category = "warning" esx.problem.hardware.nvd.health.module.uce.description = "NVDIMM Count of DRAM uncorrectable ECC errors above threshold." esx.problem.hardware.nvd.health.module.uce.formatOnVm = "" esx.problem.hardware.nvd.health.module.uce.formatOnHost = "" esx.problem.hardware.nvd.health.module.uce.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.uce.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.uce.fullFormat = "NVDIMM (handle {1}, idString {2}): Count of DRAM uncorrectable ECC errors above threshold." # esx.problem.hardware.nvd.health.module.vendor.error.category = "error" esx.problem.hardware.nvd.health.module.vendor.error.description = "NVDIMM Vendor specific error." 
esx.problem.hardware.nvd.health.module.vendor.error.formatOnVm = "" esx.problem.hardware.nvd.health.module.vendor.error.formatOnHost = "" esx.problem.hardware.nvd.health.module.vendor.error.formatOnComputeResource = "" esx.problem.hardware.nvd.health.module.vendor.error.formatOnDatacenter = "" esx.problem.hardware.nvd.health.module.vendor.error.fullFormat = "NVDIMM (handle {1}, idString {2}): Vendor specific error." # esx.audit.hardware.nvd.health.summary.noncritical.category = "info" esx.audit.hardware.nvd.health.summary.noncritical.description = "NVDIMM Health status summary: Non-Critical." esx.audit.hardware.nvd.health.summary.noncritical.formatOnVm = "" esx.audit.hardware.nvd.health.summary.noncritical.formatOnHost = "" esx.audit.hardware.nvd.health.summary.noncritical.formatOnComputeResource = "" esx.audit.hardware.nvd.health.summary.noncritical.formatOnDatacenter = "" esx.audit.hardware.nvd.health.summary.noncritical.fullFormat = "NVDIMM (handle {1}, idString {2}): Health status summary: Non-Critical condition, maintenance required but no data loss detected." # esx.audit.hardware.nvd.health.summary.critical.category = "warning" esx.audit.hardware.nvd.health.summary.critical.description = "NVDIMM Health status summary: Critical." esx.audit.hardware.nvd.health.summary.critical.formatOnVm = "" esx.audit.hardware.nvd.health.summary.critical.formatOnHost = "" esx.audit.hardware.nvd.health.summary.critical.formatOnComputeResource = "" esx.audit.hardware.nvd.health.summary.critical.formatOnDatacenter = "" esx.audit.hardware.nvd.health.summary.critical.fullFormat = "NVDIMM (handle {1}, idString {2}): Health status summary: Critical condition, features or performance degraded due to failures but no data loss detected." # esx.problem.hardware.nvd.health.summary.fatal.category = "error" esx.problem.hardware.nvd.health.summary.fatal.description = "NVDIMM Health status summary: Fatal." 
esx.problem.hardware.nvd.health.summary.fatal.formatOnVm = "" esx.problem.hardware.nvd.health.summary.fatal.formatOnHost = "" esx.problem.hardware.nvd.health.summary.fatal.formatOnComputeResource = "" esx.problem.hardware.nvd.health.summary.fatal.formatOnDatacenter = "" esx.problem.hardware.nvd.health.summary.fatal.fullFormat = "NVDIMM (handle {1}, idString {2}): Health status summary: Fatal condition, data loss detected or is imminent." # esx.problem.hardware.ioapic.missing.category = "error" esx.problem.hardware.ioapic.missing.description = "IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC." esx.problem.hardware.ioapic.missing.formatOnVm = "" esx.problem.hardware.ioapic.missing.formatOnHost = "" esx.problem.hardware.ioapic.missing.formatOnComputeResource = "" esx.problem.hardware.ioapic.missing.formatOnDatacenter = "" esx.problem.hardware.ioapic.missing.fullFormat = "IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC." # vprob.vmfs.journal.createfailed.category = "error" vprob.vmfs.journal.createfailed.description = "No Space To Create VMFS Journal" vprob.vmfs.journal.createfailed.formatOnVm = "" vprob.vmfs.journal.createfailed.formatOnHost = "" vprob.vmfs.journal.createfailed.formatOnComputeResource = "" vprob.vmfs.journal.createfailed.formatOnDatacenter = "" vprob.vmfs.journal.createfailed.fullFormat = "No space for journal on volume {1} ({2}). Opening volume in read-only metadata mode with limited write support." # vprob.vmfs.heartbeat.timedout.category = "info" vprob.vmfs.heartbeat.timedout.description = "VMFS Volume Connectivity Degraded" vprob.vmfs.heartbeat.timedout.formatOnVm = "" vprob.vmfs.heartbeat.timedout.formatOnHost = "" vprob.vmfs.heartbeat.timedout.formatOnComputeResource = "" vprob.vmfs.heartbeat.timedout.formatOnDatacenter = "" vprob.vmfs.heartbeat.timedout.fullFormat = "Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly." # vprob.vmfs.heartbeat.recovered.category = "info" vprob.vmfs.heartbeat.recovered.description = "VMFS Volume Connectivity Restored" vprob.vmfs.heartbeat.recovered.formatOnVm = "" vprob.vmfs.heartbeat.recovered.formatOnHost = "" vprob.vmfs.heartbeat.recovered.formatOnComputeResource = "" vprob.vmfs.heartbeat.recovered.formatOnDatacenter = "" vprob.vmfs.heartbeat.recovered.fullFormat = "Successfully restored access to volume {1} ({2}) following connectivity issues." # vprob.vmfs.heartbeat.unrecoverable.category = "error" vprob.vmfs.heartbeat.unrecoverable.description = "VMFS Volume Connectivity Lost" vprob.vmfs.heartbeat.unrecoverable.formatOnVm = "" vprob.vmfs.heartbeat.unrecoverable.formatOnHost = "" vprob.vmfs.heartbeat.unrecoverable.formatOnComputeResource = "" vprob.vmfs.heartbeat.unrecoverable.formatOnDatacenter = "" vprob.vmfs.heartbeat.unrecoverable.fullFormat = "Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed." # vprob.vmfs.lock.corruptondisk.category = "error" vprob.vmfs.lock.corruptondisk.description = "VMFS Lock Corruption Detected" vprob.vmfs.lock.corruptondisk.formatOnVm = "" vprob.vmfs.lock.corruptondisk.formatOnHost = "" vprob.vmfs.lock.corruptondisk.formatOnComputeResource = "" vprob.vmfs.lock.corruptondisk.formatOnDatacenter = "" vprob.vmfs.lock.corruptondisk.fullFormat = "At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume may be damaged too."
# vprob.vmfs.heartbeat.corruptondisk.category = "error" vprob.vmfs.heartbeat.corruptondisk.description = "VMFS Heartbeat Corruption Detected" vprob.vmfs.heartbeat.corruptondisk.formatOnVm = "" vprob.vmfs.heartbeat.corruptondisk.formatOnHost = "" vprob.vmfs.heartbeat.corruptondisk.formatOnComputeResource = "" vprob.vmfs.heartbeat.corruptondisk.formatOnDatacenter = "" vprob.vmfs.heartbeat.corruptondisk.fullFormat = "At least one corrupt HB slot was detected on volume {1} ({2}). Other regions of the volume may be damaged too." # vprob.vmfs.resource.corruptondisk.category = "error" vprob.vmfs.resource.corruptondisk.description = "VMFS Resource Corruption Detected" vprob.vmfs.resource.corruptondisk.formatOnVm = "" vprob.vmfs.resource.corruptondisk.formatOnHost = "" vprob.vmfs.resource.corruptondisk.formatOnComputeResource = "" vprob.vmfs.resource.corruptondisk.formatOnDatacenter = "" vprob.vmfs.resource.corruptondisk.fullFormat = "At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too." # vprob.vmfs.error.volume.is.locked.category = "error" vprob.vmfs.error.volume.is.locked.description = "VMFS Locked By Remote Host" vprob.vmfs.error.volume.is.locked.formatOnVm = "" vprob.vmfs.error.volume.is.locked.formatOnHost = "" vprob.vmfs.error.volume.is.locked.formatOnComputeResource = "" vprob.vmfs.error.volume.is.locked.formatOnDatacenter = "" vprob.vmfs.error.volume.is.locked.fullFormat = "Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover." # vprob.vmfs.extent.offline.category = "error" vprob.vmfs.extent.offline.description = "Device backing an extent of a file system is offline." 
vprob.vmfs.extent.offline.formatOnVm = "" vprob.vmfs.extent.offline.formatOnHost = "" vprob.vmfs.extent.offline.formatOnComputeResource = "" vprob.vmfs.extent.offline.formatOnDatacenter = "" vprob.vmfs.extent.offline.fullFormat = "An attached device {1} might be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible." # vprob.vmfs.extent.online.category = "info" vprob.vmfs.extent.online.description = "Device backing an extent of a file system is online." vprob.vmfs.extent.online.formatOnVm = "" vprob.vmfs.extent.online.formatOnHost = "" vprob.vmfs.extent.online.formatOnComputeResource = "" vprob.vmfs.extent.online.formatOnDatacenter = "" vprob.vmfs.extent.online.fullFormat = "Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available." # vprob.scsi.device.thinprov.atquota.category = "warning" vprob.scsi.device.thinprov.atquota.description = "Thin Provisioned Device Nearing Capacity" vprob.scsi.device.thinprov.atquota.formatOnVm = "" vprob.scsi.device.thinprov.atquota.formatOnHost = "" vprob.scsi.device.thinprov.atquota.formatOnComputeResource = "" vprob.scsi.device.thinprov.atquota.formatOnDatacenter = "" vprob.scsi.device.thinprov.atquota.fullFormat = "Space utilization on thin-provisioned device {1} exceeded configured threshold." # vprob.vmfs.nfs.server.disconnect.category = "error" vprob.vmfs.nfs.server.disconnect.description = "Lost connection to NFS server" vprob.vmfs.nfs.server.disconnect.formatOnVm = "" vprob.vmfs.nfs.server.disconnect.formatOnHost = "" vprob.vmfs.nfs.server.disconnect.formatOnComputeResource = "" vprob.vmfs.nfs.server.disconnect.formatOnDatacenter = "" vprob.vmfs.nfs.server.disconnect.fullFormat = "Lost connection to server {1} mount point {2} mounted as {3} ({4})." 
# vprob.vmfs.nfs.server.restored.category = "info" vprob.vmfs.nfs.server.restored.description = "Restored connection to NFS server" vprob.vmfs.nfs.server.restored.formatOnVm = "" vprob.vmfs.nfs.server.restored.formatOnHost = "" vprob.vmfs.nfs.server.restored.formatOnComputeResource = "" vprob.vmfs.nfs.server.restored.formatOnDatacenter = "" vprob.vmfs.nfs.server.restored.fullFormat = "Restored connection to server {1} mount point {2} mounted as {3} ({4})." # com.vmware.vc.vcp.VmNetworkFailedEvent.category = "error" com.vmware.vc.vcp.VmNetworkFailedEvent.description = "Virtual machine lost VM network accessibility" com.vmware.vc.vcp.VmNetworkFailedEvent.formatOnVm = "Virtual machine lost access to {network}" com.vmware.vc.vcp.VmNetworkFailedEvent.formatOnHost = "Virtual machine {vm.name} lost access to {network}" com.vmware.vc.vcp.VmNetworkFailedEvent.formatOnComputeResource = "Virtual machine {vm.name} on host {host.name} lost access to {network}" com.vmware.vc.vcp.VmNetworkFailedEvent.formatOnDatacenter = "Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {network}" com.vmware.vc.vcp.VmNetworkFailedEvent.fullFormat = "Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {network}" # com.vmware.vc.vcp.VmDatastoreFailedEvent.category = "error" com.vmware.vc.vcp.VmDatastoreFailedEvent.description = "Virtual machine lost datastore access" com.vmware.vc.vcp.VmDatastoreFailedEvent.formatOnVm = "Virtual machine lost access to {datastore}" com.vmware.vc.vcp.VmDatastoreFailedEvent.formatOnHost = "Virtual machine {vm.name} lost access to {datastore}" com.vmware.vc.vcp.VmDatastoreFailedEvent.formatOnComputeResource = "Virtual machine {vm.name} on host {host.name} lost access to {datastore}" com.vmware.vc.vcp.VmDatastoreFailedEvent.formatOnDatacenter = "Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {datastore}" 
com.vmware.vc.vcp.VmDatastoreFailedEvent.fullFormat = "Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {datastore}" # com.vmware.vc.vcp.VmRestartEvent.category = "info" com.vmware.vc.vcp.VmRestartEvent.description = "Restarting VM due to component failure" com.vmware.vc.vcp.VmRestartEvent.formatOnVm = "HA VM Component Protection is restarting virtual machine due to component failure" com.vmware.vc.vcp.VmRestartEvent.formatOnHost = "HA VM Component Protection is restarting virtual machine {vm.name} due to component failure" com.vmware.vc.vcp.VmRestartEvent.formatOnComputeResource = "HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name}" com.vmware.vc.vcp.VmRestartEvent.formatOnDatacenter = "HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name}" com.vmware.vc.vcp.VmRestartEvent.fullFormat = "HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}" # com.vmware.vc.vcp.VmRestartFailedEvent.category = "error" com.vmware.vc.vcp.VmRestartFailedEvent.description = "Virtual machine affected by component failure failed to restart" com.vmware.vc.vcp.VmRestartFailedEvent.formatOnVm = "Virtual machine affected by component failure failed to restart" com.vmware.vc.vcp.VmRestartFailedEvent.formatOnHost = "Virtual machine {vm.name} affected by component failure failed to restart" com.vmware.vc.vcp.VmRestartFailedEvent.formatOnComputeResource = "Virtual machine {vm.name} affected by component failure on host {host.name} failed to restart" com.vmware.vc.vcp.VmRestartFailedEvent.formatOnDatacenter = "Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} failed to restart" 
com.vmware.vc.vcp.VmRestartFailedEvent.fullFormat = "Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restart" # com.vmware.vc.vcp.FtSecondaryRestartEvent.category = "info" com.vmware.vc.vcp.FtSecondaryRestartEvent.description = "Restarting FT secondary due to component failure" com.vmware.vc.vcp.FtSecondaryRestartEvent.formatOnVm = "HA VM Component Protection is restarting FT secondary virtual machine due to component failure" com.vmware.vc.vcp.FtSecondaryRestartEvent.formatOnHost = "HA VM Component Protection is restarting FT secondary virtual machine {vm.name} due to component failure" com.vmware.vc.vcp.FtSecondaryRestartEvent.formatOnComputeResource = "HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} due to component failure" com.vmware.vc.vcp.FtSecondaryRestartEvent.formatOnDatacenter = "HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to component failure" com.vmware.vc.vcp.FtSecondaryRestartEvent.fullFormat = "HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to component failure" # com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.category = "error" com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.description = "FT secondary VM restart failed" com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.formatOnVm = "FT Secondary VM failed to restart" com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.formatOnHost = "FT Secondary VM {vm.name} failed to restart" com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.formatOnComputeResource = "FT Secondary VM {vm.name} on host {host.name} failed to restart" com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.formatOnDatacenter = "FT Secondary VM {vm.name} on host {host.name} in 
cluster {computeResource.name} failed to restart" com.vmware.vc.vcp.FtSecondaryRestartFailedEvent.fullFormat = "FT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restart" # com.vmware.vc.vcp.FtFailoverEvent.category = "info" com.vmware.vc.vcp.FtFailoverEvent.description = "Failover FT VM due to component failure" com.vmware.vc.vcp.FtFailoverEvent.formatOnVm = "FT Primary VM is going to fail over to Secondary VM due to component failure" com.vmware.vc.vcp.FtFailoverEvent.formatOnHost = "FT Primary VM {vm.name} is going to fail over to Secondary VM due to component failure" com.vmware.vc.vcp.FtFailoverEvent.formatOnComputeResource = "FT Primary VM {vm.name} on host {host.name} is going to fail over to Secondary VM due to component failure" com.vmware.vc.vcp.FtFailoverEvent.formatOnDatacenter = "FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} is going to fail over to Secondary VM due to component failure" com.vmware.vc.vcp.FtFailoverEvent.fullFormat = "FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is going to fail over to Secondary VM due to component failure" # com.vmware.vc.vcp.FtFailoverFailedEvent.category = "error" com.vmware.vc.vcp.FtFailoverFailedEvent.description = "FT VM failover failed" com.vmware.vc.vcp.FtFailoverFailedEvent.formatOnVm = "FT virtual machine failed to failover to secondary" com.vmware.vc.vcp.FtFailoverFailedEvent.formatOnHost = "FT virtual machine {vm.name} failed to failover to secondary" com.vmware.vc.vcp.FtFailoverFailedEvent.formatOnComputeResource = "FT virtual machine {vm.name} on host {host.name} failed to failover to secondary" com.vmware.vc.vcp.FtFailoverFailedEvent.formatOnDatacenter = "FT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} failed to failover to secondary" com.vmware.vc.vcp.FtFailoverFailedEvent.fullFormat = "FT virtual 
machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to failover to secondary" # com.vmware.vc.vcp.VmPowerOffHangEvent.category = "error" com.vmware.vc.vcp.VmPowerOffHangEvent.description = "VM power off hang" com.vmware.vc.vcp.VmPowerOffHangEvent.formatOnVm = "HA VM Component Protection could not power off virtual machine successfully after trying {numTimes} times and will keep trying" com.vmware.vc.vcp.VmPowerOffHangEvent.formatOnHost = "HA VM Component Protection could not power off virtual machine {vm.name} successfully after trying {numTimes} times and will keep trying" com.vmware.vc.vcp.VmPowerOffHangEvent.formatOnComputeResource = "HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} successfully after trying {numTimes} times and will keep trying" com.vmware.vc.vcp.VmPowerOffHangEvent.formatOnDatacenter = "HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} successfully after trying {numTimes} times and will keep trying" com.vmware.vc.vcp.VmPowerOffHangEvent.fullFormat = "HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} successfully after trying {numTimes} times and will keep trying" # com.vmware.vc.vcp.VmWaitForCandidateHostEvent.category = "error" com.vmware.vc.vcp.VmWaitForCandidateHostEvent.description = "No candidate host to restart" com.vmware.vc.vcp.VmWaitForCandidateHostEvent.formatOnVm = "HA VM Component Protection could not find a destination host for this virtual machine after waiting {numSecWait} seconds and will keep trying" com.vmware.vc.vcp.VmWaitForCandidateHostEvent.formatOnHost = "HA VM Component Protection could not find a destination host for virtual machine {vm.name} after waiting {numSecWait} seconds and will keep trying" 
com.vmware.vc.vcp.VmWaitForCandidateHostEvent.formatOnComputeResource = "HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} after waiting {numSecWait} seconds and will keep trying" com.vmware.vc.vcp.VmWaitForCandidateHostEvent.formatOnDatacenter = "HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} after waiting {numSecWait} seconds and will keep trying" com.vmware.vc.vcp.VmWaitForCandidateHostEvent.fullFormat = "HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} after waiting {numSecWait} seconds and will keep trying" # com.vmware.vc.vcp.VcpNoActionEvent.category = "info" com.vmware.vc.vcp.VcpNoActionEvent.description = "No action on VM" com.vmware.vc.vcp.VcpNoActionEvent.formatOnVm = "HA VM Component Protection did not take action due to the feature configuration setting" com.vmware.vc.vcp.VcpNoActionEvent.formatOnHost = "HA VM Component Protection did not take action on virtual machine {vm.name} due to the feature configuration setting" com.vmware.vc.vcp.VcpNoActionEvent.formatOnComputeResource = "HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} due to the feature configuration setting" com.vmware.vc.vcp.VcpNoActionEvent.formatOnDatacenter = "HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to the feature configuration setting" com.vmware.vc.vcp.VcpNoActionEvent.fullFormat = "HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to the feature configuration setting" # com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.category = "info" 
com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.description = "FT Disabled VM protected as non-FT VM" com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.formatOnVm = "HA VM Component Protection will protect this virtual machine as non-FT virtual machine because the FT state is disabled" com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.formatOnHost = "HA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because the FT state is disabled" com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.formatOnComputeResource = "HA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because the FT state is disabled" com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.formatOnDatacenter = "HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because the FT state is disabled" com.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent.fullFormat = "HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because the FT state is disabled" # com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.category = "info" com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.description = "Need secondary VM protected as non-FT VM" com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.formatOnVm = "HA VM Component Protection protects this virtual machine as non-FT virtual machine because it has been in the needSecondary state too long" com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.formatOnHost = "HA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because it has been in the needSecondary state too long" com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.formatOnComputeResource = "HA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because it 
has been in the needSecondary state too long" com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.formatOnDatacenter = "HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because it has been in the needSecondary state too long" com.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent.fullFormat = "HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because it has been in the needSecondary state too long" # com.vmware.vc.vcp.TestStartEvent.category = "info" com.vmware.vc.vcp.TestStartEvent.description = "VM Component Protection test starts" com.vmware.vc.vcp.TestStartEvent.formatOnVm = "" com.vmware.vc.vcp.TestStartEvent.formatOnHost = "VM Component Protection test starts" com.vmware.vc.vcp.TestStartEvent.formatOnComputeResource = "VM Component Protection test starts on host {host.name}" com.vmware.vc.vcp.TestStartEvent.formatOnDatacenter = "VM Component Protection test starts on host {host.name} in cluster {computeResource.name}" com.vmware.vc.vcp.TestStartEvent.fullFormat = "VM Component Protection test starts on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}" # com.vmware.vc.vcp.TestEndEvent.category = "info" com.vmware.vc.vcp.TestEndEvent.description = "VM Component Protection test ends" com.vmware.vc.vcp.TestEndEvent.formatOnVm = "" com.vmware.vc.vcp.TestEndEvent.formatOnHost = "VM Component Protection test ends" com.vmware.vc.vcp.TestEndEvent.formatOnComputeResource = "VM Component Protection test ends on host {host.name}" com.vmware.vc.vcp.TestEndEvent.formatOnDatacenter = "VM Component Protection test ends on host {host.name} in cluster {computeResource.name}" com.vmware.vc.vcp.TestEndEvent.fullFormat = "VM Component Protection test ends on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}" # 
com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.category = "info" com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.description = "Virtual NIC entered passthrough mode" com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.formatOnVm = "Network passthrough is active on adapter {deviceLabel}" com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.formatOnHost = "Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name}" com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.formatOnComputeResource = "Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}" com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.formatOnDatacenter = "Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}" com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent.fullFormat = "Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}" # com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.category = "info" com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.description = "Virtual NIC exited passthrough mode" com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.formatOnVm = "Network passthrough is inactive on adapter {deviceLabel}" com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.formatOnHost = "Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name}" com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.formatOnComputeResource = "Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}" com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.formatOnDatacenter = "Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}" com.vmware.vc.npt.VmAdapterExitedPassthroughEvent.fullFormat = "Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}" 
# com.vmware.vc.datastore.UpdatingVmFilesEvent.category = "info" com.vmware.vc.datastore.UpdatingVmFilesEvent.description = "Updating VM Files" com.vmware.vc.datastore.UpdatingVmFilesEvent.formatOnComputeResource = "" com.vmware.vc.datastore.UpdatingVmFilesEvent.formatOnDatacenter = "" com.vmware.vc.datastore.UpdatingVmFilesEvent.formatOnHost = "Updating VM files on datastore {ds.name}" com.vmware.vc.datastore.UpdatingVmFilesEvent.formatOnVm = "" com.vmware.vc.datastore.UpdatingVmFilesEvent.fullFormat = "Updating VM files on datastore {ds.name} using host {hostName}" # com.vmware.vc.datastore.UpdatedVmFilesEvent.category = "info" com.vmware.vc.datastore.UpdatedVmFilesEvent.description = "Updated VM files" com.vmware.vc.datastore.UpdatedVmFilesEvent.formatOnComputeResource = "" com.vmware.vc.datastore.UpdatedVmFilesEvent.formatOnDatacenter = "" com.vmware.vc.datastore.UpdatedVmFilesEvent.formatOnHost = "Updated VM files on datastore {ds.name}" com.vmware.vc.datastore.UpdatedVmFilesEvent.formatOnVm = "" com.vmware.vc.datastore.UpdatedVmFilesEvent.fullFormat = "Updated VM files on datastore {ds.name} using host {hostName}" # com.vmware.vc.datastore.UpdateVmFilesFailedEvent.category = "error" com.vmware.vc.datastore.UpdateVmFilesFailedEvent.description = "Failed to update VM files" com.vmware.vc.datastore.UpdateVmFilesFailedEvent.formatOnComputeResource = "" com.vmware.vc.datastore.UpdateVmFilesFailedEvent.formatOnDatacenter = "" com.vmware.vc.datastore.UpdateVmFilesFailedEvent.formatOnHost = "Failed to update VM files on datastore {ds.name}" com.vmware.vc.datastore.UpdateVmFilesFailedEvent.formatOnVm = "" com.vmware.vc.datastore.UpdateVmFilesFailedEvent.fullFormat = "Failed to update VM files on datastore {ds.name} using host {hostName}" # com.vmware.vc.vmam.AppMonitoringNotSupported.category = "warning" com.vmware.vc.vmam.AppMonitoringNotSupported.description = "Application Monitoring Is Not Supported" 
com.vmware.vc.vmam.AppMonitoringNotSupported.formatOnComputeResource = "Application monitoring is not supported on {host.name}" com.vmware.vc.vmam.AppMonitoringNotSupported.formatOnDatacenter = "Application monitoring is not supported on {host.name} in cluster {computeResource.name}" com.vmware.vc.vmam.AppMonitoringNotSupported.formatOnHost = "Application monitoring is not supported" com.vmware.vc.vmam.AppMonitoringNotSupported.formatOnVm = "" com.vmware.vc.vmam.AppMonitoringNotSupported.fullFormat = "Application monitoring is not supported on {host.name} in cluster {computeResource.name} in {datacenter.name}" # esx.problem.net.connectivity.lost.category = "error" esx.problem.net.connectivity.lost.description = "Lost Network Connectivity" esx.problem.net.connectivity.lost.formatOnVm = "" esx.problem.net.connectivity.lost.formatOnHost = "" esx.problem.net.connectivity.lost.formatOnComputeResource = "" esx.problem.net.connectivity.lost.formatOnDatacenter = "" esx.problem.net.connectivity.lost.fullFormat = "Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}." # esx.problem.net.redundancy.lost.category = "warning" esx.problem.net.redundancy.lost.description = "Lost Network Redundancy" esx.problem.net.redundancy.lost.formatOnVm = "" esx.problem.net.redundancy.lost.formatOnHost = "" esx.problem.net.redundancy.lost.formatOnComputeResource = "" esx.problem.net.redundancy.lost.formatOnDatacenter = "" esx.problem.net.redundancy.lost.fullFormat = "Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}." 
# esx.problem.net.redundancy.degraded.category = "warning" esx.problem.net.redundancy.degraded.description = "Network Redundancy Degraded" esx.problem.net.redundancy.degraded.formatOnVm = "" esx.problem.net.redundancy.degraded.formatOnHost = "" esx.problem.net.redundancy.degraded.formatOnComputeResource = "" esx.problem.net.redundancy.degraded.formatOnDatacenter = "" esx.problem.net.redundancy.degraded.fullFormat = "Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}." # esx.problem.net.dvport.connectivity.lost.category = "error" esx.problem.net.dvport.connectivity.lost.description = "Lost Network Connectivity to DVPorts" esx.problem.net.dvport.connectivity.lost.formatOnVm = "" esx.problem.net.dvport.connectivity.lost.formatOnHost = "" esx.problem.net.dvport.connectivity.lost.formatOnComputeResource = "" esx.problem.net.dvport.connectivity.lost.formatOnDatacenter = "" esx.problem.net.dvport.connectivity.lost.fullFormat = "Lost network connectivity on DVPorts: {1}. Physical NIC {2} is down." # esx.problem.net.dvport.redundancy.lost.category = "warning" esx.problem.net.dvport.redundancy.lost.description = "Lost Network Redundancy on DVPorts" esx.problem.net.dvport.redundancy.lost.formatOnVm = "" esx.problem.net.dvport.redundancy.lost.formatOnHost = "" esx.problem.net.dvport.redundancy.lost.formatOnComputeResource = "" esx.problem.net.dvport.redundancy.lost.formatOnDatacenter = "" esx.problem.net.dvport.redundancy.lost.fullFormat = "Lost uplink redundancy on DVPorts: {1}. Physical NIC {2} is down." 
# esx.problem.net.dvport.redundancy.degraded.category = "warning" esx.problem.net.dvport.redundancy.degraded.description = "Network Redundancy Degraded on DVPorts" esx.problem.net.dvport.redundancy.degraded.formatOnVm = "" esx.problem.net.dvport.redundancy.degraded.formatOnHost = "" esx.problem.net.dvport.redundancy.degraded.formatOnComputeResource = "" esx.problem.net.dvport.redundancy.degraded.formatOnDatacenter = "" esx.problem.net.dvport.redundancy.degraded.fullFormat = "Uplink redundancy degraded on DVPorts: {1}. Physical NIC {2} is down." # esx.clear.net.dvport.connectivity.restored.category = "info" esx.clear.net.dvport.connectivity.restored.description = "Restored Network Connectivity to DVPorts" esx.clear.net.dvport.connectivity.restored.formatOnVm = "" esx.clear.net.dvport.connectivity.restored.formatOnHost = "" esx.clear.net.dvport.connectivity.restored.formatOnComputeResource = "" esx.clear.net.dvport.connectivity.restored.formatOnDatacenter = "" esx.clear.net.dvport.connectivity.restored.fullFormat = "Network connectivity restored on DVPorts: {1}. Physical NIC {2} is up." # esx.clear.net.dvport.redundancy.restored.category = "info" esx.clear.net.dvport.redundancy.restored.description = "Restored Network Redundancy to DVPorts" esx.clear.net.dvport.redundancy.restored.formatOnVm = "" esx.clear.net.dvport.redundancy.restored.formatOnHost = "" esx.clear.net.dvport.redundancy.restored.formatOnComputeResource = "" esx.clear.net.dvport.redundancy.restored.formatOnDatacenter = "" esx.clear.net.dvport.redundancy.restored.fullFormat = "Uplink redundancy restored on DVPorts: {1}. Physical NIC {2} is up." 
# esx.problem.net.vmnic.watchdog.reset.category = "warning" esx.problem.net.vmnic.watchdog.reset.description = "Nic Watchdog Reset" esx.problem.net.vmnic.watchdog.reset.formatOnVm = "" esx.problem.net.vmnic.watchdog.reset.formatOnHost = "" esx.problem.net.vmnic.watchdog.reset.formatOnComputeResource = "" esx.problem.net.vmnic.watchdog.reset.formatOnDatacenter = "" esx.problem.net.vmnic.watchdog.reset.fullFormat = "Uplink {1} has recovered from a transient failure due to watchdog timeout" # esx.problem.net.vmnic.linkstate.down.category = "warning" esx.problem.net.vmnic.linkstate.down.description = "Link state down" esx.problem.net.vmnic.linkstate.down.formatOnVm = "" esx.problem.net.vmnic.linkstate.down.formatOnHost = "" esx.problem.net.vmnic.linkstate.down.formatOnComputeResource = "" esx.problem.net.vmnic.linkstate.down.formatOnDatacenter = "" esx.problem.net.vmnic.linkstate.down.fullFormat = "Physical NIC {1} linkstate is down." # esx.clear.net.vmnic.linkstate.up.category = "info" esx.clear.net.vmnic.linkstate.up.description = "Link state up" esx.clear.net.vmnic.linkstate.up.formatOnVm = "" esx.clear.net.vmnic.linkstate.up.formatOnHost = "" esx.clear.net.vmnic.linkstate.up.formatOnComputeResource = "" esx.clear.net.vmnic.linkstate.up.formatOnDatacenter = "" esx.clear.net.vmnic.linkstate.up.fullFormat = "Physical NIC {1} linkstate is up." # esx.problem.net.vmnic.linkstate.flapping.category = "warning" esx.problem.net.vmnic.linkstate.flapping.description = "Link state unstable" esx.problem.net.vmnic.linkstate.flapping.formatOnVm = "" esx.problem.net.vmnic.linkstate.flapping.formatOnHost = "" esx.problem.net.vmnic.linkstate.flapping.formatOnComputeResource = "" esx.problem.net.vmnic.linkstate.flapping.formatOnDatacenter = "" esx.problem.net.vmnic.linkstate.flapping.fullFormat = "Taking down physical NIC {1} because the link is unstable." 
# esx.problem.storage.connectivity.lost.category = "error" esx.problem.storage.connectivity.lost.description = "Lost Storage Connectivity" esx.problem.storage.connectivity.lost.formatOnVm = "" esx.problem.storage.connectivity.lost.formatOnHost = "" esx.problem.storage.connectivity.lost.formatOnComputeResource= "" esx.problem.storage.connectivity.lost.formatOnDatacenter = "" esx.problem.storage.connectivity.lost.fullFormat = "Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}." # esx.problem.storage.redundancy.lost.category = "warning" esx.problem.storage.redundancy.lost.description = "Lost Storage Path Redundancy" esx.problem.storage.redundancy.lost.formatOnVm = "" esx.problem.storage.redundancy.lost.formatOnHost = "" esx.problem.storage.redundancy.lost.formatOnComputeResource = "" esx.problem.storage.redundancy.lost.formatOnDatacenter = "" esx.problem.storage.redundancy.lost.fullFormat = "Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}." # esx.problem.storage.redundancy.degraded.category = "warning" esx.problem.storage.redundancy.degraded.description = "Degraded Storage Path Redundancy" esx.problem.storage.redundancy.degraded.formatOnVm = "" esx.problem.storage.redundancy.degraded.formatOnHost = "" esx.problem.storage.redundancy.degraded.formatOnComputeResource = "" esx.problem.storage.redundancy.degraded.formatOnDatacenter = "" esx.problem.storage.redundancy.degraded.fullFormat = "Path redundancy to storage device {1} degraded. Path {2} is down. Affected datastores: {3}." 
# esx.problem.storage.iscsi.target.login.error.category = "error" esx.problem.storage.iscsi.target.login.error.description = "iSCSI Target login error" esx.problem.storage.iscsi.target.login.error.formatOnVm = "" esx.problem.storage.iscsi.target.login.error.formatOnHost = "" esx.problem.storage.iscsi.target.login.error.formatOnComputeResource = "" esx.problem.storage.iscsi.target.login.error.formatOnDatacenter = "" esx.problem.storage.iscsi.target.login.error.fullFormat = "Login to iSCSI target {1} on {2} failed. Target returned login error of: {3}." # esx.problem.storage.iscsi.target.connect.error.category = "error" esx.problem.storage.iscsi.target.connect.error.description = "iSCSI Target login connection problem" esx.problem.storage.iscsi.target.connect.error.formatOnVm = "" esx.problem.storage.iscsi.target.connect.error.formatOnHost = "" esx.problem.storage.iscsi.target.connect.error.formatOnComputeResource = "" esx.problem.storage.iscsi.target.connect.error.formatOnDatacenter = "" esx.problem.storage.iscsi.target.connect.error.fullFormat = "Login to iSCSI target {1} on {2} failed. The iSCSI initiator could not establish a network connection to the target." # esx.problem.storage.iscsi.target.permanently.lost.category = "error" esx.problem.storage.iscsi.target.permanently.lost.description = "iSCSI target permanently removed" esx.problem.storage.iscsi.target.permanently.lost.formatOnVm = "" esx.problem.storage.iscsi.target.permanently.lost.formatOnHost = "" esx.problem.storage.iscsi.target.permanently.lost.formatOnComputeResource = "" esx.problem.storage.iscsi.target.permanently.lost.formatOnDatacenter = "" esx.problem.storage.iscsi.target.permanently.lost.fullFormat = "The iSCSI target {2} was permanently removed from {1}." 
# esx.problem.storage.iscsi.discovery.login.error.category = "error" esx.problem.storage.iscsi.discovery.login.error.description = "iSCSI Discovery target login error" esx.problem.storage.iscsi.discovery.login.error.formatOnVm = "" esx.problem.storage.iscsi.discovery.login.error.formatOnHost = "" esx.problem.storage.iscsi.discovery.login.error.formatOnComputeResource = "" esx.problem.storage.iscsi.discovery.login.error.formatOnDatacenter = "" esx.problem.storage.iscsi.discovery.login.error.fullFormat = "iSCSI discovery to {1} on {2} failed. The Discovery target returned a login error of: {3}." # esx.problem.storage.iscsi.isns.discovery.error.category = "error" esx.problem.storage.iscsi.isns.discovery.error.description = "iSCSI iSns Discovery error" esx.problem.storage.iscsi.isns.discovery.error.formatOnVm = "" esx.problem.storage.iscsi.isns.discovery.error.formatOnHost = "" esx.problem.storage.iscsi.isns.discovery.error.formatOnComputeResource = "" esx.problem.storage.iscsi.isns.discovery.error.formatOnDatacenter = "" esx.problem.storage.iscsi.isns.discovery.error.fullFormat = "iSCSI iSns discovery to {1} on {2} failed. ({3} : {4})." # esx.problem.storage.iscsi.discovery.connect.error.category = "error" esx.problem.storage.iscsi.discovery.connect.error.description = "iSCSI discovery target login connection problem" esx.problem.storage.iscsi.discovery.connect.error.formatOnVm = "" esx.problem.storage.iscsi.discovery.connect.error.formatOnHost = "" esx.problem.storage.iscsi.discovery.connect.error.formatOnComputeResource = "" esx.problem.storage.iscsi.discovery.connect.error.formatOnDatacenter = "" esx.problem.storage.iscsi.discovery.connect.error.fullFormat = "iSCSI discovery to {1} on {2} failed. The iSCSI Initiator could not establish a network connection to the discovery address." 
# esx.problem.storage.iscsi.target.permanently.removed.category = "error" esx.problem.storage.iscsi.target.permanently.removed.description = "iSCSI target was permanently removed" esx.problem.storage.iscsi.target.permanently.removed.formatOnVm = "" esx.problem.storage.iscsi.target.permanently.removed.formatOnHost = "" esx.problem.storage.iscsi.target.permanently.removed.formatOnComputeResource = "" esx.problem.storage.iscsi.target.permanently.removed.formatOnDatacenter = "" esx.problem.storage.iscsi.target.permanently.removed.fullFormat = "The iSCSI target {1} was permanently removed from {2}." # esx.problem.net.e1000.tso6.notsupported.category = "error" esx.problem.net.e1000.tso6.notsupported.description = "No IPv6 TSO support" esx.problem.net.e1000.tso6.notsupported.formatOnVm = "" esx.problem.net.e1000.tso6.notsupported.formatOnHost = "" esx.problem.net.e1000.tso6.notsupported.formatOnComputeResource = "" esx.problem.net.e1000.tso6.notsupported.formatOnDatacenter = "" esx.problem.net.e1000.tso6.notsupported.fullFormat = "Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter." # esx.problem.net.migrate.bindtovmk.category = "warning" esx.problem.net.migrate.bindtovmk.description = "Invalid vmknic specified in /Migrate/Vmknic" esx.problem.net.migrate.bindtovmk.formatOnVm = "" esx.problem.net.migrate.bindtovmk.formatOnHost = "" esx.problem.net.migrate.bindtovmk.formatOnComputeResource = "" esx.problem.net.migrate.bindtovmk.formatOnDatacenter = "" esx.problem.net.migrate.bindtovmk.fullFormat = "The ESX advanced configuration option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Update the configuration option with a valid vmknic. Alternatively, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank." 
# esx.problem.vmfs.journal.createfailed.category = "error" esx.problem.vmfs.journal.createfailed.description = "No Space To Create VMFS Journal" esx.problem.vmfs.journal.createfailed.formatOnVm = "" esx.problem.vmfs.journal.createfailed.formatOnHost = "" esx.problem.vmfs.journal.createfailed.formatOnComputeResource = "" esx.problem.vmfs.journal.createfailed.formatOnDatacenter = "" esx.problem.vmfs.journal.createfailed.fullFormat = "No space for journal on volume {1} ({2}). Volume will remain in read-only metadata mode with limited write support until journal can be created." # esx.problem.vmfs.heartbeat.timedout.category = "info" esx.problem.vmfs.heartbeat.timedout.description = "VMFS Volume Connectivity Degraded" esx.problem.vmfs.heartbeat.timedout.formatOnVm = "" esx.problem.vmfs.heartbeat.timedout.formatOnHost = "" esx.problem.vmfs.heartbeat.timedout.formatOnComputeResource = "" esx.problem.vmfs.heartbeat.timedout.formatOnDatacenter = "" esx.problem.vmfs.heartbeat.timedout.fullFormat = "Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly." # esx.problem.vmfs.heartbeat.corruptondisk.category = "error" esx.problem.vmfs.heartbeat.corruptondisk.description = "VMFS Heartbeat Corruption Detected." esx.problem.vmfs.heartbeat.corruptondisk.formatOnVm = "" esx.problem.vmfs.heartbeat.corruptondisk.formatOnHost = "" esx.problem.vmfs.heartbeat.corruptondisk.formatOnComputeResource = "" esx.problem.vmfs.heartbeat.corruptondisk.formatOnDatacenter = "" esx.problem.vmfs.heartbeat.corruptondisk.fullFormat = "At least one corrupt on-disk heartbeat region was detected on volume {1} ({2}). Other regions of the volume might be damaged too." 
# esx.problem.vmfs.heartbeat.recovered.category = "info" esx.problem.vmfs.heartbeat.recovered.description = "VMFS Volume Connectivity Restored" esx.problem.vmfs.heartbeat.recovered.formatOnVm = "" esx.problem.vmfs.heartbeat.recovered.formatOnHost = "" esx.problem.vmfs.heartbeat.recovered.formatOnComputeResource = "" esx.problem.vmfs.heartbeat.recovered.formatOnDatacenter = "" esx.problem.vmfs.heartbeat.recovered.fullFormat = "Successfully restored access to volume {1} ({2}) following connectivity issues." # esx.problem.vmfs.heartbeat.unrecoverable.category = "error" esx.problem.vmfs.heartbeat.unrecoverable.description = "VMFS Volume Connectivity Lost" esx.problem.vmfs.heartbeat.unrecoverable.formatOnVm = "" esx.problem.vmfs.heartbeat.unrecoverable.formatOnHost = "" esx.problem.vmfs.heartbeat.unrecoverable.formatOnComputeResource = "" esx.problem.vmfs.heartbeat.unrecoverable.formatOnDatacenter = "" esx.problem.vmfs.heartbeat.unrecoverable.fullFormat = "Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed." # esx.problem.vmfs.lock.corruptondisk.category = "error" esx.problem.vmfs.lock.corruptondisk.description = "VMFS Lock Corruption Detected" esx.problem.vmfs.lock.corruptondisk.formatOnVm = "" esx.problem.vmfs.lock.corruptondisk.formatOnHost = "" esx.problem.vmfs.lock.corruptondisk.formatOnComputeResource = "" esx.problem.vmfs.lock.corruptondisk.formatOnDatacenter = "" esx.problem.vmfs.lock.corruptondisk.fullFormat = "At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too." 
# esx.problem.vmfs.lock.corruptondisk.v2.category = "error" esx.problem.vmfs.lock.corruptondisk.v2.description = "VMFS Lock Corruption Detected" esx.problem.vmfs.lock.corruptondisk.v2.formatOnVm = "" esx.problem.vmfs.lock.corruptondisk.v2.formatOnHost = "" esx.problem.vmfs.lock.corruptondisk.v2.formatOnComputeResource = "" esx.problem.vmfs.lock.corruptondisk.v2.formatOnDatacenter = "" esx.problem.vmfs.lock.corruptondisk.v2.fullFormat = "At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too." # esx.problem.vmfs.resource.corruptondisk.category = "error" esx.problem.vmfs.resource.corruptondisk.description = "VMFS Resource Corruption Detected" esx.problem.vmfs.resource.corruptondisk.formatOnVm = "" esx.problem.vmfs.resource.corruptondisk.formatOnHost = "" esx.problem.vmfs.resource.corruptondisk.formatOnComputeResource = "" esx.problem.vmfs.resource.corruptondisk.formatOnDatacenter = "" esx.problem.vmfs.resource.corruptondisk.fullFormat = "At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too." # esx.problem.vmfs.error.volume.is.locked.category = "error" esx.problem.vmfs.error.volume.is.locked.description = "VMFS Locked By Remote Host" esx.problem.vmfs.error.volume.is.locked.formatOnVm = "" esx.problem.vmfs.error.volume.is.locked.formatOnHost = "" esx.problem.vmfs.error.volume.is.locked.formatOnComputeResource = "" esx.problem.vmfs.error.volume.is.locked.formatOnDatacenter = "" esx.problem.vmfs.error.volume.is.locked.fullFormat = "Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover." # esx.problem.vmfs.extent.offline.category = "error" esx.problem.vmfs.extent.offline.description = "Device backing an extent of a file system is offline." 
esx.problem.vmfs.extent.offline.formatOnVm = "" esx.problem.vmfs.extent.offline.formatOnHost = "" esx.problem.vmfs.extent.offline.formatOnComputeResource = "" esx.problem.vmfs.extent.offline.formatOnDatacenter = "" esx.problem.vmfs.extent.offline.fullFormat = "An attached device {1} may be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible." # esx.problem.vmfs.extent.online.category = "info" esx.problem.vmfs.extent.online.description = "Device backing an extent of a file system came online" esx.problem.vmfs.extent.online.formatOnVm = "" esx.problem.vmfs.extent.online.formatOnHost = "" esx.problem.vmfs.extent.online.formatOnComputeResource = "" esx.problem.vmfs.extent.online.formatOnDatacenter = "" esx.problem.vmfs.extent.online.fullFormat = "Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available." # esx.audit.vmfs.lvm.device.discovered.category = "info" esx.audit.vmfs.lvm.device.discovered.description = "LVM device discovered." esx.audit.vmfs.lvm.device.discovered.formatOnVm = "" esx.audit.vmfs.lvm.device.discovered.formatOnHost = "" esx.audit.vmfs.lvm.device.discovered.formatOnComputeResource = "" esx.audit.vmfs.lvm.device.discovered.formatOnDatacenter = "" esx.audit.vmfs.lvm.device.discovered.fullFormat = "One or more LVM devices have been discovered on this host." # esx.audit.vmfs.volume.mounted.category = "info" esx.audit.vmfs.volume.mounted.description = "File system mounted." esx.audit.vmfs.volume.mounted.formatOnVm = "" esx.audit.vmfs.volume.mounted.formatOnHost = "" esx.audit.vmfs.volume.mounted.formatOnComputeResource = "" esx.audit.vmfs.volume.mounted.formatOnDatacenter = "" esx.audit.vmfs.volume.mounted.fullFormat = "File system {1} on volume {2} has been mounted in {3} mode on this host." 
# esx.audit.vmfs.volume.umounted.category = "info" esx.audit.vmfs.volume.umounted.description = "LVM volume un-mounted." esx.audit.vmfs.volume.umounted.formatOnVm = "" esx.audit.vmfs.volume.umounted.formatOnHost = "" esx.audit.vmfs.volume.umounted.formatOnComputeResource = "" esx.audit.vmfs.volume.umounted.formatOnDatacenter = "" esx.audit.vmfs.volume.umounted.fullFormat = "The volume {1} has been safely un-mounted. The datastore is no longer accessible on this host." # esx.problem.vmfs.ats.incompatibility.detected.category = "error" esx.problem.vmfs.ats.incompatibility.detected.description = "Multi-extent ATS-only VMFS Volume unable to use ATS" esx.problem.vmfs.ats.incompatibility.detected.formatOnVm = "" esx.problem.vmfs.ats.incompatibility.detected.formatOnHost = "" esx.problem.vmfs.ats.incompatibility.detected.formatOnComputeResource = "" esx.problem.vmfs.ats.incompatibility.detected.formatOnDatacenter = "" esx.problem.vmfs.ats.incompatibility.detected.fullFormat = "Multi-extent ATS-only volume '{1}' ({2}) is unable to use ATS because HardwareAcceleratedLocking is disabled on this host: potential for introducing filesystem corruption. Volume should not be used from other hosts." # esx.problem.vmfs.ats.support.lost.category = "error" esx.problem.vmfs.ats.support.lost.description = "Device Backing VMFS has lost ATS Support" esx.problem.vmfs.ats.support.lost.formatOnVm = "" esx.problem.vmfs.ats.support.lost.formatOnHost = "" esx.problem.vmfs.ats.support.lost.formatOnComputeResource = "" esx.problem.vmfs.ats.support.lost.formatOnDatacenter = "" esx.problem.vmfs.ats.support.lost.fullFormat = "ATS-Only VMFS volume '{1}' not mounted. Host does not support ATS or ATS initialization has failed." # esx.problem.scsi.device.state.permanentloss.category = "error" esx.problem.scsi.device.state.permanentloss.description = "Device has been removed or is permanently inaccessible." 
esx.problem.scsi.device.state.permanentloss.formatOnVm = "" esx.problem.scsi.device.state.permanentloss.formatOnHost = "" esx.problem.scsi.device.state.permanentloss.formatOnComputeResource = "" esx.problem.scsi.device.state.permanentloss.formatOnDatacenter = "" esx.problem.scsi.device.state.permanentloss.fullFormat = "Device {1} has been removed or is permanently inaccessible. Affected datastores (if any): {2}." # esx.problem.scsi.device.state.permanentloss.withreservationheld.category = "error" esx.problem.scsi.device.state.permanentloss.withreservationheld.description = "Device has been removed or is permanently inaccessible." esx.problem.scsi.device.state.permanentloss.withreservationheld.formatOnVm = "" esx.problem.scsi.device.state.permanentloss.withreservationheld.formatOnHost = "" esx.problem.scsi.device.state.permanentloss.withreservationheld.formatOnComputeResource = "" esx.problem.scsi.device.state.permanentloss.withreservationheld.formatOnDatacenter = "" esx.problem.scsi.device.state.permanentloss.withreservationheld.fullFormat = "Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}." # esx.problem.scsi.device.state.permanentloss.noopens.category = "info" esx.problem.scsi.device.state.permanentloss.noopens.description = "Permanently inaccessible device has no more opens." esx.problem.scsi.device.state.permanentloss.noopens.formatOnVm = "" esx.problem.scsi.device.state.permanentloss.noopens.formatOnHost = "" esx.problem.scsi.device.state.permanentloss.noopens.formatOnComputeResource = "" esx.problem.scsi.device.state.permanentloss.noopens.formatOnDatacenter = "" esx.problem.scsi.device.state.permanentloss.noopens.fullFormat = "Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device." 
# esx.problem.scsi.device.state.permanentloss.pluggedback.category = "error" esx.problem.scsi.device.state.permanentloss.pluggedback.description = "Device has been plugged back in after being marked permanently inaccessible." esx.problem.scsi.device.state.permanentloss.pluggedback.formatOnVm = "" esx.problem.scsi.device.state.permanentloss.pluggedback.formatOnHost = "" esx.problem.scsi.device.state.permanentloss.pluggedback.formatOnComputeResource = "" esx.problem.scsi.device.state.permanentloss.pluggedback.formatOnDatacenter = "" esx.problem.scsi.device.state.permanentloss.pluggedback.fullFormat = "Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees." # esx.clear.scsi.device.state.permanentloss.deviceonline.category = "info" esx.clear.scsi.device.state.permanentloss.deviceonline.description = "Device that was permanently inaccessible is now online." esx.clear.scsi.device.state.permanentloss.deviceonline.formatOnVm = "" esx.clear.scsi.device.state.permanentloss.deviceonline.formatOnHost = "" esx.clear.scsi.device.state.permanentloss.deviceonline.formatOnComputeResource = "" esx.clear.scsi.device.state.permanentloss.deviceonline.formatOnDatacenter = "" esx.clear.scsi.device.state.permanentloss.deviceonline.fullFormat = "Device {1}, that was permanently inaccessible is now online. No data consistency guarantees." # esx.problem.scsi.device.state.off.category = "info" esx.problem.scsi.device.state.off.description = "Device has been turned off administratively." esx.problem.scsi.device.state.off.formatOnVm = "" esx.problem.scsi.device.state.off.formatOnHost = "" esx.problem.scsi.device.state.off.formatOnComputeResource = "" esx.problem.scsi.device.state.off.formatOnDatacenter = "" esx.problem.scsi.device.state.off.fullFormat = "Device {1}, has been turned off administratively." 
# esx.clear.scsi.device.state.on.category = "info" esx.clear.scsi.device.state.on.description = "Device has been turned on administratively." esx.clear.scsi.device.state.on.formatOnVm = "" esx.clear.scsi.device.state.on.formatOnHost = "" esx.clear.scsi.device.state.on.formatOnComputeResource = "" esx.clear.scsi.device.state.on.formatOnDatacenter = "" esx.clear.scsi.device.state.on.fullFormat = "Device {1}, has been turned on administratively." # esx.problem.scsi.device.thinprov.atquota.category = "warning" esx.problem.scsi.device.thinprov.atquota.description = "Thin Provisioned Device Nearing Capacity" esx.problem.scsi.device.thinprov.atquota.formatOnVm = "" esx.problem.scsi.device.thinprov.atquota.formatOnHost = "" esx.problem.scsi.device.thinprov.atquota.formatOnComputeResource = "" esx.problem.scsi.device.thinprov.atquota.formatOnDatacenter = "" esx.problem.scsi.device.thinprov.atquota.fullFormat = "Space utilization on thin-provisioned device {1} exceeded configured threshold. Affected datastores (if any): {2}." # esx.problem.scsi.device.io.bad.plugin.type.category = "warning" esx.problem.scsi.device.io.bad.plugin.type.description = "Plugin trying to issue command to device does not have a valid storage plugin type." esx.problem.scsi.device.io.bad.plugin.type.formatOnVm = "" esx.problem.scsi.device.io.bad.plugin.type.formatOnHost = "" esx.problem.scsi.device.io.bad.plugin.type.formatOnComputeResource = "" esx.problem.scsi.device.io.bad.plugin.type.formatOnDatacenter = "" esx.problem.scsi.device.io.bad.plugin.type.fullFormat = "Bad plugin type for device {1}, plugin {2}" # esx.problem.scsi.device.io.qerr.change.config.category = "warning" esx.problem.scsi.device.io.qerr.change.config.description = "QErr cannot be changed on device. Please change it manually on the device if possible." 
esx.problem.scsi.device.io.qerr.change.config.formatOnVm = "" esx.problem.scsi.device.io.qerr.change.config.formatOnHost = "" esx.problem.scsi.device.io.qerr.change.config.formatOnComputeResource = "" esx.problem.scsi.device.io.qerr.change.config.formatOnDatacenter = "" esx.problem.scsi.device.io.qerr.change.config.fullFormat = "QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The system is not configured to change the QErr setting of device. The QErr value supported by system is 0x{3}. Please check the SCSI ChangeQErrSetting configuration value for ESX." # esx.problem.scsi.device.io.qerr.changed.category = "warning" esx.problem.scsi.device.io.qerr.changed.description = "Scsi Device QErr setting changed" esx.problem.scsi.device.io.qerr.changed.formatOnVm = "" esx.problem.scsi.device.io.qerr.changed.formatOnHost = "" esx.problem.scsi.device.io.qerr.changed.formatOnComputeResource = "" esx.problem.scsi.device.io.qerr.changed.formatOnDatacenter = "" esx.problem.scsi.device.io.qerr.changed.fullFormat = "QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The device was originally configured to the supported QErr setting of 0x{3}, but this has been changed and could not be changed back." # esx.problem.scsi.device.io.inquiry.failed.category = "warning" esx.problem.scsi.device.io.inquiry.failed.description = "Failed to obtain INQUIRY data from the device" esx.problem.scsi.device.io.inquiry.failed.formatOnVm = "" esx.problem.scsi.device.io.inquiry.failed.formatOnHost = "" esx.problem.scsi.device.io.inquiry.failed.formatOnComputeResource = "" esx.problem.scsi.device.io.inquiry.failed.formatOnDatacenter = "" esx.problem.scsi.device.io.inquiry.failed.fullFormat = "Failed to get standard inquiry for device {1} from Plugin {2}." 
# esx.problem.scsi.device.is.pseudo.failed.category = "warning" esx.problem.scsi.device.is.pseudo.failed.description = "Plugin's isPseudo entry point failed" esx.problem.scsi.device.is.pseudo.failed.formatOnVm = "" esx.problem.scsi.device.is.pseudo.failed.formatOnHost = "" esx.problem.scsi.device.is.pseudo.failed.formatOnComputeResource = "" esx.problem.scsi.device.is.pseudo.failed.formatOnDatacenter = "" esx.problem.scsi.device.is.pseudo.failed.fullFormat = "Failed to verify if the device {1} from plugin {2} is a pseudo device" # esx.problem.scsi.device.is.ssd.failed.category = "warning" esx.problem.scsi.device.is.ssd.failed.description = "Plugin's isSSD entry point failed" esx.problem.scsi.device.is.ssd.failed.formatOnVm = "" esx.problem.scsi.device.is.ssd.failed.formatOnHost = "" esx.problem.scsi.device.is.ssd.failed.formatOnComputeResource = "" esx.problem.scsi.device.is.ssd.failed.formatOnDatacenter = "" esx.problem.scsi.device.is.ssd.failed.fullFormat = "Failed to verify if the device {1} from plugin {2} is a Solid State Disk device" # esx.problem.scsi.device.is.local.failed.category = "warning" esx.problem.scsi.device.is.local.failed.description = "Plugin's isLocal entry point failed" esx.problem.scsi.device.is.local.failed.formatOnVm = "" esx.problem.scsi.device.is.local.failed.formatOnHost = "" esx.problem.scsi.device.is.local.failed.formatOnComputeResource = "" esx.problem.scsi.device.is.local.failed.formatOnDatacenter = "" esx.problem.scsi.device.is.local.failed.fullFormat = "Failed to verify if the device {1} from plugin {2} is a local - not shared - device" # esx.problem.scsi.apd.event.descriptor.alloc.failed.category = "warning" esx.problem.scsi.apd.event.descriptor.alloc.failed.description = "No memory to allocate APD Event" esx.problem.scsi.apd.event.descriptor.alloc.failed.formatOnVm = "" esx.problem.scsi.apd.event.descriptor.alloc.failed.formatOnHost = "" esx.problem.scsi.apd.event.descriptor.alloc.failed.formatOnComputeResource = "" 
esx.problem.scsi.apd.event.descriptor.alloc.failed.formatOnDatacenter = "" esx.problem.scsi.apd.event.descriptor.alloc.failed.fullFormat = "No memory to allocate APD (All Paths Down) event subsystem." # esx.problem.scsi.device.close.failed.category = "warning" esx.problem.scsi.device.close.failed.description = "Scsi Device close failed." esx.problem.scsi.device.close.failed.formatOnVm = "" esx.problem.scsi.device.close.failed.formatOnHost = "" esx.problem.scsi.device.close.failed.formatOnComputeResource = "" esx.problem.scsi.device.close.failed.formatOnDatacenter = "" esx.problem.scsi.device.close.failed.fullFormat = "Failed to close the device {1} properly, plugin {2}." # esx.problem.scsi.device.filter.attach.failed.category = "warning" esx.problem.scsi.device.filter.attach.failed.description = "Failed to attach filter to device." esx.problem.scsi.device.filter.attach.failed.formatOnVm = "" esx.problem.scsi.device.filter.attach.failed.formatOnHost = "" esx.problem.scsi.device.filter.attach.failed.formatOnComputeResource = "" esx.problem.scsi.device.filter.attach.failed.formatOnDatacenter = "" esx.problem.scsi.device.filter.attach.failed.fullFormat = "Failed to attach filters to device '{1}' during registration. Plugin load failed or the filter rules are incorrect." # esx.problem.scsi.unsupported.plugin.type.category = "warning" esx.problem.scsi.unsupported.plugin.type.description = "Storage plugin of unsupported type tried to register." 
esx.problem.scsi.unsupported.plugin.type.formatOnVm = "" esx.problem.scsi.unsupported.plugin.type.formatOnHost = "" esx.problem.scsi.unsupported.plugin.type.formatOnComputeResource = "" esx.problem.scsi.unsupported.plugin.type.formatOnDatacenter = "" esx.problem.scsi.unsupported.plugin.type.fullFormat = "Scsi Device Allocation not supported for plugin type {1}" # esx.problem.scsi.device.detach.failed.category = "warning" esx.problem.scsi.device.detach.failed.description = "Device detach failed" esx.problem.scsi.device.detach.failed.formatOnVm = "" esx.problem.scsi.device.detach.failed.formatOnHost = "" esx.problem.scsi.device.detach.failed.formatOnComputeResource = "" esx.problem.scsi.device.detach.failed.formatOnDatacenter = "" esx.problem.scsi.device.detach.failed.fullFormat = "Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries." # esx.problem.scsi.device.io.invalid.disk.qfull.value.category = "warning" esx.problem.scsi.device.io.invalid.disk.qfull.value.description = "Scsi device queue parameters incorrectly set." esx.problem.scsi.device.io.invalid.disk.qfull.value.formatOnVm = "" esx.problem.scsi.device.io.invalid.disk.qfull.value.formatOnHost = "" esx.problem.scsi.device.io.invalid.disk.qfull.value.formatOnComputeResource = "" esx.problem.scsi.device.io.invalid.disk.qfull.value.formatOnDatacenter = "" esx.problem.scsi.device.io.invalid.disk.qfull.value.fullFormat = "QFullSampleSize should be bigger than QFullThreshold. LUN queue depth throttling algorithm will not function as expected. Please set the QFullSampleSize and QFullThreshold disk configuration values in ESX correctly." 
# esx.problem.scsi.device.io.latency.high.category = "warning" esx.problem.scsi.device.io.latency.high.description = "Scsi Device I/O Latency going high" esx.problem.scsi.device.io.latency.high.formatOnVm = "" esx.problem.scsi.device.io.latency.high.formatOnHost = "" esx.problem.scsi.device.io.latency.high.formatOnComputeResource = "" esx.problem.scsi.device.io.latency.high.formatOnDatacenter = "" esx.problem.scsi.device.io.latency.high.fullFormat = "Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds." # esx.clear.scsi.device.io.latency.improved.category = "info" esx.clear.scsi.device.io.latency.improved.description = "Scsi Device I/O Latency has improved" esx.clear.scsi.device.io.latency.improved.formatOnVm = "" esx.clear.scsi.device.io.latency.improved.formatOnHost = "" esx.clear.scsi.device.io.latency.improved.formatOnComputeResource = "" esx.clear.scsi.device.io.latency.improved.formatOnDatacenter = "" esx.clear.scsi.device.io.latency.improved.fullFormat = "Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds." # esx.problem.vmfs.nfs.server.disconnect.category = "error" esx.problem.vmfs.nfs.server.disconnect.description = "Lost connection to NFS server" esx.problem.vmfs.nfs.server.disconnect.formatOnVm = "" esx.problem.vmfs.nfs.server.disconnect.formatOnHost = "" esx.problem.vmfs.nfs.server.disconnect.formatOnComputeResource = "" esx.problem.vmfs.nfs.server.disconnect.formatOnDatacenter = "" esx.problem.vmfs.nfs.server.disconnect.fullFormat = "Lost connection to server {1} mount point {2} mounted as {3} ({4})." 
# esx.problem.vmfs.nfs.server.restored.category = "info" esx.problem.vmfs.nfs.server.restored.description = "Restored connection to NFS server" esx.problem.vmfs.nfs.server.restored.formatOnVm = "" esx.problem.vmfs.nfs.server.restored.formatOnHost = "" esx.problem.vmfs.nfs.server.restored.formatOnComputeResource = "" esx.problem.vmfs.nfs.server.restored.formatOnDatacenter = "" esx.problem.vmfs.nfs.server.restored.fullFormat = "Restored connection to server {1} mount point {2} mounted as {3} ({4})." # esx.problem.vmfs.nfs.mount.connect.failed.category = "error" esx.problem.vmfs.nfs.mount.connect.failed.description = "Unable to connect to NFS server" esx.problem.vmfs.nfs.mount.connect.failed.formatOnVm = "" esx.problem.vmfs.nfs.mount.connect.failed.formatOnHost = "" esx.problem.vmfs.nfs.mount.connect.failed.formatOnComputeResource = "" esx.problem.vmfs.nfs.mount.connect.failed.formatOnDatacenter = "" esx.problem.vmfs.nfs.mount.connect.failed.fullFormat = "Failed to mount to the server {1} mount point {2}. {3}" # esx.problem.vmfs.nfs.mount.limit.exceeded.category = "error" esx.problem.vmfs.nfs.mount.limit.exceeded.description = "NFS has reached the maximum number of supported volumes" esx.problem.vmfs.nfs.mount.limit.exceeded.formatOnVm = "" esx.problem.vmfs.nfs.mount.limit.exceeded.formatOnHost = "" esx.problem.vmfs.nfs.mount.limit.exceeded.formatOnComputeResource = "" esx.problem.vmfs.nfs.mount.limit.exceeded.formatOnDatacenter = "" esx.problem.vmfs.nfs.mount.limit.exceeded.fullFormat = "Failed to mount to the server {1} mount point {2}. {3}" 
# esx.clear.storage.connectivity.restored.category = "info" esx.clear.storage.connectivity.restored.description = "Restored connectivity to storage device" esx.clear.storage.connectivity.restored.formatOnVm = "" esx.clear.storage.connectivity.restored.formatOnHost = "" esx.clear.storage.connectivity.restored.formatOnComputeResource = "" esx.clear.storage.connectivity.restored.formatOnDatacenter = "" esx.clear.storage.connectivity.restored.fullFormat = "Connectivity to storage device {1} (Datastores: {2}) restored. Path {3} is active again." # esx.clear.storage.redundancy.restored.category = "info" esx.clear.storage.redundancy.restored.description = "Restored path redundancy to storage device" esx.clear.storage.redundancy.restored.formatOnVm = "" esx.clear.storage.redundancy.restored.formatOnHost = "" esx.clear.storage.redundancy.restored.formatOnComputeResource = "" esx.clear.storage.redundancy.restored.formatOnDatacenter = "" esx.clear.storage.redundancy.restored.fullFormat = "Path redundancy to storage device {1} (Datastores: {2}) restored. Path {3} is active again." # esx.problem.iorm.nonviworkload.category = "info" esx.problem.iorm.nonviworkload.description = "Unmanaged workload detected on SIOC-enabled datastore" esx.problem.iorm.nonviworkload.formatOnVm = "" esx.problem.iorm.nonviworkload.formatOnHost = "" esx.problem.iorm.nonviworkload.formatOnComputeResource = "" esx.problem.iorm.nonviworkload.formatOnDatacenter = "" esx.problem.iorm.nonviworkload.fullFormat = "An unmanaged I/O workload is detected on a SIOC-enabled datastore: {1}." 
# esx.problem.iorm.badversion.category = "info" esx.problem.iorm.badversion.description = "Storage I/O Control version mismatch" esx.problem.iorm.badversion.formatOnVm = "" esx.problem.iorm.badversion.formatOnHost = "" esx.problem.iorm.badversion.formatOnComputeResource = "" esx.problem.iorm.badversion.formatOnDatacenter = "" esx.problem.iorm.badversion.fullFormat = "Host {1} cannot participate in Storage I/O Control(SIOC) on datastore {2} because the version number {3} of the SIOC agent on this host is incompatible with number {4} of its counterparts on other hosts connected to this datastore." # esx.audit.dcui.login.passwd.changed.category = "info" esx.audit.dcui.login.passwd.changed.description = "DCUI login password changed." esx.audit.dcui.login.passwd.changed.formatOnVm = "" esx.audit.dcui.login.passwd.changed.formatOnHost = "" esx.audit.dcui.login.passwd.changed.formatOnComputeResource = "" esx.audit.dcui.login.passwd.changed.formatOnDatacenter = "" esx.audit.dcui.login.passwd.changed.fullFormat = "Login password for user {1} has been changed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.login.failed.category = "error" esx.audit.dcui.login.failed.description = "Login authentication on DCUI failed" esx.audit.dcui.login.failed.formatOnVm = "" esx.audit.dcui.login.failed.formatOnHost = "" esx.audit.dcui.login.failed.formatOnComputeResource = "" esx.audit.dcui.login.failed.formatOnDatacenter = "" esx.audit.dcui.login.failed.fullFormat = "Authentication of user {1} has failed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.network.factoryrestore.category = "warning" esx.audit.dcui.network.factoryrestore.description = "Factory network settings restored through DCUI." 
esx.audit.dcui.network.factoryrestore.formatOnVm = "" esx.audit.dcui.network.factoryrestore.formatOnHost = "" esx.audit.dcui.network.factoryrestore.formatOnComputeResource = "" esx.audit.dcui.network.factoryrestore.formatOnDatacenter = "" esx.audit.dcui.network.factoryrestore.fullFormat = "The host has been restored to factory network settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.hostagents.restart.category = "info" esx.audit.dcui.hostagents.restart.description = "Restarting host agents through DCUI." esx.audit.dcui.hostagents.restart.formatOnVm = "" esx.audit.dcui.hostagents.restart.formatOnHost = "" esx.audit.dcui.hostagents.restart.formatOnComputeResource = "" esx.audit.dcui.hostagents.restart.formatOnDatacenter = "" esx.audit.dcui.hostagents.restart.fullFormat = "The management agents on the host are being restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.network.restart.category = "info" esx.audit.dcui.network.restart.description = "Restarting network through DCUI." esx.audit.dcui.network.restart.formatOnVm = "" esx.audit.dcui.network.restart.formatOnHost = "" esx.audit.dcui.network.restart.formatOnComputeResource = "" esx.audit.dcui.network.restart.formatOnDatacenter = "" esx.audit.dcui.network.restart.fullFormat = "A management interface {1} has been restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.defaults.factoryrestore.category = "warning" esx.audit.dcui.defaults.factoryrestore.description = "Restoring factory defaults through DCUI." 
esx.audit.dcui.defaults.factoryrestore.formatOnVm = "" esx.audit.dcui.defaults.factoryrestore.formatOnHost = "" esx.audit.dcui.defaults.factoryrestore.formatOnComputeResource = "" esx.audit.dcui.defaults.factoryrestore.formatOnDatacenter = "" esx.audit.dcui.defaults.factoryrestore.fullFormat = "The host has been restored to default factory settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.host.reboot.category = "warning" esx.audit.dcui.host.reboot.description = "Rebooting host through DCUI." esx.audit.dcui.host.reboot.formatOnVm = "" esx.audit.dcui.host.reboot.formatOnHost = "" esx.audit.dcui.host.reboot.formatOnComputeResource = "" esx.audit.dcui.host.reboot.formatOnDatacenter = "" esx.audit.dcui.host.reboot.fullFormat = "The host is being rebooted through the Direct Console User Interface (DCUI). Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.dcui.host.shutdown.category = "warning" esx.audit.dcui.host.shutdown.description = "Shutting down host through DCUI." esx.audit.dcui.host.shutdown.formatOnVm = "" esx.audit.dcui.host.shutdown.formatOnHost = "" esx.audit.dcui.host.shutdown.formatOnComputeResource = "" esx.audit.dcui.host.shutdown.formatOnDatacenter = "" esx.audit.dcui.host.shutdown.fullFormat = "The host is being shut down through the Direct Console User Interface (DCUI). Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information." # esx.audit.esxcli.host.reboot.category = "warning" esx.audit.esxcli.host.reboot.description = "Rebooting host through esxcli" esx.audit.esxcli.host.reboot.formatOnVm = "" esx.audit.esxcli.host.reboot.formatOnHost = "" esx.audit.esxcli.host.reboot.formatOnComputeResource = "" esx.audit.esxcli.host.reboot.formatOnDatacenter = "" esx.audit.esxcli.host.reboot.fullFormat = "The host is being rebooted through esxcli. Reason for reboot: {1}. Please consult vSphere Documentation Center or follow the Ask VMware link for more information." 
# esx.audit.esxcli.host.restart.category = "warning" esx.audit.esxcli.host.restart.description = "Rebooting host through esxcli" esx.audit.esxcli.host.restart.formatOnVm = "" esx.audit.esxcli.host.restart.formatOnHost = "" esx.audit.esxcli.host.restart.formatOnComputeResource = "" esx.audit.esxcli.host.restart.formatOnDatacenter = "" esx.audit.esxcli.host.restart.fullFormat = "The host is being rebooted through esxcli. Reason for reboot: {1}. Please consult vSphere Documentation Center or follow the Ask VMware link for more information." # esx.audit.esxcli.host.poweroff.category = "warning" esx.audit.esxcli.host.poweroff.description = "Powering off host through esxcli" esx.audit.esxcli.host.poweroff.formatOnVm = "" esx.audit.esxcli.host.poweroff.formatOnHost = "" esx.audit.esxcli.host.poweroff.formatOnComputeResource = "" esx.audit.esxcli.host.poweroff.formatOnDatacenter = "" esx.audit.esxcli.host.poweroff.fullFormat = "The host is being powered off through esxcli. Reason for powering off: {1}. Please consult vSphere Documentation Center or follow the Ask VMware link for more information." 
# esx.problem.apei.bert.memory.error.fatal.category = "error" esx.problem.apei.bert.memory.error.fatal.description = "A fatal memory error occurred" esx.problem.apei.bert.memory.error.fatal.formatOnVm = "" esx.problem.apei.bert.memory.error.fatal.formatOnHost = "" esx.problem.apei.bert.memory.error.fatal.formatOnComputeResource = "" esx.problem.apei.bert.memory.error.fatal.formatOnDatacenter = "" esx.problem.apei.bert.memory.error.fatal.fullFormat = "A fatal memory error occurred in the last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}" 
# esx.problem.apei.bert.memory.error.recoverable.category = "error" esx.problem.apei.bert.memory.error.recoverable.description = "A recoverable memory error occurred" esx.problem.apei.bert.memory.error.recoverable.formatOnVm = "" esx.problem.apei.bert.memory.error.recoverable.formatOnHost = "" esx.problem.apei.bert.memory.error.recoverable.formatOnComputeResource = "" esx.problem.apei.bert.memory.error.recoverable.formatOnDatacenter = "" esx.problem.apei.bert.memory.error.recoverable.fullFormat = "A recoverable memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}" 
# esx.problem.apei.bert.memory.error.corrected.category = "error" esx.problem.apei.bert.memory.error.corrected.description = "A corrected memory error occurred" esx.problem.apei.bert.memory.error.corrected.formatOnVm = "" esx.problem.apei.bert.memory.error.corrected.formatOnHost = "" esx.problem.apei.bert.memory.error.corrected.formatOnComputeResource = "" esx.problem.apei.bert.memory.error.corrected.formatOnDatacenter = "" esx.problem.apei.bert.memory.error.corrected.fullFormat = "A corrected memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}" 
# esx.problem.apei.bert.pcie.error.corrected.category = "error" esx.problem.apei.bert.pcie.error.corrected.description = "A corrected PCIe error occurred" esx.problem.apei.bert.pcie.error.corrected.formatOnVm = "" esx.problem.apei.bert.pcie.error.corrected.formatOnHost = "" esx.problem.apei.bert.pcie.error.corrected.formatOnComputeResource = "" esx.problem.apei.bert.pcie.error.corrected.formatOnDatacenter = "" esx.problem.apei.bert.pcie.error.corrected.fullFormat = "A corrected PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}." 
# esx.problem.apei.bert.pcie.error.fatal.category = "error" esx.problem.apei.bert.pcie.error.fatal.description = "A fatal PCIe error occurred" esx.problem.apei.bert.pcie.error.fatal.formatOnVm = "" esx.problem.apei.bert.pcie.error.fatal.formatOnHost = "" esx.problem.apei.bert.pcie.error.fatal.formatOnComputeResource = "" esx.problem.apei.bert.pcie.error.fatal.formatOnDatacenter = "" esx.problem.apei.bert.pcie.error.fatal.fullFormat = "Platform encountered a fatal PCIe error in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}." 
# esx.problem.apei.bert.pcie.error.recoverable.category = "error" esx.problem.apei.bert.pcie.error.recoverable.description = "A recoverable PCIe error occurred" esx.problem.apei.bert.pcie.error.recoverable.formatOnVm = "" esx.problem.apei.bert.pcie.error.recoverable.formatOnHost = "" esx.problem.apei.bert.pcie.error.recoverable.formatOnComputeResource = "" esx.problem.apei.bert.pcie.error.recoverable.formatOnDatacenter = "" esx.problem.apei.bert.pcie.error.recoverable.fullFormat = "A recoverable PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}." # esx.problem.hostd.core.dumped.category = "warning" esx.problem.hostd.core.dumped.description = "Hostd crashed and a core file was created." esx.problem.hostd.core.dumped.formatOnVm = "" esx.problem.hostd.core.dumped.formatOnHost = "" esx.problem.hostd.core.dumped.formatOnComputeResource = "" esx.problem.hostd.core.dumped.formatOnDatacenter = "" esx.problem.hostd.core.dumped.fullFormat = "{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped." # esx.problem.vpxa.core.dumped.category = "warning" esx.problem.vpxa.core.dumped.description = "Vpxa crashed and a core file was created." esx.problem.vpxa.core.dumped.formatOnVm = "" esx.problem.vpxa.core.dumped.formatOnHost = "" esx.problem.vpxa.core.dumped.formatOnComputeResource = "" esx.problem.vpxa.core.dumped.formatOnDatacenter = "" esx.problem.vpxa.core.dumped.fullFormat = "{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped." # esx.problem.application.core.dumped.category = "warning" esx.problem.application.core.dumped.description = "An application running on ESXi host has crashed and a core file was created." 
esx.problem.application.core.dumped.formatOnVm = "" esx.problem.application.core.dumped.formatOnHost = "" esx.problem.application.core.dumped.formatOnComputeResource = "" esx.problem.application.core.dumped.formatOnDatacenter = "" esx.problem.application.core.dumped.fullFormat = "An application ({1}) running on ESXi host has crashed ({2} time(s) so far). A core file might have been created at {3}." # esx.clear.net.connectivity.restored.category = "info" esx.clear.net.connectivity.restored.description = "Restored network connectivity to portgroups" esx.clear.net.connectivity.restored.formatOnVm = "" esx.clear.net.connectivity.restored.formatOnHost = "" esx.clear.net.connectivity.restored.formatOnComputeResource = "" esx.clear.net.connectivity.restored.formatOnDatacenter = "" esx.clear.net.connectivity.restored.fullFormat = "Network connectivity restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up." # esx.clear.net.redundancy.restored.category = "info" esx.clear.net.redundancy.restored.description = "Restored uplink redundancy to portgroups" esx.clear.net.redundancy.restored.formatOnVm = "" esx.clear.net.redundancy.restored.formatOnHost = "" esx.clear.net.redundancy.restored.formatOnComputeResource = "" esx.clear.net.redundancy.restored.formatOnDatacenter = "" esx.clear.net.redundancy.restored.fullFormat = "Uplink redundancy restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up." # esx.problem.3rdParty.warning.category = "warning" esx.problem.3rdParty.warning.description = "A 3rd party component on ESXi has reported a warning." esx.problem.3rdParty.warning.formatOnVm = "" esx.problem.3rdParty.warning.formatOnHost = "" esx.problem.3rdParty.warning.formatOnComputeResource = "" esx.problem.3rdParty.warning.formatOnDatacenter = "" esx.problem.3rdParty.warning.fullFormat = "A 3rd party component, {1}, running on ESXi has reported a warning related to a problem. 
Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}." # esx.problem.3rdParty.error.category = "error" esx.problem.3rdParty.error.description = "A 3rd party component on ESXi has reported an error." esx.problem.3rdParty.error.formatOnVm = "" esx.problem.3rdParty.error.formatOnHost = "" esx.problem.3rdParty.error.formatOnComputeResource = "" esx.problem.3rdParty.error.formatOnDatacenter = "" esx.problem.3rdParty.error.fullFormat = "A 3rd party component, {1}, running on ESXi has reported an error. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}." # esx.problem.3rdParty.info.category = "info" esx.problem.3rdParty.info.description = "A 3rd party component on ESXi has reported an informational event." esx.problem.3rdParty.info.formatOnVm = "" esx.problem.3rdParty.info.formatOnHost = "" esx.problem.3rdParty.info.formatOnComputeResource = "" esx.problem.3rdParty.info.formatOnDatacenter = "" esx.problem.3rdParty.info.fullFormat = "A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}." # esx.problem.3rdParty.information.category = "info" esx.problem.3rdParty.information.description = "A 3rd party component on ESXi has reported an informational event." esx.problem.3rdParty.information.formatOnVm = "" esx.problem.3rdParty.information.formatOnHost = "" esx.problem.3rdParty.information.formatOnComputeResource = "" esx.problem.3rdParty.information.formatOnDatacenter = "" esx.problem.3rdParty.information.fullFormat = "A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}." 
# com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.category = "warning" com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.description = "vSphere HA detected application heartbeat failure" com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.formatOnComputeResource = "vSphere HA detected application heartbeat failure for {vm.name} on {host.name}" com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.formatOnDatacenter = "vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name}" com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.formatOnHost = "vSphere HA detected application heartbeat failure for {vm.name}" com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.formatOnVm = "vSphere HA detected application heartbeat failure for this virtual machine" com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent.fullFormat = "vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}" # com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.category = "warning" com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.description = "vSphere HA detected application heartbeat status change" com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.formatOnComputeResource = "vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name}" com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.formatOnDatacenter = "vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name}" com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.formatOnHost = "vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name}" com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.formatOnVm = "vSphere HA 
detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for this virtual machine" com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent.fullFormat = "vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}" # com.vmware.vc.vmam.VmAppHealthStateChangedEvent.category = "warning" com.vmware.vc.vmam.VmAppHealthStateChangedEvent.description = "vSphere HA detected application state change" com.vmware.vc.vmam.VmAppHealthStateChangedEvent.formatOnComputeResource = "vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name}" com.vmware.vc.vmam.VmAppHealthStateChangedEvent.formatOnDatacenter = "vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name}" com.vmware.vc.vmam.VmAppHealthStateChangedEvent.formatOnHost = "vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name}" com.vmware.vc.vmam.VmAppHealthStateChangedEvent.formatOnVm = "vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for this virtual machine" com.vmware.vc.vmam.VmAppHealthStateChangedEvent.fullFormat = "vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}" # com.vmware.vc.ft.VmAffectedByDasDisabledEvent.category = "warning" com.vmware.vc.ft.VmAffectedByDasDisabledEvent.description = "Fault Tolerance VM restart disabled" com.vmware.vc.ft.VmAffectedByDasDisabledEvent.formatOnVm = "vSphere HA has been disabled. vSphere HA will not restart this VM or its Secondary VM after a failure." 
com.vmware.vc.ft.VmAffectedByDasDisabledEvent.formatOnHost = "vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure." com.vmware.vc.ft.VmAffectedByDasDisabledEvent.formatOnComputeResource = "vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure." com.vmware.vc.ft.VmAffectedByDasDisabledEvent.formatOnDatacenter = "vSphere HA has been disabled in cluster {computeResource.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure." com.vmware.vc.ft.VmAffectedByDasDisabledEvent.fullFormat = "vSphere HA has been disabled in cluster {computeResource.name} of datacenter {datacenter.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure." # com.vmware.vc.HA.CreateConfigVvolFailedEvent.category = "error" com.vmware.vc.HA.CreateConfigVvolFailedEvent.description = "vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}" com.vmware.vc.HA.CreateConfigVvolFailedEvent.formatOnComputeResource = "vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}" com.vmware.vc.HA.CreateConfigVvolFailedEvent.formatOnDatacenter = "vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}" com.vmware.vc.HA.CreateConfigVvolFailedEvent.formatOnHost = "vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. 
Error: {fault}" com.vmware.vc.HA.CreateConfigVvolFailedEvent.formatOnVm = "" com.vmware.vc.HA.CreateConfigVvolFailedEvent.fullFormat = "vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}" # com.vmware.vc.HA.CreateConfigVvolSucceededEvent.category = "info" com.vmware.vc.HA.CreateConfigVvolSucceededEvent.description = "vSphere HA successfully created a configuration vVol after the previous failure" com.vmware.vc.HA.CreateConfigVvolSucceededEvent.formatOnComputeResource = "vSphere HA successfully created a configuration vVol after the previous failure" com.vmware.vc.HA.CreateConfigVvolSucceededEvent.formatOnDatacenter = "vSphere HA successfully created a configuration vVol after the previous failure" com.vmware.vc.HA.CreateConfigVvolSucceededEvent.formatOnHost = "vSphere HA successfully created a configuration vVol after the previous failure" com.vmware.vc.HA.CreateConfigVvolSucceededEvent.formatOnVm = "" com.vmware.vc.HA.CreateConfigVvolSucceededEvent.fullFormat = "vSphere HA successfully created a configuration vVol after the previous failure" # com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.category = "error" com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.description = "Host complete datastore failure" com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.formatOnComputeResource = "All shared datastores failed on the host {hostName}" com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.formatOnDatacenter = "All shared datastores failed on the host {hostName} in cluster {computeResource.name}" com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.formatOnHost = "All shared datastores failed on the host {hostName}" com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.formatOnVm = "" com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent.fullFormat = "All shared datastores failed on the host {hostName} in cluster 
{computeResource.name} in {datacenter.name}" # com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.category = "error" com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.description = "Host complete network failure" com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.formatOnComputeResource = "All VM networks failed on the host {hostName}" com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.formatOnDatacenter = "All VM networks failed on the host {hostName} in cluster {computeResource.name}" com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.formatOnHost = "All VM networks failed on the host {hostName}" com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.formatOnVm = "" com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent.fullFormat = "All VM networks failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name}" # vim.event.LicenseDowngradedEvent.category = "warning" vim.event.LicenseDowngradedEvent.description = "License downgrade" vim.event.LicenseDowngradedEvent.formatOnComputeResource = "License downgrade" vim.event.LicenseDowngradedEvent.formatOnDatacenter = "License downgrade" vim.event.LicenseDowngradedEvent.formatOnHost = "License downgrade" vim.event.LicenseDowngradedEvent.formatOnVm = "" vim.event.LicenseDowngradedEvent.fullFormat = "License downgrade: {licenseKey} removes the following features: {lostFeatures}" # vim.event.SubscriptionLicenseExpiredEvent.category = "warning" vim.event.SubscriptionLicenseExpiredEvent.description = "The time-limited license on the host has expired." vim.event.SubscriptionLicenseExpiredEvent.formatOnComputeResource = "The time-limited license on host {host.name} has expired." vim.event.SubscriptionLicenseExpiredEvent.formatOnDatacenter = "The time-limited license on host {host.name} has expired." vim.event.SubscriptionLicenseExpiredEvent.formatOnHost = "The time-limited license on the host has expired." 
vim.event.SubscriptionLicenseExpiredEvent.formatOnVm = "" vim.event.SubscriptionLicenseExpiredEvent.fullFormat = "The time-limited license on host {host.name} has expired. To comply with the EULA, renew the license at http://my.vmware.com" # vim.event.UnsupportedHardwareVersionEvent.category = "warning" vim.event.UnsupportedHardwareVersionEvent.description = "This virtual machine uses hardware version {version}, which is no longer supported. Upgrade is recommended." vim.event.UnsupportedHardwareVersionEvent.formatOnComputeResource = "Virtual machine {vm.name} on {host.name} uses hardware version {version}, which is no longer supported. Upgrade is recommended." vim.event.UnsupportedHardwareVersionEvent.formatOnDatacenter = "Virtual machine {vm.name} on {host.name} in cluster {computeResource.name} uses hardware version {version}, which is no longer supported. Upgrade is recommended." vim.event.UnsupportedHardwareVersionEvent.formatOnHost = "Virtual machine {vm.name} uses hardware version {version}, which is no longer supported. Upgrade is recommended." vim.event.UnsupportedHardwareVersionEvent.formatOnVm = "This virtual machine uses hardware version {version}, which is no longer supported. Upgrade is recommended." vim.event.UnsupportedHardwareVersionEvent.fullFormat = "Virtual machine {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} uses hardware version {version}, which is no longer supported. Upgrade is recommended." # ad.event.JoinDomainFailedEvent.category = "error" ad.event.JoinDomainFailedEvent.description = "Join domain failure" ad.event.JoinDomainFailedEvent.fullFormat = "Join domain failed." ad.event.JoinDomainFailedEvent.formatOnComputeResource = "Join domain failed." ad.event.JoinDomainFailedEvent.formatOnDatacenter = "Join domain failed." ad.event.JoinDomainFailedEvent.formatOnHost = "Join domain failed." ad.event.JoinDomainFailedEvent.formatOnVm = "Join domain failed." 
# ad.event.JoinDomainEvent.category = "info" ad.event.JoinDomainEvent.description = "Join domain success" ad.event.JoinDomainEvent.fullFormat = "Join domain succeeded." ad.event.JoinDomainEvent.formatOnComputeResource = "Join domain succeeded." ad.event.JoinDomainEvent.formatOnDatacenter = "Join domain succeeded." ad.event.JoinDomainEvent.formatOnHost = "Join domain succeeded." ad.event.JoinDomainEvent.formatOnVm = "Join domain succeeded." # ad.event.LeaveDomainFailedEvent.category = "error" ad.event.LeaveDomainFailedEvent.description = "Leave domain failure" ad.event.LeaveDomainFailedEvent.fullFormat = "Leave domain failed." ad.event.LeaveDomainFailedEvent.formatOnComputeResource = "Leave domain failed." ad.event.LeaveDomainFailedEvent.formatOnDatacenter = "Leave domain failed." ad.event.LeaveDomainFailedEvent.formatOnHost = "Leave domain failed." ad.event.LeaveDomainFailedEvent.formatOnVm = "Leave domain failed." # ad.event.LeaveDomainEvent.category = "info" ad.event.LeaveDomainEvent.description = "Leave domain success" ad.event.LeaveDomainEvent.fullFormat = "Leave domain succeeded." ad.event.LeaveDomainEvent.formatOnComputeResource = "Leave domain succeeded." ad.event.LeaveDomainEvent.formatOnDatacenter = "Leave domain succeeded." ad.event.LeaveDomainEvent.formatOnHost = "Leave domain succeeded." ad.event.LeaveDomainEvent.formatOnVm = "Leave domain succeeded." # ad.event.ImportCertFailedEvent.category = "error" ad.event.ImportCertFailedEvent.description = "Import certificate failure" ad.event.ImportCertFailedEvent.fullFormat = "Import certificate failed." ad.event.ImportCertFailedEvent.formatOnComputeResource = "Import certificate failed." ad.event.ImportCertFailedEvent.formatOnDatacenter = "Import certificate failed." ad.event.ImportCertFailedEvent.formatOnHost = "Import certificate failed." ad.event.ImportCertFailedEvent.formatOnVm = "Import certificate failed." 
# ad.event.ImportCertEvent.category = "info" ad.event.ImportCertEvent.description = "Import certificate success" ad.event.ImportCertEvent.fullFormat = "Import certificate succeeded." ad.event.ImportCertEvent.formatOnComputeResource = "Import certificate succeeded." ad.event.ImportCertEvent.formatOnDatacenter = "Import certificate succeeded." ad.event.ImportCertEvent.formatOnHost = "Import certificate succeeded." ad.event.ImportCertEvent.formatOnVm = "Import certificate succeeded." # vim.event.SystemSwapInaccessible.category = "warning" vim.event.SystemSwapInaccessible.description = "System swap inaccessible" vim.event.SystemSwapInaccessible.formatOnComputeResource = "System swap inaccessible" vim.event.SystemSwapInaccessible.formatOnDatacenter = "System swap inaccessible" vim.event.SystemSwapInaccessible.formatOnHost = "System swap inaccessible" vim.event.SystemSwapInaccessible.formatOnVm = "" vim.event.SystemSwapInaccessible.fullFormat = "System swap is inaccessible because the datastore '{datastore}' is permanently lost. Please modify the system swap configuration and/or the local system swap directory." # esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.category = "warning" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.description = "System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed." esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.formatOnComputeResource = "System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed." esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.formatOnDatacenter = "System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed." esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.formatOnHost = "System swap at path {1} was affected by the PDL of its datastore. 
It was removed but the subsequent reconfiguration failed." esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.formatOnVm = "" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.fullFormat = "System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed." # esx.problem.swap.systemSwap.isPDL.cannot.remove.category = "warning" esx.problem.swap.systemSwap.isPDL.cannot.remove.description = "System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured." esx.problem.swap.systemSwap.isPDL.cannot.remove.formatOnComputeResource = "System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured." esx.problem.swap.systemSwap.isPDL.cannot.remove.formatOnDatacenter = "System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured." esx.problem.swap.systemSwap.isPDL.cannot.remove.formatOnHost = "System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured." esx.problem.swap.systemSwap.isPDL.cannot.remove.formatOnVm = "" esx.problem.swap.systemSwap.isPDL.cannot.remove.fullFormat = "System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured." # esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.category = "warning" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.description = "System swap was affected by the PDL of its datastore. 
It was removed but the subsequent reconfiguration failed." esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.formatOnComputeResource = "" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.formatOnDatacenter = "" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.formatOnHost = "" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.formatOnVm = "" esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2.fullFormat = "System swap was affected by the PDL of {1}. It was removed but the subsequent reconfiguration failed." # esx.problem.swap.systemSwap.isPDL.cannot.remove.2.category = "warning" esx.problem.swap.systemSwap.isPDL.cannot.remove.2.description = "System swap was affected by the PDL of its datastore and was removed. System swap has been reconfigured." esx.problem.swap.systemSwap.isPDL.cannot.remove.2.formatOnComputeResource = "" esx.problem.swap.systemSwap.isPDL.cannot.remove.2.formatOnDatacenter = "" esx.problem.swap.systemSwap.isPDL.cannot.remove.2.formatOnHost = "" esx.problem.swap.systemSwap.isPDL.cannot.remove.2.formatOnVm = "" esx.problem.swap.systemSwap.isPDL.cannot.remove.2.fullFormat = "System swap was affected by the PDL of {1} and was removed. System swap has been reconfigured." # # vSphere Replication events # # VR configuration events # hbr.primary.VmReplicationConfigurationChangedEvent.category = "info" hbr.primary.VmReplicationConfigurationChangedEvent.description = "Replication configuration changed." hbr.primary.VmReplicationConfigurationChangedEvent.formatOnVm = "Replication configuration changed ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort})." hbr.primary.VmReplicationConfigurationChangedEvent.formatOnHost = "Replication configuration changed for virtual machine {vm.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort})." 
hbr.primary.VmReplicationConfigurationChangedEvent.formatOnComputeResource = "Replication configuration changed for virtual machine {vm.name} on host {host.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort})." hbr.primary.VmReplicationConfigurationChangedEvent.formatOnDatacenter = "Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort})." hbr.primary.VmReplicationConfigurationChangedEvent.fullFormat = "Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort})." # hbr.primary.InvalidVmReplicationConfigurationEvent.category = "error" hbr.primary.InvalidVmReplicationConfigurationEvent.description = "Virtual machine replication configuration is invalid." 
hbr.primary.InvalidVmReplicationConfigurationEvent.formatOnVm = "Replication configuration is invalid: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}" hbr.primary.InvalidVmReplicationConfigurationEvent.formatOnHost = "Replication configuration is invalid for virtual machine {vm.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}" hbr.primary.InvalidVmReplicationConfigurationEvent.formatOnComputeResource = "Replication configuration is invalid for virtual machine {vm.name} on host {host.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}" hbr.primary.InvalidVmReplicationConfigurationEvent.formatOnDatacenter = "Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}" hbr.primary.InvalidVmReplicationConfigurationEvent.fullFormat = "Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}" # hbr.primary.InvalidDiskReplicationConfigurationEvent.category = "error" hbr.primary.InvalidDiskReplicationConfigurationEvent.description = "Disk replication configuration is invalid." 
hbr.primary.InvalidDiskReplicationConfigurationEvent.formatOnVm = "Replication configuration is invalid for disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}" hbr.primary.InvalidDiskReplicationConfigurationEvent.formatOnHost = "Replication configuration is invalid for virtual machine {vm.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}" hbr.primary.InvalidDiskReplicationConfigurationEvent.formatOnComputeResource = "Replication configuration is invalid for virtual machine {vm.name} on host {host.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}" hbr.primary.InvalidDiskReplicationConfigurationEvent.formatOnDatacenter = "Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}" hbr.primary.InvalidDiskReplicationConfigurationEvent.fullFormat = "Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}" # # VR delta events # hbr.primary.DeltaStartedEvent.category = "info" hbr.primary.DeltaStartedEvent.description = "Sync started." hbr.primary.DeltaStartedEvent.formatOnVm = "Sync started by {userName}." hbr.primary.DeltaStartedEvent.formatOnHost = "Sync started by {userName} for virtual machine {vm.name}." hbr.primary.DeltaStartedEvent.formatOnComputeResource = "Sync started by {userName} for virtual machine {vm.name} on host {host.name}." hbr.primary.DeltaStartedEvent.formatOnDatacenter = "Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." 
hbr.primary.DeltaStartedEvent.fullFormat = "Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # hbr.primary.DeltaCompletedEvent.category = "info" hbr.primary.DeltaCompletedEvent.description = "Sync completed." hbr.primary.DeltaCompletedEvent.formatOnVm = "Sync completed." hbr.primary.DeltaCompletedEvent.formatOnHost = "Sync completed for virtual machine {vm.name}." hbr.primary.DeltaCompletedEvent.formatOnComputeResource = "Sync completed for virtual machine {vm.name} on host {host.name}." hbr.primary.DeltaCompletedEvent.formatOnDatacenter = "Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.DeltaCompletedEvent.fullFormat = "Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)." # hbr.primary.AppQuiescedDeltaCompletedEvent.category = "info" hbr.primary.AppQuiescedDeltaCompletedEvent.description = "Application consistent sync completed." hbr.primary.AppQuiescedDeltaCompletedEvent.formatOnVm = "Application consistent sync completed." hbr.primary.AppQuiescedDeltaCompletedEvent.formatOnHost = "Application consistent sync completed for virtual machine {vm.name}." hbr.primary.AppQuiescedDeltaCompletedEvent.formatOnComputeResource = "Application consistent sync completed for virtual machine {vm.name} on host {host.name}." hbr.primary.AppQuiescedDeltaCompletedEvent.formatOnDatacenter = "Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." 
hbr.primary.AppQuiescedDeltaCompletedEvent.fullFormat = "Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)" # hbr.primary.FSQuiescedDeltaCompletedEvent.category = "info" hbr.primary.FSQuiescedDeltaCompletedEvent.description = "File system consistent sync completed." hbr.primary.FSQuiescedDeltaCompletedEvent.formatOnVm = "File system consistent sync completed." hbr.primary.FSQuiescedDeltaCompletedEvent.formatOnHost = "File system consistent sync completed for virtual machine {vm.name}." hbr.primary.FSQuiescedDeltaCompletedEvent.formatOnComputeResource = "File system consistent sync completed for virtual machine {vm.name} on host {host.name}." hbr.primary.FSQuiescedDeltaCompletedEvent.formatOnDatacenter = "File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.FSQuiescedDeltaCompletedEvent.fullFormat = "File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)" # hbr.primary.UnquiescedDeltaCompletedEvent.category = "warning" hbr.primary.UnquiescedDeltaCompletedEvent.description = "Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed." hbr.primary.UnquiescedDeltaCompletedEvent.formatOnVm = "Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed." hbr.primary.UnquiescedDeltaCompletedEvent.formatOnHost = "Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name}." hbr.primary.UnquiescedDeltaCompletedEvent.formatOnComputeResource = "Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name}." 
hbr.primary.UnquiescedDeltaCompletedEvent.formatOnDatacenter = "Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.UnquiescedDeltaCompletedEvent.fullFormat = "Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)." # hbr.primary.QuiesceNotSupported.category = "warning" hbr.primary.QuiesceNotSupported.description = "Quiescing is not supported for this virtual machine." hbr.primary.QuiesceNotSupported.formatOnVm = "Quiescing is not supported for this virtual machine." hbr.primary.QuiesceNotSupported.formatOnHost = "Quiescing is not supported for virtual machine {vm.name}." hbr.primary.QuiesceNotSupported.formatOnComputeResource = "Quiescing is not supported for virtual machine {vm.name} on host {host.name}." hbr.primary.QuiesceNotSupported.formatOnDatacenter = "Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.QuiesceNotSupported.fullFormat = "Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # hbr.primary.DeltaAbortedEvent.category = "warning" hbr.primary.DeltaAbortedEvent.description = "Sync aborted." 
hbr.primary.DeltaAbortedEvent.formatOnVm = "Sync aborted: {reason.@enum.hbr.primary.ReasonForDeltaAbort}" hbr.primary.DeltaAbortedEvent.formatOnHost = "Sync aborted for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}" hbr.primary.DeltaAbortedEvent.formatOnComputeResource = "Sync aborted for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}" hbr.primary.DeltaAbortedEvent.formatOnDatacenter = "Sync aborted for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}" hbr.primary.DeltaAbortedEvent.fullFormat = "Sync aborted for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}" # hbr.primary.FailedToStartDeltaEvent.category = "error" hbr.primary.FailedToStartDeltaEvent.description = "Failed to start sync." hbr.primary.FailedToStartDeltaEvent.formatOnVm = "Failed to start sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartDeltaEvent.formatOnHost = "Failed to start sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartDeltaEvent.formatOnComputeResource = "Failed to start sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartDeltaEvent.formatOnDatacenter = "Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartDeltaEvent.fullFormat = "Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" # hbr.primary.HostLicenseFailedEvent.category = "error" hbr.primary.HostLicenseFailedEvent.description = "vSphere 
Replication is not licensed, replication is disabled." hbr.primary.HostLicenseFailedEvent.formatOnVm = "vSphere Replication is not licensed, replication is disabled." hbr.primary.HostLicenseFailedEvent.formatOnHost = "vSphere Replication is not licensed, replication is disabled." hbr.primary.HostLicenseFailedEvent.formatOnComputeResource = "vSphere Replication is not licensed, replication is disabled on host {host.name}" hbr.primary.HostLicenseFailedEvent.formatOnDatacenter = "vSphere Replication is not licensed, replication is disabled on host {host.name} in cluster {computeResource.name}" hbr.primary.HostLicenseFailedEvent.fullFormat = "vSphere Replication is not licensed, replication is disabled on host {host.name} in cluster {computeResource.name} in {datacenter.name}" # hbr.primary.VmLicenseFailedEvent.category = "error" hbr.primary.VmLicenseFailedEvent.description = "vSphere Replication is not licensed, replication is disabled." hbr.primary.VmLicenseFailedEvent.formatOnVm = "vSphere Replication is not licensed, replication is disabled." hbr.primary.VmLicenseFailedEvent.formatOnHost = "vSphere Replication is not licensed, replication is disabled for virtual machine {vm.name}." hbr.primary.VmLicenseFailedEvent.formatOnComputeResource = "vSphere Replication is not licensed, replication is disabled for virtual machine {vm.name} on host {host.name}" hbr.primary.VmLicenseFailedEvent.formatOnDatacenter = "vSphere Replication is not licensed, replication is disabled for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}" hbr.primary.VmLicenseFailedEvent.fullFormat = "vSphere Replication is not licensed, replication is disabled for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}" # # VR sync events # hbr.primary.SyncStartedEvent.category = "info" hbr.primary.SyncStartedEvent.description = "Full sync started." hbr.primary.SyncStartedEvent.formatOnVm = "Full sync started." 
hbr.primary.SyncStartedEvent.formatOnHost = "Full sync started for virtual machine {vm.name}." hbr.primary.SyncStartedEvent.formatOnComputeResource = "Full sync started for virtual machine {vm.name} on host {host.name}." hbr.primary.SyncStartedEvent.formatOnDatacenter = "Full sync started for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.SyncStartedEvent.fullFormat = "Full sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # hbr.primary.SyncCompletedEvent.category = "info" hbr.primary.SyncCompletedEvent.description = "Full sync completed." hbr.primary.SyncCompletedEvent.formatOnVm = "Full sync completed." hbr.primary.SyncCompletedEvent.formatOnHost = "Full sync completed for virtual machine {vm.name}." hbr.primary.SyncCompletedEvent.formatOnComputeResource = "Full sync completed for virtual machine {vm.name} on host {host.name}." hbr.primary.SyncCompletedEvent.formatOnDatacenter = "Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.SyncCompletedEvent.fullFormat = "Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)." # hbr.primary.FailedToStartSyncEvent.category = "error" hbr.primary.FailedToStartSyncEvent.description = "Failed to start full sync." 
hbr.primary.FailedToStartSyncEvent.formatOnVm = "Failed to start full sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartSyncEvent.formatOnHost = "Failed to start full sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartSyncEvent.formatOnComputeResource = "Failed to start full sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartSyncEvent.formatOnDatacenter = "Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" hbr.primary.FailedToStartSyncEvent.fullFormat = "Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}" # # VR problem events # hbr.primary.NoConnectionToHbrServerEvent.category = "warning" hbr.primary.NoConnectionToHbrServerEvent.description = "No connection to VR Server." 
hbr.primary.NoConnectionToHbrServerEvent.formatOnVm = "No connection to VR Server: {reason.@enum.hbr.primary.ReasonForNoServerConnection}" hbr.primary.NoConnectionToHbrServerEvent.formatOnHost = "No connection to VR Server for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}" hbr.primary.NoConnectionToHbrServerEvent.formatOnComputeResource = "No connection to VR Server for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}" hbr.primary.NoConnectionToHbrServerEvent.formatOnDatacenter = "No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}" hbr.primary.NoConnectionToHbrServerEvent.fullFormat = "No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}" # hbr.primary.ConnectionRestoredToHbrServerEvent.category = "info" hbr.primary.ConnectionRestoredToHbrServerEvent.description = "Connection to VR Server restored." hbr.primary.ConnectionRestoredToHbrServerEvent.formatOnVm = "Connection to VR Server restored." hbr.primary.ConnectionRestoredToHbrServerEvent.formatOnHost = "Connection to VR Server restored for virtual machine {vm.name}." hbr.primary.ConnectionRestoredToHbrServerEvent.formatOnComputeResource = "Connection to VR Server restored for virtual machine {vm.name} on host {host.name}." hbr.primary.ConnectionRestoredToHbrServerEvent.formatOnDatacenter = "Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.ConnectionRestoredToHbrServerEvent.fullFormat = "Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." 
# hbr.primary.NoProgressWithHbrServerEvent.category = "error" hbr.primary.NoProgressWithHbrServerEvent.description = "VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}" hbr.primary.NoProgressWithHbrServerEvent.formatOnVm = "VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}" hbr.primary.NoProgressWithHbrServerEvent.formatOnHost = "VR Server error for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}" hbr.primary.NoProgressWithHbrServerEvent.formatOnComputeResource = "VR Server error for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}" hbr.primary.NoProgressWithHbrServerEvent.formatOnDatacenter = "VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}" hbr.primary.NoProgressWithHbrServerEvent.fullFormat = "VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}" # hbr.primary.SystemPausedReplication.category = "warning" hbr.primary.SystemPausedReplication.description = "System has paused replication." 
hbr.primary.SystemPausedReplication.formatOnVm = "System has paused replication: {reason.@enum.hbr.primary.ReasonForPausedReplication}" hbr.primary.SystemPausedReplication.formatOnHost = "System has paused replication for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForPausedReplication}" hbr.primary.SystemPausedReplication.formatOnComputeResource = "System has paused replication for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForPausedReplication}" hbr.primary.SystemPausedReplication.formatOnDatacenter = "System has paused replication for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForPausedReplication}" hbr.primary.SystemPausedReplication.fullFormat = "System has paused replication for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForPausedReplication}" # hbr.primary.UnquiescedSnapshot.category = "warning" hbr.primary.UnquiescedSnapshot.description = "Unable to quiesce the guest." hbr.primary.UnquiescedSnapshot.formatOnVm = "Unable to quiesce the guest: VSS failure" hbr.primary.UnquiescedSnapshot.formatOnHost = "Unable to quiesce the guest for virtual machine {vm.name}: VSS failure" hbr.primary.UnquiescedSnapshot.formatOnComputeResource = "Unable to quiesce the guest for virtual machine {vm.name} on host {host.name}: VSS failure" hbr.primary.UnquiescedSnapshot.formatOnDatacenter = "Unable to quiesce the guest for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: VSS failure" hbr.primary.UnquiescedSnapshot.fullFormat = "Unable to quiesce the guest for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: VSS failure" # hbr.primary.FSQuiescedSnapshot.category = "warning" hbr.primary.FSQuiescedSnapshot.description = "Application quiescing failed during replication." 
hbr.primary.FSQuiescedSnapshot.formatOnVm = "Application quiescing failed during replication: {reason.@enum.hbr.primary.ReasonForAppQuiesceFailure}" hbr.primary.FSQuiescedSnapshot.formatOnHost = "Application quiescing failed during replication for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForAppQuiesceFailure}" hbr.primary.FSQuiescedSnapshot.formatOnComputeResource = "Application quiescing failed during replication for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForAppQuiesceFailure}" hbr.primary.FSQuiescedSnapshot.formatOnDatacenter = "Application quiescing failed during replication for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForAppQuiesceFailure}" hbr.primary.FSQuiescedSnapshot.fullFormat = "Application quiescing failed during replication for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForAppQuiesceFailure}" # hbr.primary.DemandLogCorrupt.category = "warning" hbr.primary.DemandLogCorrupt.description = "Demand log header corruption detected." hbr.primary.DemandLogCorrupt.formatOnVm = "Demand log header corruption detected." hbr.primary.DemandLogCorrupt.formatOnHost = "Demand log header corruption detected during replication of virtual machine {vm.name}." hbr.primary.DemandLogCorrupt.formatOnComputeResource = "Demand log header corruption detected during replication of virtual machine {vm.name} on host {host.name}" hbr.primary.DemandLogCorrupt.formatOnDatacenter = "Demand log header corruption detected during replication of virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." 
hbr.primary.DemandLogCorrupt.fullFormat = "Demand log header corruption detected during replication of virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}" # hbr.primary.PsfFileLost.category = "error" hbr.primary.PsfFileLost.description = "VR persistent state file was lost." hbr.primary.PsfFileLost.formatOnVm = "VR persistent state file was lost. System has paused replication." hbr.primary.PsfFileLost.formatOnHost = "VR persistent state file was lost during replication of virtual machine {vm.name}. System has paused replication." hbr.primary.PsfFileLost.formatOnComputeResource = "VR persistent state file was lost during replication of virtual machine {vm.name} on host {host.name}. System has paused replication." hbr.primary.PsfFileLost.formatOnDatacenter = "VR persistent state file was lost during replication of virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}. System has paused replication." hbr.primary.PsfFileLost.fullFormat = "VR persistent state file was lost during replication of virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. System has paused replication." # hbr.primary.DisableOfflineError.category = "warning" hbr.primary.DisableOfflineError.description = "Cannot disable the offline instance." hbr.primary.DisableOfflineError.formatOnVm = "Cannot disable the offline instance. Stop the offline synchronization and restart it." hbr.primary.DisableOfflineError.formatOnHost = "Could not disable the offline instance during replication of virtual machine {vm.name}. Stop the offline synchronization and restart it." hbr.primary.DisableOfflineError.formatOnComputeResource = "Could not disable the offline instance during replication of virtual machine {vm.name} on host {host.name}. Stop the offline synchronization and restart it." 
hbr.primary.DisableOfflineError.formatOnDatacenter = "Could not disable the offline instance during replication of virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}. Stop the offline synchronization and restart it." hbr.primary.DisableOfflineError.fullFormat = "Could not disable the offline instance during replication of virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Stop the offline synchronization and restart it." # hbr.primary.RpoTooLowForServerEvent.category = "warning" hbr.primary.RpoTooLowForServerEvent.description = "VR Server does not support the configured RPO." hbr.primary.RpoTooLowForServerEvent.formatOnVm = "VR Server does not support the configured RPO." hbr.primary.RpoTooLowForServerEvent.formatOnHost = "VR Server does not support the configured RPO for virtual machine {vm.name}." hbr.primary.RpoTooLowForServerEvent.formatOnComputeResource = "VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name}." hbr.primary.RpoTooLowForServerEvent.formatOnDatacenter = "VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.RpoTooLowForServerEvent.fullFormat = "VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # hbr.primary.RpoOkForServerEvent.category = "info" hbr.primary.RpoOkForServerEvent.description = "VR Server is compatible with the configured RPO." hbr.primary.RpoOkForServerEvent.formatOnVm = "VR Server is compatible with the configured RPO." hbr.primary.RpoOkForServerEvent.formatOnHost = "VR Server is compatible with the configured RPO for virtual machine {vm.name}." hbr.primary.RpoOkForServerEvent.formatOnComputeResource = "VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name}." 
hbr.primary.RpoOkForServerEvent.formatOnDatacenter = "VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.RpoOkForServerEvent.fullFormat = "VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # hbr.primary.NetCompressionNotOkForServerEvent.category = "warning" hbr.primary.NetCompressionNotOkForServerEvent.description = "VR Server does not support network compression." hbr.primary.NetCompressionNotOkForServerEvent.formatOnVm = "VR Server does not support network compression." hbr.primary.NetCompressionNotOkForServerEvent.formatOnHost = "VR Server does not support network compression for virtual machine {vm.name}." hbr.primary.NetCompressionNotOkForServerEvent.formatOnComputeResource = "VR Server does not support network compression for virtual machine {vm.name} on host {host.name}." hbr.primary.NetCompressionNotOkForServerEvent.formatOnDatacenter = "VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.NetCompressionNotOkForServerEvent.fullFormat = "VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # hbr.primary.NetCompressionOkForServerEvent.category = "info" hbr.primary.NetCompressionOkForServerEvent.description = "VR Server supports network compression." hbr.primary.NetCompressionOkForServerEvent.formatOnVm = "VR Server supports network compression." hbr.primary.NetCompressionOkForServerEvent.formatOnHost = "VR Server supports network compression for virtual machine {vm.name}." hbr.primary.NetCompressionOkForServerEvent.formatOnComputeResource = "VR Server supports network compression for virtual machine {vm.name} on host {host.name}." 
hbr.primary.NetCompressionOkForServerEvent.formatOnDatacenter = "VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}." hbr.primary.NetCompressionOkForServerEvent.fullFormat = "VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # # VR replication events end here # # Virtual machine provisioning events com.vmware.vc.VmDiskFailedToConsolidateEvent.category = "warning" com.vmware.vc.VmDiskFailedToConsolidateEvent.description = "Virtual machine disks consolidation failed." com.vmware.vc.VmDiskFailedToConsolidateEvent.formatOnComputeResource = "Virtual machine {vm.name} disks consolidation failed on {host.name}." com.vmware.vc.VmDiskFailedToConsolidateEvent.formatOnDatacenter = "Virtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name}." com.vmware.vc.VmDiskFailedToConsolidateEvent.formatOnHost = "Virtual machine {vm.name} disks consolidation failed." com.vmware.vc.VmDiskFailedToConsolidateEvent.formatOnVm = "Virtual machine disks consolidation failed." com.vmware.vc.VmDiskFailedToConsolidateEvent.fullFormat = "Virtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name} in {datacenter.name}." com.vmware.vc.VmDiskConsolidatedEvent.category = "info" com.vmware.vc.VmDiskConsolidatedEvent.description = "Virtual machine disks consolidation succeeded." com.vmware.vc.VmDiskConsolidatedEvent.formatOnComputeResource = "Virtual machine {vm.name} disks consolidation succeeded on {host.name}." com.vmware.vc.VmDiskConsolidatedEvent.formatOnDatacenter = "Virtual machine {vm.name} disks consolidation succeeded on {host.name} in cluster {computeResource.name}." com.vmware.vc.VmDiskConsolidatedEvent.formatOnHost = "Virtual machine {vm.name} disks consolidation succeeded." 
com.vmware.vc.VmDiskConsolidatedEvent.formatOnVm = "Virtual machine disks consolidation succeeded." com.vmware.vc.VmDiskConsolidatedEvent.fullFormat = "Virtual machine {vm.name} disks consolidated successfully on {host.name} in cluster {computeResource.name} in {datacenter.name}." com.vmware.vc.VmDiskConsolidationNeeded.category = "warning" com.vmware.vc.VmDiskConsolidationNeeded.description = "Virtual machine disks consolidation needed." com.vmware.vc.VmDiskConsolidationNeeded.formatOnComputeResource = "Virtual machine {vm.name} disks consolidation is needed on {host.name}." com.vmware.vc.VmDiskConsolidationNeeded.formatOnDatacenter = "Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name}." com.vmware.vc.VmDiskConsolidationNeeded.formatOnHost = "Virtual machine {vm.name} disks consolidation is needed." com.vmware.vc.VmDiskConsolidationNeeded.formatOnVm = "Virtual machine disks consolidation is needed." com.vmware.vc.VmDiskConsolidationNeeded.fullFormat = "Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name} in {datacenter.name}." com.vmware.vc.VmDiskConsolidationNoLongerNeeded.category = "info" com.vmware.vc.VmDiskConsolidationNoLongerNeeded.description = "Virtual machine disks consolidation no longer needed." com.vmware.vc.VmDiskConsolidationNoLongerNeeded.formatOnComputeResource = "Virtual machine {vm.name} disks consolidation is no longer needed on {host.name}." com.vmware.vc.VmDiskConsolidationNoLongerNeeded.formatOnDatacenter = "Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name}." com.vmware.vc.VmDiskConsolidationNoLongerNeeded.formatOnHost = "Virtual machine {vm.name} disks consolidation is no longer needed." com.vmware.vc.VmDiskConsolidationNoLongerNeeded.formatOnVm = "Virtual machine disks consolidation is no longer needed." 
com.vmware.vc.VmDiskConsolidationNoLongerNeeded.fullFormat = "Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name} in {datacenter.name}." com.vmware.vc.VmCloneToResourcePoolFailedEvent.category = "error" com.vmware.vc.VmCloneToResourcePoolFailedEvent.description = "Cannot complete virtual machine clone." com.vmware.vc.VmCloneToResourcePoolFailedEvent.formatOnComputeResource = "" com.vmware.vc.VmCloneToResourcePoolFailedEvent.formatOnDatacenter = "" com.vmware.vc.VmCloneToResourcePoolFailedEvent.formatOnHost = "" com.vmware.vc.VmCloneToResourcePoolFailedEvent.formatOnVm = "" com.vmware.vc.VmCloneToResourcePoolFailedEvent.fullFormat = "Cannot clone {vm.name} as {destVmName} to resource pool {destResourcePool}: {fault}" com.vmware.vc.VmCloneFailedInvalidDestinationEvent.category = "error" com.vmware.vc.VmCloneFailedInvalidDestinationEvent.description = "Cannot complete virtual machine clone." com.vmware.vc.VmCloneFailedInvalidDestinationEvent.formatOnComputeResource = "" com.vmware.vc.VmCloneFailedInvalidDestinationEvent.formatOnDatacenter = "" com.vmware.vc.VmCloneFailedInvalidDestinationEvent.formatOnHost = "" com.vmware.vc.VmCloneFailedInvalidDestinationEvent.formatOnVm = "" com.vmware.vc.VmCloneFailedInvalidDestinationEvent.fullFormat = "Cannot clone {vm.name} as {destVmName} to invalid or non-existent destination with ID {invalidMoRef}: {fault}" # com.vmware.vc.vm.VmStateRevertedToSnapshot.category = "info" com.vmware.vc.vm.VmStateRevertedToSnapshot.description = "The virtual machine state has been reverted to a snapshot" com.vmware.vc.vm.VmStateRevertedToSnapshot.formatOnVm = "The execution state of the virtual machine has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateRevertedToSnapshot.formatOnHost = "The execution state of the virtual machine {vm.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}" 
com.vmware.vc.vm.VmStateRevertedToSnapshot.formatOnComputeResource = "The execution state of the virtual machine {vm.name} on host {host.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateRevertedToSnapshot.formatOnDatacenter = "The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateRevertedToSnapshot.fullFormat = "The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}" # com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.category = "error" com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.description = "Failed to revert the virtual machine state to a snapshot" com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.formatOnVm = "Failed to revert the execution state of the virtual machine to snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.formatOnHost = "Failed to revert the execution state of the virtual machine {vm.name} to snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.formatOnComputeResource = "Failed to revert the execution state of the virtual machine {vm.name} on host {host.name} to snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.formatOnDatacenter = "Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}" com.vmware.vc.vm.VmStateFailedToRevertToSnapshot.fullFormat = "Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID 
{snapshotId}" # # host issues # esx.audit.host.maxRegisteredVMsExceeded.category = "warning" esx.audit.host.maxRegisteredVMsExceeded.description = "The number of virtual machines registered on the host exceeded limit." esx.audit.host.maxRegisteredVMsExceeded.formatOnVm = "" esx.audit.host.maxRegisteredVMsExceeded.formatOnHost = "The number of virtual machines registered exceeded limit: {current} registered, {limit} is the maximum supported." esx.audit.host.maxRegisteredVMsExceeded.formatOnComputeResource = "The number of virtual machines registered on host {host.name} exceeded limit: {current} registered, {limit} is the maximum supported." esx.audit.host.maxRegisteredVMsExceeded.formatOnDatacenter = "The number of virtual machines registered on host {host.name} in cluster {computeResource.name} exceeded limit: {current} registered, {limit} is the maximum supported." esx.audit.host.maxRegisteredVMsExceeded.fullFormat = "The number of virtual machines registered on host {host.name} in cluster {computeResource.name} in {datacenter.name} exceeded limit: {current} registered, {limit} is the maximum supported." # Secpolicy related events esx.audit.uw.secpolicy.domain.level.changed.category = "warning" esx.audit.uw.secpolicy.domain.level.changed.description = "Enforcement level changed for security domain." esx.audit.uw.secpolicy.domain.level.changed.formatOnVm = "" esx.audit.uw.secpolicy.domain.level.changed.formatOnHost = "" esx.audit.uw.secpolicy.domain.level.changed.formatOnComputeResource = "" esx.audit.uw.secpolicy.domain.level.changed.formatOnDatacenter = "" esx.audit.uw.secpolicy.domain.level.changed.fullFormat = "The enforcement level for security domain {1} has been changed to {2}. The enforcement level must always be set to enforcing." # esx.audit.uw.secpolicy.alldomains.level.changed.category = "warning" esx.audit.uw.secpolicy.alldomains.level.changed.description = "Enforcement level changed for all security domains." 
esx.audit.uw.secpolicy.alldomains.level.changed.formatOnVm = "" esx.audit.uw.secpolicy.alldomains.level.changed.formatOnHost = "" esx.audit.uw.secpolicy.alldomains.level.changed.formatOnComputeResource = "" esx.audit.uw.secpolicy.alldomains.level.changed.formatOnDatacenter = "" esx.audit.uw.secpolicy.alldomains.level.changed.fullFormat = "The enforcement level for all security domains has been changed to {1}. The enforcement level must always be set to enforcing." # esx.audit.usb.config.changed.category = "info" esx.audit.usb.config.changed.description = "USB configuration has changed." esx.audit.usb.config.changed.formatOnVm = "" esx.audit.usb.config.changed.formatOnHost = "USB configuration has changed." esx.audit.usb.config.changed.formatOnComputeResource = "USB configuration has changed on host {host.name}." esx.audit.usb.config.changed.formatOnDatacenter = "USB configuration has changed on host {host.name} in cluster {computeResource.name}." esx.audit.usb.config.changed.fullFormat = "USB configuration has changed on host {host.name} in cluster {computeResource.name} in {datacenter.name}." # Events for unexpected VM terminations esx.problem.vm.kill.unexpected.noSwapResponse.category = "error" esx.problem.vm.kill.unexpected.noSwapResponse.description = "A VM did not respond to swap actions and is forcefully powered off to prevent system instability." esx.problem.vm.kill.unexpected.noSwapResponse.formatOnVm = "" esx.problem.vm.kill.unexpected.noSwapResponse.formatOnHost = "" esx.problem.vm.kill.unexpected.noSwapResponse.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.noSwapResponse.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.noSwapResponse.fullFormat = "The VM using the config file {1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability." 
esx.problem.vm.kill.unexpected.forcefulPageRetire.category = "error" esx.problem.vm.kill.unexpected.forcefulPageRetire.description = "A VM contains a host physical page scheduled for immediate retirement and is forcefully powered off to prevent system instability." esx.problem.vm.kill.unexpected.forcefulPageRetire.formatOnVm = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.formatOnHost = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.fullFormat = "The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off." esx.problem.vm.kill.unexpected.forcefulPageRetire.64.category = "error" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.description = "A VM contains a host physical page scheduled for immediate retirement and is forcefully powered off to prevent system instability." esx.problem.vm.kill.unexpected.forcefulPageRetire.64.formatOnVm = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.formatOnHost = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.fullFormat = "The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off." esx.problem.vm.kill.unexpected.vmtrack.category = "error" esx.problem.vm.kill.unexpected.vmtrack.description = "A VM is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability." 
esx.problem.vm.kill.unexpected.vmtrack.formatOnVm = "" esx.problem.vm.kill.unexpected.vmtrack.formatOnHost = "" esx.problem.vm.kill.unexpected.vmtrack.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.vmtrack.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.vmtrack.fullFormat = "The VM using the config file {1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability." esx.problem.vm.kill.unexpected.fault.failure.category = "error" esx.problem.vm.kill.unexpected.fault.failure.description = "A VM could not fault in a page. The VM is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.fault.failure.formatOnVm = "" esx.problem.vm.kill.unexpected.fault.failure.formatOnHost = "" esx.problem.vm.kill.unexpected.fault.failure.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.fault.failure.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.fault.failure.fullFormat = "The VM using the config file {1} could not fault in a guest physical page from the hypervisor level swap file at {2}. The VM is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.noSwapResponse.2.category = "error" esx.problem.vm.kill.unexpected.noSwapResponse.2.description = "A virtual machine did not respond to swap actions. It is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.noSwapResponse.2.formatOnVm = "The virtual machine did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability." 
esx.problem.vm.kill.unexpected.noSwapResponse.2.formatOnHost = "" esx.problem.vm.kill.unexpected.noSwapResponse.2.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.noSwapResponse.2.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.noSwapResponse.2.fullFormat = "{1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability." esx.problem.vm.kill.unexpected.vmx.fault.failure.2.category = "error" esx.problem.vm.kill.unexpected.vmx.fault.failure.2.description = "A user world daemon of a virtual machine could not fault in a page. The VM is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.vmx.fault.failure.2.formatOnVm = "The user world daemon of this virtual machine could not fault in a page. The virtual machine is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.vmx.fault.failure.2.formatOnHost = "" esx.problem.vm.kill.unexpected.vmx.fault.failure.2.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.vmx.fault.failure.2.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.vmx.fault.failure.2.fullFormat = "The user world daemon of {1} could not fault in a page. The virtual machine is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.fault.failure.2.category = "error" esx.problem.vm.kill.unexpected.fault.failure.2.description = "A virtual machine could not fault in a page. It is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.fault.failure.2.formatOnVm = "The virtual machine could not fault in a guest physical page from the hypervisor level swap file on {2}. 
The VM is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.fault.failure.2.formatOnHost = "" esx.problem.vm.kill.unexpected.fault.failure.2.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.fault.failure.2.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.fault.failure.2.fullFormat = "{1} could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossible." esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.category = "error" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.description = "A virtual machine contained a host physical page that was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off." esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.formatOnVm = "The virtual machine contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off." esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.formatOnHost = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2.fullFormat = "{1} contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off." esx.problem.vm.kill.unexpected.vmtrack.2.category = "error" esx.problem.vm.kill.unexpected.vmtrack.2.description = "A virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability." esx.problem.vm.kill.unexpected.vmtrack.2.formatOnVm = "The virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability." 
esx.problem.vm.kill.unexpected.vmtrack.2.formatOnHost = "" esx.problem.vm.kill.unexpected.vmtrack.2.formatOnComputeResource = "" esx.problem.vm.kill.unexpected.vmtrack.2.formatOnDatacenter = "" esx.problem.vm.kill.unexpected.vmtrack.2.fullFormat = "{1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability." # guest operations events com.vmware.vc.guestOperations.GuestOperation.category = "info" com.vmware.vc.guestOperations.GuestOperation.description = "Guest operation" com.vmware.vc.guestOperations.GuestOperation.formatOnComputeResource = "" com.vmware.vc.guestOperations.GuestOperation.formatOnDatacenter = "" com.vmware.vc.guestOperations.GuestOperation.formatOnHost = "" com.vmware.vc.guestOperations.GuestOperation.formatOnVm = "Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed." com.vmware.vc.guestOperations.GuestOperation.fullFormat = "Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed on Virtual machine {vm.name}." com.vmware.vc.guestOperations.GuestOperationAuthFailure.category = "warning" com.vmware.vc.guestOperations.GuestOperationAuthFailure.description = "Guest operation authentication failure" com.vmware.vc.guestOperations.GuestOperationAuthFailure.formatOnComputeResource = "" com.vmware.vc.guestOperations.GuestOperationAuthFailure.formatOnDatacenter = "" com.vmware.vc.guestOperations.GuestOperationAuthFailure.formatOnHost = "" com.vmware.vc.guestOperations.GuestOperationAuthFailure.formatOnVm = "Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp}." com.vmware.vc.guestOperations.GuestOperationAuthFailure.fullFormat = "Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp} on Virtual machine {vm.name}." 
# management agent events esx.audit.agent.hostd.started.category = "info" esx.audit.agent.hostd.started.description = "VMware Host Agent started" esx.audit.agent.hostd.started.formatOnComputeResource = "" esx.audit.agent.hostd.started.formatOnDatacenter = "" esx.audit.agent.hostd.started.formatOnHost = "VMware Host Agent started" esx.audit.agent.hostd.started.formatOnVm = "" esx.audit.agent.hostd.started.fullFormat = "VMware Host Agent started on host {host.name}." esx.audit.agent.hostd.stopped.category = "info" esx.audit.agent.hostd.stopped.description = "VMware Host Agent stopped" esx.audit.agent.hostd.stopped.formatOnComputeResource = "" esx.audit.agent.hostd.stopped.formatOnDatacenter = "" esx.audit.agent.hostd.stopped.formatOnHost = "VMware Host Agent stopped" esx.audit.agent.hostd.stopped.formatOnVm = "" esx.audit.agent.hostd.stopped.fullFormat = "VMware Host Agent stopped on host {host.name}." # Page Retire events esx.problem.pageretire.selectedmpnthreshold.host.exceeded.category = "warning" esx.problem.pageretire.selectedmpnthreshold.host.exceeded.description = "Number of host physical memory pages selected for retirement exceeds threshold." esx.problem.pageretire.selectedmpnthreshold.host.exceeded.formatOnVm = "" esx.problem.pageretire.selectedmpnthreshold.host.exceeded.formatOnHost = "" esx.problem.pageretire.selectedmpnthreshold.host.exceeded.formatOnComputeResource = "" esx.problem.pageretire.selectedmpnthreshold.host.exceeded.formatOnDatacenter = "" esx.problem.pageretire.selectedmpnthreshold.host.exceeded.fullFormat = "Number of host physical memory pages that have been selected for retirement ({1}) exceeds threshold ({2})." esx.problem.pageretire.platform.retire.request.category = "info" esx.problem.pageretire.platform.retire.request.description = "Memory page retirement requested by platform firmware." 
esx.problem.pageretire.platform.retire.request.formatOnVm = "" esx.problem.pageretire.platform.retire.request.formatOnHost = "" esx.problem.pageretire.platform.retire.request.formatOnComputeResource = "" esx.problem.pageretire.platform.retire.request.formatOnDatacenter = "" esx.problem.pageretire.platform.retire.request.fullFormat = "Memory page retirement requested by platform firmware. FRU ID: {1}. Refer to System Hardware Log: {2}" esx.problem.pageretire.selectedbutnotretired.high.category = "warning" esx.problem.pageretire.selectedbutnotretired.high.description = "Number of host physical memory pages that have been selected for retirement but could not yet be retired is high." esx.problem.pageretire.selectedbutnotretired.high.formatOnVm = "" esx.problem.pageretire.selectedbutnotretired.high.formatOnHost = "" esx.problem.pageretire.selectedbutnotretired.high.formatOnComputeResource = "" esx.problem.pageretire.selectedbutnotretired.high.formatOnDatacenter = "" esx.problem.pageretire.selectedbutnotretired.high.fullFormat = "Number of host physical memory pages that have been selected for retirement but could not yet be retired is high: ({1})" esx.problem.pageretire.mce.injected.category = "error" esx.problem.pageretire.mce.injected.description = "Virtual machine killed as it kept using a corrupted memory page." esx.problem.pageretire.mce.injected.formatOnVm = "" esx.problem.pageretire.mce.injected.formatOnHost = "" esx.problem.pageretire.mce.injected.formatOnComputeResource = "" esx.problem.pageretire.mce.injected.formatOnDatacenter = "" esx.problem.pageretire.mce.injected.fullFormat = "Killing virtual machine with config path {1} because at least {2} uncorrectable memory error machine check exceptions were injected for guest physical page {3} but the virtual machine's operating system kept using the page." 
esx.problem.pageretire.mce.injected.2.category = "error" esx.problem.pageretire.mce.injected.2.description = "A virtual machine was killed as it kept using a corrupted memory page." esx.problem.pageretire.mce.injected.2.formatOnVm = "The virtual machine was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected." esx.problem.pageretire.mce.injected.2.formatOnHost = "" esx.problem.pageretire.mce.injected.2.formatOnComputeResource = "" esx.problem.pageretire.mce.injected.2.formatOnDatacenter = "" esx.problem.pageretire.mce.injected.2.fullFormat = "{1} was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected." esx.audit.shell.enabled.category = "info" esx.audit.shell.enabled.description = "The ESXi command line shell has been enabled." esx.audit.shell.enabled.formatOnVm = "" esx.audit.shell.enabled.formatOnHost = "" esx.audit.shell.enabled.formatOnComputeResource = "" esx.audit.shell.enabled.formatOnDatacenter = "" esx.audit.shell.enabled.fullFormat = "The ESXi command line shell has been enabled." esx.audit.shell.disabled.category = "info" esx.audit.shell.disabled.description = "The ESXi command line shell has been disabled." esx.audit.shell.disabled.formatOnVm = "" esx.audit.shell.disabled.formatOnHost = "" esx.audit.shell.disabled.formatOnComputeResource = "" esx.audit.shell.disabled.formatOnDatacenter = "" esx.audit.shell.disabled.fullFormat = "The ESXi command line shell has been disabled." esx.audit.ssh.enabled.category = "info" esx.audit.ssh.enabled.description = "SSH access has been enabled." esx.audit.ssh.enabled.formatOnVm = "" esx.audit.ssh.enabled.formatOnHost = "" esx.audit.ssh.enabled.formatOnComputeResource = "" esx.audit.ssh.enabled.formatOnDatacenter = "" esx.audit.ssh.enabled.fullFormat = "SSH access has been enabled." 
esx.audit.ssh.disabled.category = "info" esx.audit.ssh.disabled.description = "SSH access has been disabled." esx.audit.ssh.disabled.formatOnVm = "" esx.audit.ssh.disabled.formatOnHost = "" esx.audit.ssh.disabled.formatOnComputeResource = "" esx.audit.ssh.disabled.formatOnDatacenter = "" esx.audit.ssh.disabled.fullFormat = "SSH access has been disabled." esx.audit.dcui.enabled.category = "info" esx.audit.dcui.enabled.description = "The DCUI has been enabled." esx.audit.dcui.enabled.formatOnVm = "" esx.audit.dcui.enabled.formatOnHost = "" esx.audit.dcui.enabled.formatOnComputeResource = "" esx.audit.dcui.enabled.formatOnDatacenter = "" esx.audit.dcui.enabled.fullFormat = "The DCUI has been enabled." esx.audit.dcui.disabled.category = "info" esx.audit.dcui.disabled.description = "The DCUI has been disabled." esx.audit.dcui.disabled.formatOnVm = "" esx.audit.dcui.disabled.formatOnHost = "" esx.audit.dcui.disabled.formatOnComputeResource = "" esx.audit.dcui.disabled.formatOnDatacenter = "" esx.audit.dcui.disabled.fullFormat = "The DCUI has been disabled." esx.audit.lockdownmode.enabled.category = "info" esx.audit.lockdownmode.enabled.description = "Administrator access to the host has been disabled." esx.audit.lockdownmode.enabled.formatOnVm = "" esx.audit.lockdownmode.enabled.formatOnHost = "" esx.audit.lockdownmode.enabled.formatOnComputeResource = "" esx.audit.lockdownmode.enabled.formatOnDatacenter = "" esx.audit.lockdownmode.enabled.fullFormat = "Administrator access to the host has been disabled." esx.audit.lockdownmode.disabled.category = "info" esx.audit.lockdownmode.disabled.description = "Administrator access to the host has been enabled." esx.audit.lockdownmode.disabled.formatOnVm = "" esx.audit.lockdownmode.disabled.formatOnHost = "" esx.audit.lockdownmode.disabled.formatOnComputeResource = "" esx.audit.lockdownmode.disabled.formatOnDatacenter = "" esx.audit.lockdownmode.disabled.fullFormat = "Administrator access to the host has been enabled." 
esx.audit.account.loginfailures.category = "warning" esx.audit.account.loginfailures.description = "Multiple remote login failures detected for an ESXi local user account." esx.audit.account.loginfailures.formatOnVm = "" esx.audit.account.loginfailures.formatOnHost = "" esx.audit.account.loginfailures.formatOnComputeResource = "" esx.audit.account.loginfailures.formatOnDatacenter = "" esx.audit.account.loginfailures.fullFormat = "Multiple remote login failures detected for ESXi local user account '{1}'." esx.audit.account.locked.category = "warning" esx.audit.account.locked.description = "Remote access for an ESXi local user account has been locked temporarily due to multiple failed login attempts." esx.audit.account.locked.formatOnVm = "" esx.audit.account.locked.formatOnHost = "" esx.audit.account.locked.formatOnComputeResource = "" esx.audit.account.locked.formatOnDatacenter = "" esx.audit.account.locked.fullFormat = "Remote access for ESXi local user account '{1}' has been locked for {2} seconds after {3} failed login attempts." esx.audit.lockdownmode.exceptions.changed.category = "info" esx.audit.lockdownmode.exceptions.changed.description = "List of lockdown exception users has been changed." esx.audit.lockdownmode.exceptions.changed.formatOnVm = "" esx.audit.lockdownmode.exceptions.changed.formatOnHost = "" esx.audit.lockdownmode.exceptions.changed.formatOnComputeResource = "" esx.audit.lockdownmode.exceptions.changed.formatOnDatacenter = "" esx.audit.lockdownmode.exceptions.changed.fullFormat = "List of lockdown exception users has been changed." esx.audit.maintenancemode.entering.category = "info" esx.audit.maintenancemode.entering.description = "The host has begun entering maintenance mode." 
esx.audit.maintenancemode.entering.formatOnVm = "" esx.audit.maintenancemode.entering.formatOnHost = "" esx.audit.maintenancemode.entering.formatOnComputeResource = "" esx.audit.maintenancemode.entering.formatOnDatacenter = "" esx.audit.maintenancemode.entering.fullFormat = "The host has begun entering maintenance mode." esx.audit.maintenancemode.canceled.category = "info" esx.audit.maintenancemode.canceled.description = "The host has canceled entering maintenance mode." esx.audit.maintenancemode.canceled.formatOnVm = "" esx.audit.maintenancemode.canceled.formatOnHost = "" esx.audit.maintenancemode.canceled.formatOnComputeResource = "" esx.audit.maintenancemode.canceled.formatOnDatacenter = "" esx.audit.maintenancemode.canceled.fullFormat = "The host has canceled entering maintenance mode." esx.audit.maintenancemode.entered.category = "info" esx.audit.maintenancemode.entered.description = "The host has entered maintenance mode." esx.audit.maintenancemode.entered.formatOnVm = "" esx.audit.maintenancemode.entered.formatOnHost = "" esx.audit.maintenancemode.entered.formatOnComputeResource = "" esx.audit.maintenancemode.entered.formatOnDatacenter = "" esx.audit.maintenancemode.entered.fullFormat = "The host has entered maintenance mode." esx.audit.maintenancemode.exited.category = "info" esx.audit.maintenancemode.exited.description = "The host has exited maintenance mode." esx.audit.maintenancemode.exited.formatOnVm = "" esx.audit.maintenancemode.exited.formatOnHost = "" esx.audit.maintenancemode.exited.formatOnComputeResource = "" esx.audit.maintenancemode.exited.formatOnDatacenter = "" esx.audit.maintenancemode.exited.fullFormat = "The host has exited maintenance mode." esx.audit.maintenancemode.failed.category = "error" esx.audit.maintenancemode.failed.description = "The host has failed entering maintenance mode." 
esx.audit.maintenancemode.failed.formatOnVm = "" esx.audit.maintenancemode.failed.formatOnHost = "" esx.audit.maintenancemode.failed.formatOnComputeResource = "" esx.audit.maintenancemode.failed.formatOnDatacenter = "" esx.audit.maintenancemode.failed.fullFormat = "The host has failed entering maintenance mode." esx.problem.vmsyslogd.remote.failure.category = "error" esx.problem.vmsyslogd.remote.failure.description = "Remote logging host has become unreachable." esx.problem.vmsyslogd.remote.failure.formatOnVm = "" esx.problem.vmsyslogd.remote.failure.formatOnHost = "" esx.problem.vmsyslogd.remote.failure.formatOnComputeResource = "" esx.problem.vmsyslogd.remote.failure.formatOnDatacenter = "" esx.problem.vmsyslogd.remote.failure.fullFormat = "The host \"{1}\" has become unreachable. Remote logging to this host has stopped." esx.problem.vmsyslogd.storage.failure.category = "error" esx.problem.vmsyslogd.storage.failure.description = "Logging to storage has failed." esx.problem.vmsyslogd.storage.failure.formatOnVm = "" esx.problem.vmsyslogd.storage.failure.formatOnHost = "" esx.problem.vmsyslogd.storage.failure.formatOnComputeResource = "" esx.problem.vmsyslogd.storage.failure.formatOnDatacenter = "" esx.problem.vmsyslogd.storage.failure.fullFormat = "Logging to storage has failed. Logs are no longer being stored locally on this host." esx.problem.vmsyslogd.unexpected.category = "error" esx.problem.vmsyslogd.unexpected.description = "Log daemon has failed for an unexpected reason." esx.problem.vmsyslogd.unexpected.formatOnVm = "" esx.problem.vmsyslogd.unexpected.formatOnHost = "" esx.problem.vmsyslogd.unexpected.formatOnComputeResource = "" esx.problem.vmsyslogd.unexpected.formatOnDatacenter = "" esx.problem.vmsyslogd.unexpected.fullFormat = "Log daemon has failed for an unexpected reason: {1}" esx.problem.vmsyslogd.storage.logdir.invalid.category = "error" esx.problem.vmsyslogd.storage.logdir.invalid.description = "The configured log directory cannot be used. 
The default directory will be used instead." esx.problem.vmsyslogd.storage.logdir.invalid.formatOnVm = "" esx.problem.vmsyslogd.storage.logdir.invalid.formatOnHost = "" esx.problem.vmsyslogd.storage.logdir.invalid.formatOnComputeResource = "" esx.problem.vmsyslogd.storage.logdir.invalid.formatOnDatacenter = "" esx.problem.vmsyslogd.storage.logdir.invalid.fullFormat = "The configured log directory {1} cannot be used. The default directory {2} will be used instead." # Syslog is not configured esx.problem.syslog.config.category = "warning" esx.problem.syslog.config.description = "System logging is not configured." esx.problem.syslog.config.formatOnVm = "" esx.problem.syslog.config.formatOnHost = "System logging is not configured on host {host.name}." esx.problem.syslog.config.formatOnComputeResource = "" esx.problem.syslog.config.formatOnDatacenter = "" esx.problem.syslog.config.fullFormat = "System logging is not configured on host {host.name}. Please check Syslog options for the host under Configuration -> Software -> Advanced Settings in vSphere client." # Syslog location is not persistent esx.problem.syslog.nonpersistent.category = "warning" esx.problem.syslog.nonpersistent.description = "System logs are stored on non-persistent storage." esx.problem.syslog.nonpersistent.formatOnVm = "" esx.problem.syslog.nonpersistent.formatOnHost = "System logs on host {host.name} are stored on non-persistent storage." esx.problem.syslog.nonpersistent.formatOnComputeResource = "" esx.problem.syslog.nonpersistent.formatOnDatacenter = "" esx.problem.syslog.nonpersistent.fullFormat = "System logs on host {host.name} are stored on non-persistent storage. Consult product documentation to configure a syslog server or a scratch partition." 
esx.audit.esximage.hostacceptance.changed.category = "info" esx.audit.esximage.hostacceptance.changed.description = "Host acceptance level changed" esx.audit.esximage.hostacceptance.changed.formatOnVm = "" esx.audit.esximage.hostacceptance.changed.formatOnHost = "" esx.audit.esximage.hostacceptance.changed.formatOnComputeResource = "" esx.audit.esximage.hostacceptance.changed.formatOnDatacenter = "" esx.audit.esximage.hostacceptance.changed.fullFormat = "Host acceptance level changed from {1} to {2}" esx.problem.esximage.install.invalidhardware.category = "error" esx.problem.esximage.install.invalidhardware.description = "Host doesn't meet image profile hardware requirements." esx.problem.esximage.install.invalidhardware.formatOnVm = "" esx.problem.esximage.install.invalidhardware.formatOnHost = "" esx.problem.esximage.install.invalidhardware.formatOnComputeResource = "" esx.problem.esximage.install.invalidhardware.formatOnDatacenter = "" esx.problem.esximage.install.invalidhardware.fullFormat = "Host doesn't meet image profile '{1}' hardware requirements: {2}" esx.audit.esximage.install.securityalert.category = "warning" esx.audit.esximage.install.securityalert.description = "SECURITY ALERT: Installing image profile." esx.audit.esximage.install.securityalert.formatOnVm = "" esx.audit.esximage.install.securityalert.formatOnHost = "" esx.audit.esximage.install.securityalert.formatOnComputeResource = "" esx.audit.esximage.install.securityalert.formatOnDatacenter = "" esx.audit.esximage.install.securityalert.fullFormat = "SECURITY ALERT: Installing image profile '{1}' with {2}." esx.problem.esximage.install.stage.error.category = "error" esx.problem.esximage.install.stage.error.description = "Could not stage image profile." 
esx.problem.esximage.install.stage.error.formatOnVm = "" esx.problem.esximage.install.stage.error.formatOnHost = "" esx.problem.esximage.install.stage.error.formatOnComputeResource = "" esx.problem.esximage.install.stage.error.formatOnDatacenter = "" esx.problem.esximage.install.stage.error.fullFormat = "Could not stage image profile '{1}': {2}" esx.problem.esximage.install.error.category = "error" esx.problem.esximage.install.error.description = "Could not install image profile." esx.problem.esximage.install.error.formatOnVm = "" esx.problem.esximage.install.error.formatOnHost = "" esx.problem.esximage.install.error.formatOnComputeResource = "" esx.problem.esximage.install.error.formatOnDatacenter = "" esx.problem.esximage.install.error.fullFormat = "Could not install image profile: {1}" esx.audit.esximage.vib.install.successful.category = "info" esx.audit.esximage.vib.install.successful.description = "Successfully installed VIBs." esx.audit.esximage.vib.install.successful.formatOnVm = "" esx.audit.esximage.vib.install.successful.formatOnHost = "" esx.audit.esximage.vib.install.successful.formatOnComputeResource = "" esx.audit.esximage.vib.install.successful.formatOnDatacenter = "" esx.audit.esximage.vib.install.successful.fullFormat = "Successfully installed {1} VIB(s), removed {2} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction." esx.audit.esximage.vib.remove.successful.category = "info" esx.audit.esximage.vib.remove.successful.description = "Successfully removed VIBs" esx.audit.esximage.vib.remove.successful.formatOnVm = "" esx.audit.esximage.vib.remove.successful.formatOnHost = "" esx.audit.esximage.vib.remove.successful.formatOnComputeResource = "" esx.audit.esximage.vib.remove.successful.formatOnDatacenter = "" esx.audit.esximage.vib.remove.successful.fullFormat = "Successfully removed {1} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction." 
esx.audit.esximage.install.novalidation.category = "warning" esx.audit.esximage.install.novalidation.description = "Attempting to install an image profile with validation disabled." esx.audit.esximage.install.novalidation.formatOnVm = "" esx.audit.esximage.install.novalidation.formatOnHost = "" esx.audit.esximage.install.novalidation.formatOnComputeResource = "" esx.audit.esximage.install.novalidation.formatOnDatacenter = "" esx.audit.esximage.install.novalidation.fullFormat = "Attempting to install an image profile with validation disabled. This may result in an image with unsatisfied dependencies, file or package conflicts, and potential security violations." esx.audit.esximage.profile.install.successful.category = "info" esx.audit.esximage.profile.install.successful.description = "Successfully installed image profile." esx.audit.esximage.profile.install.successful.formatOnVm = "" esx.audit.esximage.profile.install.successful.formatOnHost = "" esx.audit.esximage.profile.install.successful.formatOnComputeResource = "" esx.audit.esximage.profile.install.successful.formatOnDatacenter = "" esx.audit.esximage.profile.install.successful.fullFormat = "Successfully installed image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction." esx.audit.esximage.profile.update.successful.category = "info" esx.audit.esximage.profile.update.successful.description = "Successfully updated host to new image profile." esx.audit.esximage.profile.update.successful.formatOnVm = "" esx.audit.esximage.profile.update.successful.formatOnHost = "" esx.audit.esximage.profile.update.successful.formatOnComputeResource = "" esx.audit.esximage.profile.update.successful.formatOnDatacenter = "" esx.audit.esximage.profile.update.successful.fullFormat = "Successfully updated host to image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). 
Please use 'esxcli software profile get' or see log for more detail about the transaction." esx.problem.dhclient.lease.none.category = "error" esx.problem.dhclient.lease.none.description = "Unable to obtain a DHCP lease." esx.problem.dhclient.lease.none.formatOnVm = "" esx.problem.dhclient.lease.none.formatOnHost = "" esx.problem.dhclient.lease.none.formatOnComputeResource = "" esx.problem.dhclient.lease.none.formatOnDatacenter = "" esx.problem.dhclient.lease.none.fullFormat = "Unable to obtain a DHCP lease on interface {1}." esx.problem.dhclient.lease.offered.error.category = "error" esx.problem.dhclient.lease.offered.error.description = "No expiry time on offered DHCP lease." esx.problem.dhclient.lease.offered.error.formatOnVm = "" esx.problem.dhclient.lease.offered.error.formatOnHost = "" esx.problem.dhclient.lease.offered.error.formatOnComputeResource = "" esx.problem.dhclient.lease.offered.error.formatOnDatacenter = "" esx.problem.dhclient.lease.offered.error.fullFormat = "No expiry time on offered DHCP lease from {1}." esx.problem.dhclient.lease.offered.noexpiry.category = "error" esx.problem.dhclient.lease.offered.noexpiry.description = "No expiry time on offered DHCP lease." esx.problem.dhclient.lease.offered.noexpiry.formatOnVm = "" esx.problem.dhclient.lease.offered.noexpiry.formatOnHost = "" esx.problem.dhclient.lease.offered.noexpiry.formatOnComputeResource = "" esx.problem.dhclient.lease.offered.noexpiry.formatOnDatacenter = "" esx.problem.dhclient.lease.offered.noexpiry.fullFormat = "No expiry time on offered DHCP lease from {1}." esx.problem.ntpd.clock.correction.error.category = "error" esx.problem.ntpd.clock.correction.error.description = "NTP daemon stopped. Time correction out of bounds." 
esx.problem.ntpd.clock.correction.error.formatOnVm = "" esx.problem.ntpd.clock.correction.error.formatOnHost = "" esx.problem.ntpd.clock.correction.error.formatOnComputeResource = "" esx.problem.ntpd.clock.correction.error.formatOnDatacenter = "" esx.problem.ntpd.clock.correction.error.fullFormat = "NTP daemon stopped. Time correction {1} > {2} seconds. Manually set the time and restart ntpd." esx.problem.visorfs.failure.category = "error" esx.problem.visorfs.failure.description = "An operation on the root filesystem has failed." esx.problem.visorfs.failure.formatOnVm = "" esx.problem.visorfs.failure.formatOnHost = "" esx.problem.visorfs.failure.formatOnComputeResource = "" esx.problem.visorfs.failure.formatOnDatacenter = "" esx.problem.visorfs.failure.fullFormat = "An operation on the root filesystem has failed." esx.problem.visorfs.ramdisk.full.category = "error" esx.problem.visorfs.ramdisk.full.description = "A ramdisk is full." esx.problem.visorfs.ramdisk.full.formatOnVm = "" esx.problem.visorfs.ramdisk.full.formatOnHost = "" esx.problem.visorfs.ramdisk.full.formatOnComputeResource = "" esx.problem.visorfs.ramdisk.full.formatOnDatacenter = "" esx.problem.visorfs.ramdisk.full.fullFormat = "The ramdisk '{1}' is full. As a result, the file {2} could not be written." esx.problem.visorfs.inodetable.full.category = "error" esx.problem.visorfs.inodetable.full.description = "The root filesystem's file table is full." esx.problem.visorfs.inodetable.full.formatOnVm = "" esx.problem.visorfs.inodetable.full.formatOnHost = "" esx.problem.visorfs.inodetable.full.formatOnComputeResource = "" esx.problem.visorfs.inodetable.full.formatOnDatacenter = "" esx.problem.visorfs.inodetable.full.fullFormat = "The root filesystem's file table is full. As a result, the file {1} could not be created by the application '{2}'." esx.problem.visorfs.ramdisk.inodetable.full.category = "error" esx.problem.visorfs.ramdisk.inodetable.full.description = "A ramdisk's file table is full." 
esx.problem.visorfs.ramdisk.inodetable.full.formatOnVm = "" esx.problem.visorfs.ramdisk.inodetable.full.formatOnHost = "" esx.problem.visorfs.ramdisk.inodetable.full.formatOnComputeResource = "" esx.problem.visorfs.ramdisk.inodetable.full.formatOnDatacenter = "" esx.problem.visorfs.ramdisk.inodetable.full.fullFormat = "The file table of the ramdisk '{1}' is full. As a result, the file {2} could not be created by the application '{3}'." # boot, reboot, &c. esx.audit.host.boot.category = "info" esx.audit.host.boot.description = "Host has booted." esx.audit.host.boot.formatOnVm = "" esx.audit.host.boot.formatOnHost = "" esx.audit.host.boot.formatOnComputeResource = "" esx.audit.host.boot.formatOnDatacenter = "" esx.audit.host.boot.fullFormat = "Host has booted." esx.audit.host.stop.reboot.category = "info" esx.audit.host.stop.reboot.description = "Host is rebooting." esx.audit.host.stop.reboot.formatOnVm = "" esx.audit.host.stop.reboot.formatOnHost = "" esx.audit.host.stop.reboot.formatOnComputeResource = "" esx.audit.host.stop.reboot.formatOnDatacenter = "" esx.audit.host.stop.reboot.fullFormat = "Host is rebooting." esx.audit.host.stop.shutdown.category = "info" esx.audit.host.stop.shutdown.description = "Host is shutting down." esx.audit.host.stop.shutdown.formatOnVm = "" esx.audit.host.stop.shutdown.formatOnHost = "" esx.audit.host.stop.shutdown.formatOnComputeResource = "" esx.audit.host.stop.shutdown.formatOnDatacenter = "" esx.audit.host.stop.shutdown.fullFormat = "Host is shutting down." esx.problem.host.coredump.category = "warning" esx.problem.host.coredump.description = "An unread host kernel core dump has been found." esx.problem.host.coredump.formatOnVm = "" esx.problem.host.coredump.formatOnHost = "" esx.problem.host.coredump.formatOnComputeResource = "" esx.problem.host.coredump.formatOnDatacenter = "" esx.problem.host.coredump.fullFormat = "An unread host kernel core dump has been found." 
esx.problem.coredump.unconfigured.category = "warning" esx.problem.coredump.unconfigured.description = "No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved." esx.problem.coredump.unconfigured.formatOnVm = "" esx.problem.coredump.unconfigured.formatOnHost = "No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved." esx.problem.coredump.unconfigured.formatOnComputeResource = "" esx.problem.coredump.unconfigured.formatOnDatacenter = "" esx.problem.coredump.unconfigured.fullFormat = "No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved." esx.clear.coredump.configured.category = "info" esx.clear.coredump.configured.description = "A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved." esx.clear.coredump.configured.formatOnVm = "" esx.clear.coredump.configured.formatOnHost = "A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved." esx.clear.coredump.configured.formatOnComputeResource = "" esx.clear.coredump.configured.formatOnDatacenter = "" esx.clear.coredump.configured.fullFormat = "A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved." esx.problem.coredump.unconfigured2.category = "warning" esx.problem.coredump.unconfigured2.description = "No coredump target has been configured. Host core dumps cannot be saved." esx.problem.coredump.unconfigured2.formatOnVm = "" esx.problem.coredump.unconfigured2.formatOnHost = "No coredump target has been configured. Host core dumps cannot be saved." 
esx.problem.coredump.unconfigured2.formatOnComputeResource = "" esx.problem.coredump.unconfigured2.formatOnDatacenter = "" esx.problem.coredump.unconfigured2.fullFormat = "No coredump target has been configured. Host core dumps cannot be saved." esx.clear.coredump.configured2.category = "info" esx.clear.coredump.configured2.description = "At least one coredump target has been configured. Host core dumps will be saved." esx.clear.coredump.configured2.formatOnVm = "" esx.clear.coredump.configured2.formatOnHost = "At least one coredump target has been configured. Host core dumps will be saved." esx.clear.coredump.configured2.formatOnComputeResource = "" esx.clear.coredump.configured2.formatOnDatacenter = "" esx.clear.coredump.configured2.fullFormat = "At least one coredump target has been configured. Host core dumps will be saved." esx.problem.coredump.capacity.insufficient.category = "warning" esx.problem.coredump.capacity.insufficient.description = "The storage capacity of the coredump targets is insufficient to capture a complete coredump." esx.problem.coredump.capacity.insufficient.formatOnVm = "" esx.problem.coredump.capacity.insufficient.formatOnHost = "The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB." esx.problem.coredump.capacity.insufficient.formatOnComputeResource = "" esx.problem.coredump.capacity.insufficient.formatOnDatacenter = "" esx.problem.coredump.capacity.insufficient.fullFormat = "The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB." esx.problem.coredump.copyspace.category = "warning" esx.problem.coredump.copyspace.description = "The free space available in default coredump copy location is insufficient to copy new coredumps." 
esx.problem.coredump.copyspace.formatOnVm = "" esx.problem.coredump.copyspace.formatOnHost = "The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB." esx.problem.coredump.copyspace.formatOnComputeResource = "" esx.problem.coredump.copyspace.formatOnDatacenter = "" esx.problem.coredump.copyspace.fullFormat = "The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB." esx.problem.coredump.extraction.failed.nospace.category = "warning" esx.problem.coredump.extraction.failed.nospace.description = "The given partition has insufficient amount of free space to extract the coredump." esx.problem.coredump.extraction.failed.nospace.formatOnVm = "" esx.problem.coredump.extraction.failed.nospace.formatOnHost = "The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required." esx.problem.coredump.extraction.failed.nospace.formatOnComputeResource = "" esx.problem.coredump.extraction.failed.nospace.formatOnDatacenter = "" esx.problem.coredump.extraction.failed.nospace.fullFormat = "The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required." # scratch partition esx.problem.scratch.partition.size.small.category = "warning" esx.problem.scratch.partition.size.small.description = "Size of scratch partition is too small." esx.problem.scratch.partition.size.small.formatOnVm = "" esx.problem.scratch.partition.size.small.formatOnHost = "Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB." esx.problem.scratch.partition.size.small.formatOnComputeResource = "" esx.problem.scratch.partition.size.small.formatOnDatacenter = "" esx.problem.scratch.partition.size.small.fullFormat = "Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB." 
esx.problem.scratch.partition.unconfigured.category = "warning" esx.problem.scratch.partition.unconfigured.description = "No scratch partition has been configured." esx.problem.scratch.partition.unconfigured.formatOnVm = "" esx.problem.scratch.partition.unconfigured.formatOnHost = "No scratch partition has been configured. Recommended scratch partition size is {1} MiB." esx.problem.scratch.partition.unconfigured.formatOnComputeResource = "" esx.problem.scratch.partition.unconfigured.formatOnDatacenter = "" esx.problem.scratch.partition.unconfigured.fullFormat = "No scratch partition has been configured. Recommended scratch partition size is {1} MiB." # IOFilter esx.problem.iofilter.disabled.category = "error" esx.problem.iofilter.disabled.description = "An iofilter installed on the host has stopped functioning." esx.problem.iofilter.disabled.formatOnVm = "" esx.problem.iofilter.disabled.formatOnHost = "IOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}" esx.problem.iofilter.disabled.formatOnComputeResource = "" esx.problem.iofilter.disabled.formatOnDatacenter = "" esx.problem.iofilter.disabled.fullFormat = "IOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}" # nominal/missing drivers esx.problem.driver.abnormal.category = "warning" esx.problem.driver.abnormal.description = "Some drivers need special notice." esx.problem.driver.abnormal.formatOnVm = "" esx.problem.driver.abnormal.formatOnHost = "Driver for device {1} is {2}. Please refer to KB article: {3}." esx.problem.driver.abnormal.formatOnComputeResource = "" esx.problem.driver.abnormal.formatOnDatacenter = "" esx.problem.driver.abnormal.fullFormat = "Driver for device {1} is {2}. Please refer to KB article: {3}." # VFAT esx.problem.vfat.filesystem.full.other.category = "error" esx.problem.vfat.filesystem.full.other.description = "A VFAT filesystem is full." 
esx.problem.vfat.filesystem.full.other.formatOnVm = "" esx.problem.vfat.filesystem.full.other.formatOnHost = "" esx.problem.vfat.filesystem.full.other.formatOnComputeResource = "" esx.problem.vfat.filesystem.full.other.formatOnDatacenter = "" esx.problem.vfat.filesystem.full.other.fullFormat = "The VFAT filesystem {1} (UUID {2}) is full." esx.problem.vfat.filesystem.full.scratch.category = "error" esx.problem.vfat.filesystem.full.scratch.description = "A VFAT filesystem, being used as the host's scratch partition, is full." esx.problem.vfat.filesystem.full.scratch.formatOnVm = "" esx.problem.vfat.filesystem.full.scratch.formatOnHost = "" esx.problem.vfat.filesystem.full.scratch.formatOnComputeResource = "" esx.problem.vfat.filesystem.full.scratch.formatOnDatacenter = "" esx.problem.vfat.filesystem.full.scratch.fullFormat = "The host's scratch partition, which is the VFAT filesystem {1} (UUID {2}), is full." # System clock esx.problem.clock.correction.delta.warning.category = "warning" esx.problem.clock.correction.delta.warning.description = "Allowed system clock update with large time change, but number of future updates limited" esx.problem.clock.correction.delta.warning.formatOnVm = "" esx.problem.clock.correction.delta.warning.formatOnHost = "" esx.problem.clock.correction.delta.warning.formatOnComputeResource = "" esx.problem.clock.correction.delta.warning.formatOnDatacenter = "" esx.problem.clock.correction.delta.warning.fullFormat = "Clock stepped to {1}.{2}, but delta {3} > {4} seconds, {5}/{6} large corrections" esx.problem.clock.correction.delta.failed.category = "error" esx.problem.clock.correction.delta.failed.description = "Failed system clock update with large time change" esx.problem.clock.correction.delta.failed.formatOnVm = "" esx.problem.clock.correction.delta.failed.formatOnHost = "" esx.problem.clock.correction.delta.failed.formatOnComputeResource = "" esx.problem.clock.correction.delta.failed.formatOnDatacenter = "" 
esx.problem.clock.correction.delta.failed.fullFormat = "Clock step to {1}.{2} failed, delta {3} > {4} seconds, number of large corrections > {5}" esx.problem.clock.correction.delta.allowed.category = "warning" esx.problem.clock.correction.delta.allowed.description = "Allowed system clock update with large time change" esx.problem.clock.correction.delta.allowed.formatOnVm = "" esx.problem.clock.correction.delta.allowed.formatOnHost = "" esx.problem.clock.correction.delta.allowed.formatOnComputeResource = "" esx.problem.clock.correction.delta.allowed.formatOnDatacenter = "" esx.problem.clock.correction.delta.allowed.fullFormat = "Clock stepped to {1}.{2}, but delta {3} > {4} seconds" esx.problem.clock.state.reset.category = "warning" esx.problem.clock.state.reset.description = "System clock state has been reset" esx.problem.clock.state.reset.formatOnVm = "" esx.problem.clock.state.reset.formatOnHost = "" esx.problem.clock.state.reset.formatOnComputeResource = "" esx.problem.clock.state.reset.formatOnDatacenter = "" esx.problem.clock.state.reset.fullFormat = "system clock state has been reset" esx.problem.clock.parameter.set.maxNegPhaseCorrection.category = "warning" esx.problem.clock.parameter.set.maxNegPhaseCorrection.description = "System clock maximum negative phase correction changed" esx.problem.clock.parameter.set.maxNegPhaseCorrection.formatOnVm = "" esx.problem.clock.parameter.set.maxNegPhaseCorrection.formatOnHost = "" esx.problem.clock.parameter.set.maxNegPhaseCorrection.formatOnComputeResource = "" esx.problem.clock.parameter.set.maxNegPhaseCorrection.formatOnDatacenter = "" esx.problem.clock.parameter.set.maxNegPhaseCorrection.fullFormat = "system clock max negative phase correction set to {1}" esx.problem.clock.parameter.set.maxPosPhaseCorrection.category = "warning" esx.problem.clock.parameter.set.maxPosPhaseCorrection.description = "System clock maximum positive phase correction changed" esx.problem.clock.parameter.set.maxPosPhaseCorrection.formatOnVm 
= "" esx.problem.clock.parameter.set.maxPosPhaseCorrection.formatOnHost = "" esx.problem.clock.parameter.set.maxPosPhaseCorrection.formatOnComputeResource = "" esx.problem.clock.parameter.set.maxPosPhaseCorrection.formatOnDatacenter = "" esx.problem.clock.parameter.set.maxPosPhaseCorrection.fullFormat = "system clock max positive phase correction set to {1}" esx.problem.clock.parameter.set.numLargeCorrections.category = "warning" esx.problem.clock.parameter.set.numLargeCorrections.description = "System clock count of number of large corrections changed" esx.problem.clock.parameter.set.numLargeCorrections.formatOnVm = "" esx.problem.clock.parameter.set.numLargeCorrections.formatOnHost = "" esx.problem.clock.parameter.set.numLargeCorrections.formatOnComputeResource = "" esx.problem.clock.parameter.set.numLargeCorrections.formatOnDatacenter = "" esx.problem.clock.parameter.set.numLargeCorrections.fullFormat = "system clock number of large correction set to {1}" esx.problem.clock.parameter.set.maxLargeCorrections.category = "warning" esx.problem.clock.parameter.set.maxLargeCorrections.description = "System clock maximum number of large corrections changed" esx.problem.clock.parameter.set.maxLargeCorrections.formatOnVm = "" esx.problem.clock.parameter.set.maxLargeCorrections.formatOnHost = "" esx.problem.clock.parameter.set.maxLargeCorrections.formatOnComputeResource = "" esx.problem.clock.parameter.set.maxLargeCorrections.formatOnDatacenter = "" esx.problem.clock.parameter.set.maxLargeCorrections.fullFormat = "system clock max number of correction set to {1}" esx.problem.clock.correction.step.unsync.category = "warning" esx.problem.clock.correction.step.unsync.description = "System clock stepped, lost synchronization" esx.problem.clock.correction.step.unsync.formatOnVm = "" esx.problem.clock.correction.step.unsync.formatOnHost = "" esx.problem.clock.correction.step.unsync.formatOnComputeResource = "" esx.problem.clock.correction.step.unsync.formatOnDatacenter = "" 
esx.problem.clock.correction.step.unsync.fullFormat = "system clock stepped to {1}.{2}, lost synchronization" esx.problem.clock.correction.adjtime.sync.category = "warning" esx.problem.clock.correction.adjtime.sync.description = "System clock synchronized to upstream time servers" esx.problem.clock.correction.adjtime.sync.formatOnVm = "" esx.problem.clock.correction.adjtime.sync.formatOnHost = "" esx.problem.clock.correction.adjtime.sync.formatOnComputeResource = "" esx.problem.clock.correction.adjtime.sync.formatOnDatacenter = "" esx.problem.clock.correction.adjtime.sync.fullFormat = "system clock synchronized to upstream time servers" esx.problem.clock.correction.adjtime.unsync.category = "warning" esx.problem.clock.correction.adjtime.unsync.description = "System clock lost synchronization to upstream time servers" esx.problem.clock.correction.adjtime.unsync.formatOnVm = "" esx.problem.clock.correction.adjtime.unsync.formatOnHost = "" esx.problem.clock.correction.adjtime.unsync.formatOnComputeResource = "" esx.problem.clock.correction.adjtime.unsync.formatOnDatacenter = "" esx.problem.clock.correction.adjtime.unsync.fullFormat = "system clock lost synchronization to upstream time servers" esx.problem.clock.correction.adjtime.lostsync.category = "warning" esx.problem.clock.correction.adjtime.lostsync.description = "System clock no longer synchronized to upstream time servers" esx.problem.clock.correction.adjtime.lostsync.formatOnVm = "" esx.problem.clock.correction.adjtime.lostsync.formatOnHost = "" esx.problem.clock.correction.adjtime.lostsync.formatOnComputeResource = "" esx.problem.clock.correction.adjtime.lostsync.formatOnDatacenter = "" esx.problem.clock.correction.adjtime.lostsync.fullFormat = "system clock no longer synchronized to upstream time servers" # esx.problem.boot.filesystem.down.category = "error" esx.problem.boot.filesystem.down.description = "Lost connectivity to the device backing the boot filesystem" esx.problem.boot.filesystem.down.formatOnVm 
= "" esx.problem.boot.filesystem.down.formatOnHost = "Lost connectivity to the device {1} backing the boot filesystem {2}. As a result, host configuration changes will not be saved to persistent storage." esx.problem.boot.filesystem.down.formatOnComputeResource = "" esx.problem.boot.filesystem.down.formatOnDatacenter = "" esx.problem.boot.filesystem.down.fullFormat = "Lost connectivity to the device {1} backing the boot filesystem {2}. As a result, host configuration changes will not be saved to persistent storage." # com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.category = "warning" com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.description = "Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version." com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.formatOnComputeResource = "Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version." com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.formatOnDatacenter = "Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version." com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.formatOnHost = "Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version." com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.formatOnVm = "" com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound.fullFormat = "Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version." # vFlash com.vmware.vc.host.problem.vFlashResource.inaccessible.category = "warning" com.vmware.vc.host.problem.vFlashResource.inaccessible.description = "Host's virtual flash resource is inaccessible." com.vmware.vc.host.problem.vFlashResource.inaccessible.formatOnVm = "" com.vmware.vc.host.problem.vFlashResource.inaccessible.formatOnHost = "Host's virtual flash resource is inaccessible." 
com.vmware.vc.host.problem.vFlashResource.inaccessible.formatOnComputeResource = "Host's virtual flash resource is inaccessible." com.vmware.vc.host.problem.vFlashResource.inaccessible.formatOnDatacenter = "Host's virtual flash resource is inaccessible." com.vmware.vc.host.problem.vFlashResource.inaccessible.fullFormat = "Host's virtual flash resource is inaccessible." com.vmware.vc.host.clear.vFlashResource.inaccessible.category = "info" com.vmware.vc.host.clear.vFlashResource.inaccessible.description = "Host's virtual flash resource is accessible." com.vmware.vc.host.clear.vFlashResource.inaccessible.formatOnVm = "" com.vmware.vc.host.clear.vFlashResource.inaccessible.formatOnHost = "Host's virtual flash resource is restored to be accessible." com.vmware.vc.host.clear.vFlashResource.inaccessible.formatOnComputeResource = "Host's virtual flash resource is restored to be accessible." com.vmware.vc.host.clear.vFlashResource.inaccessible.formatOnDatacenter = "Host's virtual flash resource is restored to be accessible." com.vmware.vc.host.clear.vFlashResource.inaccessible.fullFormat = "Host's virtual flash resource is restored to be accessible." com.vmware.vc.host.problem.vFlashResource.reachthreshold.category = "warning" com.vmware.vc.host.problem.vFlashResource.reachthreshold.description = "Host's virtual flash resource usage exceeds the threshold." com.vmware.vc.host.problem.vFlashResource.reachthreshold.formatOnVm = "" com.vmware.vc.host.problem.vFlashResource.reachthreshold.formatOnHost = "Host's virtual flash resource usage is more than {1}%." com.vmware.vc.host.problem.vFlashResource.reachthreshold.formatOnComputeResource = "Host's virtual flash resource usage is more than {1}%." com.vmware.vc.host.problem.vFlashResource.reachthreshold.formatOnDatacenter = "Host's virtual flash resource usage is more than {1}%." com.vmware.vc.host.problem.vFlashResource.reachthreshold.fullFormat = "Host's virtual flash resource usage is more than {1}%." 
com.vmware.vc.host.clear.vFlashResource.reachthreshold.category = "info" com.vmware.vc.host.clear.vFlashResource.reachthreshold.description = "Host's virtual flash resource usage dropped below the threshold." com.vmware.vc.host.clear.vFlashResource.reachthreshold.formatOnVm = "" com.vmware.vc.host.clear.vFlashResource.reachthreshold.formatOnHost = "Host's virtual flash resource usage dropped below {1}%." com.vmware.vc.host.clear.vFlashResource.reachthreshold.formatOnComputeResource = "Host's virtual flash resource usage dropped below {1}%." com.vmware.vc.host.clear.vFlashResource.reachthreshold.formatOnDatacenter = "Host's virtual flash resource usage dropped below {1}%." com.vmware.vc.host.clear.vFlashResource.reachthreshold.fullFormat = "Host's virtual flash resource usage dropped below {1}%." com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.category = "info" com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.description = "Virtual flash resource is configured on the host" com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.formatOnVm = "" com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.formatOnHost = "Virtual flash resource is configured on the host" com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.formatOnComputeResource = "Virtual flash resource is configured on the host" com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.formatOnDatacenter = "Virtual flash resource is configured on the host" com.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent.fullFormat = "Virtual flash resource is configured on the host" com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.category = "info" com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.description = "Virtual flash resource is removed from the host" com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.formatOnVm = "" com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.formatOnHost = "Virtual flash resource is removed from the host" 
com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.formatOnComputeResource = "Virtual flash resource is removed from the host" com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.formatOnDatacenter = "Virtual flash resource is removed from the host" com.vmware.vc.host.vFlash.VFlashResourceRemovedEvent.fullFormat = "Virtual flash resource is removed from the host" com.vmware.vc.host.vFlash.modulesLoadedEvent.category = "info" com.vmware.vc.host.vFlash.modulesLoadedEvent.description = "Virtual flash modules are loaded or reloaded on the host" com.vmware.vc.host.vFlash.modulesLoadedEvent.formatOnComputeResource = "Virtual flash modules are loaded or reloaded on the host" com.vmware.vc.host.vFlash.modulesLoadedEvent.formatOnDatacenter = "Virtual flash modules are loaded or reloaded on the host" com.vmware.vc.host.vFlash.modulesLoadedEvent.formatOnHost = "Virtual flash modules are loaded or reloaded on the host" com.vmware.vc.host.vFlash.modulesLoadedEvent.formatOnVm = "" com.vmware.vc.host.vFlash.modulesLoadedEvent.fullFormat = "Virtual flash modules are loaded or reloaded on the host" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.category = "info" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.description = "Default virtual flash module is changed to {vFlashModule} on the host" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.formatOnComputeResource = "Default virtual flash module is changed to {vFlashModule} on the host" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.formatOnDatacenter = "Default virtual flash module is changed to {vFlashModule} on the host" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.formatOnHost = "Default virtual flash module is changed to {vFlashModule} on the host" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.formatOnVm = "" com.vmware.vc.host.vFlash.defaultModuleChangedEvent.fullFormat = "Any new virtual Flash Read Cache configuration request will use {vFlashModule} as default virtual flash module. 
All existing virtual Flash Read Cache configurations remain unchanged." # # vSAN # esx.clear.vsan.vmknic.ready.category = "info" esx.clear.vsan.vmknic.ready.description = "A previously reported vmknic now has a valid IP." esx.clear.vsan.vmknic.ready.formatOnVm = "" esx.clear.vsan.vmknic.ready.formatOnHost = "vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved." esx.clear.vsan.vmknic.ready.formatOnComputeResource = "" esx.clear.vsan.vmknic.ready.formatOnDatacenter = "" esx.clear.vsan.vmknic.ready.fullFormat = "vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved." # esx.problem.vsan.net.not.ready.category = "error" esx.problem.vsan.net.not.ready.description = "A vmknic added to vSAN network configuration doesn't have valid IP. Network is not ready." esx.problem.vsan.net.not.ready.formatOnVm = "" esx.problem.vsan.net.not.ready.formatOnHost = "vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configuration and therefore the vSAN node doesn't have network connectivity." esx.problem.vsan.net.not.ready.formatOnComputeResource = "" esx.problem.vsan.net.not.ready.formatOnDatacenter = "" esx.problem.vsan.net.not.ready.fullFormat = "vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configuration and therefore the vSAN node doesn't have network connectivity." # esx.problem.vsan.vmknic.not.ready.category = "warning" esx.problem.vsan.vmknic.not.ready.description = "A vmknic added to vSAN network configuration doesn't have valid IP. It will not be in use." esx.problem.vsan.vmknic.not.ready.formatOnVm = "" esx.problem.vsan.vmknic.not.ready.formatOnHost = "vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. 
However, there are other network configuration which are active. If those configurations are removed that may cause problems." esx.problem.vsan.vmknic.not.ready.formatOnComputeResource = "" esx.problem.vsan.vmknic.not.ready.formatOnDatacenter = "" esx.problem.vsan.vmknic.not.ready.fullFormat = "vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configuration which are active. If those configurations are removed that may cause problems." # esx.clear.vsan.network.available.category = "info" esx.clear.vsan.network.available.description = "vSAN now has at least one active network configuration." esx.clear.vsan.network.available.formatOnVm = "" esx.clear.vsan.network.available.formatOnHost = "vSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved." esx.clear.vsan.network.available.formatOnComputeResource = "" esx.clear.vsan.network.available.formatOnDatacenter = "" esx.clear.vsan.network.available.fullFormat = "vSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved." # esx.problem.vsan.no.network.connectivity.category = "error" esx.problem.vsan.no.network.connectivity.description = "vSAN doesn't have any network configuration for use." esx.problem.vsan.no.network.connectivity.formatOnVm = "" esx.problem.vsan.no.network.connectivity.formatOnHost = "vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore." 
esx.problem.vsan.no.network.connectivity.formatOnComputeResource = "" esx.problem.vsan.no.network.connectivity.formatOnDatacenter = "" esx.problem.vsan.no.network.connectivity.fullFormat = "vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore." # esx.problem.vsan.net.redundancy.reduced.category = "warning" esx.problem.vsan.net.redundancy.reduced.description = "vSAN is operating on reduced network redundancy." esx.problem.vsan.net.redundancy.reduced.formatOnVm = "" esx.problem.vsan.net.redundancy.reduced.formatOnHost = "vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed." esx.problem.vsan.net.redundancy.reduced.formatOnComputeResource = "" esx.problem.vsan.net.redundancy.reduced.formatOnDatacenter = "" esx.problem.vsan.net.redundancy.reduced.fullFormat = "vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed." # esx.problem.vsan.net.redundancy.lost.category = "warning" esx.problem.vsan.net.redundancy.lost.description = "vSAN doesn't have any redundancy in its network configuration." esx.problem.vsan.net.redundancy.lost.formatOnVm = "" esx.problem.vsan.net.redundancy.lost.formatOnHost = "vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed." esx.problem.vsan.net.redundancy.lost.formatOnComputeResource = "" esx.problem.vsan.net.redundancy.lost.formatOnDatacenter = "" esx.problem.vsan.net.redundancy.lost.fullFormat = "vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed." # esx.audit.vsan.net.vnic.added.category = "info" esx.audit.vsan.net.vnic.added.description = "vSAN virtual NIC has been added." esx.audit.vsan.net.vnic.added.formatOnVm = "" esx.audit.vsan.net.vnic.added.formatOnHost = "vSAN virtual NIC has been added." 
esx.audit.vsan.net.vnic.added.formatOnComputeResource = "" esx.audit.vsan.net.vnic.added.formatOnDatacenter = "" esx.audit.vsan.net.vnic.added.fullFormat = "vSAN virtual NIC has been added." # esx.audit.vsan.net.vnic.deleted.category = "error" esx.audit.vsan.net.vnic.deleted.description = "vSAN network configuration has been removed." esx.audit.vsan.net.vnic.deleted.formatOnVm = "" esx.audit.vsan.net.vnic.deleted.formatOnHost = "vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster." esx.audit.vsan.net.vnic.deleted.formatOnComputeResource = "" esx.audit.vsan.net.vnic.deleted.formatOnDatacenter = "" esx.audit.vsan.net.vnic.deleted.fullFormat = "vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster." # esx.problem.vsan.clustering.disabled.category = "warning" esx.problem.vsan.clustering.disabled.description = "vSAN clustering services have been disabled." esx.problem.vsan.clustering.disabled.formatOnVm = "" esx.problem.vsan.clustering.disabled.formatOnHost = "vSAN clustering and directory services have been disabled thus will be no longer available." esx.problem.vsan.clustering.disabled.formatOnComputeResource = "" esx.problem.vsan.clustering.disabled.formatOnDatacenter = "" esx.problem.vsan.clustering.disabled.fullFormat = "vSAN clustering and directory services have been disabled thus will be no longer available." # esx.clear.vsan.clustering.enabled.category = "info" esx.clear.vsan.clustering.enabled.description = "vSAN clustering services have now been enabled." esx.clear.vsan.clustering.enabled.formatOnVm = "" esx.clear.vsan.clustering.enabled.formatOnHost = "vSAN clustering and directory services have now been enabled." 
esx.clear.vsan.clustering.enabled.formatOnComputeResource = "" esx.clear.vsan.clustering.enabled.formatOnDatacenter = "" esx.clear.vsan.clustering.enabled.fullFormat = "vSAN clustering and directory services have now been enabled." # esx.audit.vsan.clustering.enabled.category = "info" esx.audit.vsan.clustering.enabled.description = "vSAN clustering services have been enabled." esx.audit.vsan.clustering.enabled.formatOnVm = "" esx.audit.vsan.clustering.enabled.formatOnHost = "vSAN clustering and directory services have been enabled." esx.audit.vsan.clustering.enabled.formatOnComputeResource = "" esx.audit.vsan.clustering.enabled.formatOnDatacenter = "" esx.audit.vsan.clustering.enabled.fullFormat = "vSAN clustering and directory services have been enabled." # esx.problem.vsan.dom.init.failed.status.category = "warning" esx.problem.vsan.dom.init.failed.status.description = "vSAN Distributed Object Manager failed to initialize" esx.problem.vsan.dom.init.failed.status.formatOnVm = "" esx.problem.vsan.dom.init.failed.status.formatOnHost = "vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}." esx.problem.vsan.dom.init.failed.status.formatOnComputeResource = "" esx.problem.vsan.dom.init.failed.status.formatOnDatacenter = "" esx.problem.vsan.dom.init.failed.status.fullFormat = "vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}." # esx.problem.vob.vsan.dom.errorfixed.category = "warning" esx.problem.vob.vsan.dom.errorfixed.description = "vSAN detected and fixed a medium or checksum error." 
esx.problem.vob.vsan.dom.errorfixed.formatOnVm = "" esx.problem.vob.vsan.dom.errorfixed.formatOnHost = "vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}." esx.problem.vob.vsan.dom.errorfixed.formatOnComputeResource = "" esx.problem.vob.vsan.dom.errorfixed.formatOnDatacenter = "" esx.problem.vob.vsan.dom.errorfixed.fullFormat = "vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}." # esx.problem.vob.vsan.dom.unrecoverableerror.category = "warning" esx.problem.vob.vsan.dom.unrecoverableerror.description = "vSAN detected an unrecoverable medium or checksum error." esx.problem.vob.vsan.dom.unrecoverableerror.formatOnVm = "" esx.problem.vob.vsan.dom.unrecoverableerror.formatOnHost = "vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}." esx.problem.vob.vsan.dom.unrecoverableerror.formatOnComputeResource = "" esx.problem.vob.vsan.dom.unrecoverableerror.formatOnDatacenter = "" esx.problem.vob.vsan.dom.unrecoverableerror.fullFormat = "vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}." # esx.problem.vob.vsan.dom.nospaceduringresync.category = "warning" esx.problem.vob.vsan.dom.nospaceduringresync.description = "Resync encountered no space error" esx.problem.vob.vsan.dom.nospaceduringresync.formatOnVm = "" esx.problem.vob.vsan.dom.nospaceduringresync.formatOnHost = "Resync encountered no space error for component {1} on disk {2}." esx.problem.vob.vsan.dom.nospaceduringresync.formatOnComputeResource = "" esx.problem.vob.vsan.dom.nospaceduringresync.formatOnDatacenter = "" esx.problem.vob.vsan.dom.nospaceduringresync.fullFormat = "Resync encountered no space error for component {1} on disk {2}. Resync will resume once space is freed up on this disk. 
Need around {3}MB to resync the component on this disk" # esx.problem.vob.vsan.pdl.offline.category = "error" esx.problem.vob.vsan.pdl.offline.description = "vSAN device has gone offline." esx.problem.vob.vsan.pdl.offline.formatOnVm = "" esx.problem.vob.vsan.pdl.offline.formatOnHost = "vSAN device {1} has gone offline." esx.problem.vob.vsan.pdl.offline.formatOnComputeResource = "" esx.problem.vob.vsan.pdl.offline.formatOnDatacenter = "" esx.problem.vob.vsan.pdl.offline.fullFormat = "vSAN device {1} has gone offline." # esx.clear.vob.vsan.pdl.online.category = "info" esx.clear.vob.vsan.pdl.online.description = "vSAN device has come online." esx.clear.vob.vsan.pdl.online.formatOnVm = "" esx.clear.vob.vsan.pdl.online.formatOnHost = "vSAN device {1} has come online." esx.clear.vob.vsan.pdl.online.formatOnComputeResource = "" esx.clear.vob.vsan.pdl.online.formatOnDatacenter = "" esx.clear.vob.vsan.pdl.online.fullFormat = "vSAN device {1} has come online." # esx.problem.vob.vsan.lsom.diskerror.category = "error" esx.problem.vob.vsan.lsom.diskerror.description = "vSAN device is under permanent failure." esx.problem.vob.vsan.lsom.diskerror.formatOnVm = "" esx.problem.vob.vsan.lsom.diskerror.formatOnHost = "vSAN device {1} is under permanent failure." esx.problem.vob.vsan.lsom.diskerror.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.diskerror.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.diskerror.fullFormat = "vSAN device {1} is under permanent failure." # esx.problem.vob.vsan.lsom.diskpropagatedpermerror.category = "error" esx.problem.vob.vsan.lsom.diskpropagatedpermerror.description = "vSAN device is under propagated permanent error." 
esx.problem.vob.vsan.lsom.diskpropagatedpermerror.formatOnVm = "" esx.problem.vob.vsan.lsom.diskpropagatedpermerror.formatOnHost = "vSAN device {1} is under propagated permanent error" esx.problem.vob.vsan.lsom.diskpropagatedpermerror.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.diskpropagatedpermerror.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.diskpropagatedpermerror.fullFormat = "vSAN device {1} is under propagated permanent error." # esx.problem.vob.vsan.lsom.diskunhealthy.category = "error" esx.problem.vob.vsan.lsom.diskunhealthy.description = "vSAN device is unhealthy." esx.problem.vob.vsan.lsom.diskunhealthy.formatOnVm = "" esx.problem.vob.vsan.lsom.diskunhealthy.formatOnHost = "vSAN device {1} is unhealthy" esx.problem.vob.vsan.lsom.diskunhealthy.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.diskunhealthy.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.diskunhealthy.fullFormat = "vSAN device {1} is unhealthy." # esx.problem.vob.vsan.lsom.diskgrouplogcongested.category = "error" esx.problem.vob.vsan.lsom.diskgrouplogcongested.description = "vSAN diskgroup log is congested." esx.problem.vob.vsan.lsom.diskgrouplogcongested.formatOnVm = "" esx.problem.vob.vsan.lsom.diskgrouplogcongested.formatOnHost = "vSAN diskgroup {1} log is congested" esx.problem.vob.vsan.lsom.diskgrouplogcongested.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.diskgrouplogcongested.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.diskgrouplogcongested.fullFormat = "vSAN diskgroup {1} log is congested." # esx.problem.vob.vsan.lsom.diskpropagatederror.category = "error" esx.problem.vob.vsan.lsom.diskpropagatederror.description = "vSAN device is under propagated error." 
esx.problem.vob.vsan.lsom.diskpropagatederror.formatOnVm = "" esx.problem.vob.vsan.lsom.diskpropagatederror.formatOnHost = "vSAN device {1} is under propagated error" esx.problem.vob.vsan.lsom.diskpropagatederror.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.diskpropagatederror.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.diskpropagatederror.fullFormat = "vSAN device {1} is under propagated error." # esx.problem.vsan.lsom.congestionthreshold.category = "info" esx.problem.vsan.lsom.congestionthreshold.description = "vSAN device Memory/SSD congestion has changed." esx.problem.vsan.lsom.congestionthreshold.formatOnVm = "" esx.problem.vsan.lsom.congestionthreshold.formatOnHost = "LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}." esx.problem.vsan.lsom.congestionthreshold.formatOnComputeResource = "" esx.problem.vsan.lsom.congestionthreshold.formatOnDatacenter = "" esx.problem.vsan.lsom.congestionthreshold.fullFormat = "LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}." # esx.problem.vob.vsan.lsom.componentthreshold.category = "warning" esx.problem.vob.vsan.lsom.componentthreshold.description = "vSAN Node: Near node component count limit." esx.problem.vob.vsan.lsom.componentthreshold.formatOnVm = "" esx.problem.vob.vsan.lsom.componentthreshold.formatOnHost = "vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4})." esx.problem.vob.vsan.lsom.componentthreshold.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.componentthreshold.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.componentthreshold.fullFormat = "vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4})." # esx.problem.vob.vsan.lsom.disklimit2.category = "error" esx.problem.vob.vsan.lsom.disklimit2.description = "Failed to add disk to disk group." esx.problem.vob.vsan.lsom.disklimit2.formatOnVm = "" esx.problem.vob.vsan.lsom.disklimit2.formatOnHost = "Failed to add disk {1} to disk group. 
The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory." esx.problem.vob.vsan.lsom.disklimit2.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.disklimit2.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.disklimit2.fullFormat = "Failed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory." # esx.problem.vob.vsan.lsom.diskgrouplimit.category = "error" esx.problem.vob.vsan.lsom.diskgrouplimit.description = "Failed to create a new disk group." esx.problem.vob.vsan.lsom.diskgrouplimit.formatOnVm = "" esx.problem.vob.vsan.lsom.diskgrouplimit.formatOnHost = "Failed to create new disk group {1}. The system has reached the maximum amount of disk groups allowed {2} for the current amount of memory {3}. Add more memory." esx.problem.vob.vsan.lsom.diskgrouplimit.formatOnComputeResource = "" esx.problem.vob.vsan.lsom.diskgrouplimit.formatOnDatacenter = "" esx.problem.vob.vsan.lsom.diskgrouplimit.fullFormat = "Failed to create new disk group {1}. The system has reached the maximum amount of disk groups allowed {2} for the current amount of memory {3}. Add more memory." # esx.problem.vsan.dom.component.datacomponent.on.witness.host.category = "warning" esx.problem.vsan.dom.component.datacomponent.on.witness.host.description = "Data component found on witness host." esx.problem.vsan.dom.component.datacomponent.on.witness.host.formatOnVm = "" esx.problem.vsan.dom.component.datacomponent.on.witness.host.formatOnHost = "Data component {1} found on witness host is ignored." esx.problem.vsan.dom.component.datacomponent.on.witness.host.formatOnComputeResource = "" esx.problem.vsan.dom.component.datacomponent.on.witness.host.formatOnDatacenter = "" esx.problem.vsan.dom.component.datacomponent.on.witness.host.fullFormat = "Data component {1} found on witness host is ignored." 
# esx.problem.vmfs.spanstate.incompatibility.detected.category = "error" esx.problem.vmfs.spanstate.incompatibility.detected.description = "Incompatible VMFS span state detected." esx.problem.vmfs.spanstate.incompatibility.detected.formatOnVm = "" esx.problem.vmfs.spanstate.incompatibility.detected.formatOnHost = "Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume." esx.problem.vmfs.spanstate.incompatibility.detected.formatOnComputeResource = "" esx.problem.vmfs.spanstate.incompatibility.detected.formatOnDatacenter = "" esx.problem.vmfs.spanstate.incompatibility.detected.fullFormat = "Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume." # esx.problem.vmfs.lockmode.inconsistency.detected.category = "error" esx.problem.vmfs.lockmode.inconsistency.detected.description = "Inconsistent VMFS lockmode detected." esx.problem.vmfs.lockmode.inconsistency.detected.formatOnVm = "" esx.problem.vmfs.lockmode.inconsistency.detected.formatOnHost = "Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume." 
esx.problem.vmfs.lockmode.inconsistency.detected.formatOnComputeResource = "" esx.problem.vmfs.lockmode.inconsistency.detected.formatOnDatacenter = "" esx.problem.vmfs.lockmode.inconsistency.detected.fullFormat = "Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume." esx.problem.vmfs.spanned.lockmode.inconsistency.detected.category = "error" esx.problem.vmfs.spanned.lockmode.inconsistency.detected.description = "Inconsistent VMFS lockmode detected on spanned volume." esx.problem.vmfs.spanned.lockmode.inconsistency.detected.formatOnVm = "" esx.problem.vmfs.spanned.lockmode.inconsistency.detected.formatOnHost = "Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume." esx.problem.vmfs.spanned.lockmode.inconsistency.detected.formatOnComputeResource = "" esx.problem.vmfs.spanned.lockmode.inconsistency.detected.formatOnDatacenter = "" esx.problem.vmfs.spanned.lockmode.inconsistency.detected.fullFormat = "Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume." esx.audit.vobdtestcorrelator.test.category = "info" esx.audit.vobdtestcorrelator.test.description = "Test with both int and string arguments." 
esx.audit.vobdtestcorrelator.test.formatOnVm = "" esx.audit.vobdtestcorrelator.test.formatOnHost = "" esx.audit.vobdtestcorrelator.test.formatOnComputeResource = "" esx.audit.vobdtestcorrelator.test.formatOnDatacenter = "" esx.audit.vobdtestcorrelator.test.fullFormat = "Test with both string: {2} and int: {1}." esx.clear.vobdtestcorrelator.test.category = "info" esx.clear.vobdtestcorrelator.test.description = "Test with both int and string arguments." esx.clear.vobdtestcorrelator.test.formatOnVm = "" esx.clear.vobdtestcorrelator.test.formatOnHost = "" esx.clear.vobdtestcorrelator.test.formatOnComputeResource = "" esx.clear.vobdtestcorrelator.test.formatOnDatacenter = "" esx.clear.vobdtestcorrelator.test.fullFormat = "Test with both string: {1} {3} and int: {2}." esx.problem.vobdtestcorrelator.test.hugestr.category = "info" esx.problem.vobdtestcorrelator.test.hugestr.description = "Test with huge string argument." esx.problem.vobdtestcorrelator.test.hugestr.formatOnVm = "" esx.problem.vobdtestcorrelator.test.hugestr.formatOnHost = "" esx.problem.vobdtestcorrelator.test.hugestr.formatOnComputeResource = "" esx.problem.vobdtestcorrelator.test.hugestr.formatOnDatacenter = "" esx.problem.vobdtestcorrelator.test.hugestr.fullFormat = "Test with huge string argument: {1}" esx.problem.vobdtestcorrelator.test.0.category = "info" esx.problem.vobdtestcorrelator.test.0.description = "Test with no arguments." esx.problem.vobdtestcorrelator.test.0.formatOnVm = "" esx.problem.vobdtestcorrelator.test.0.formatOnHost = "" esx.problem.vobdtestcorrelator.test.0.formatOnComputeResource = "" esx.problem.vobdtestcorrelator.test.0.formatOnDatacenter = "" esx.problem.vobdtestcorrelator.test.0.fullFormat = "Test with no arguments" esx.problem.vobdtestcorrelator.test.1d.category = "info" esx.problem.vobdtestcorrelator.test.1d.description = "Test with int argument." 
esx.problem.vobdtestcorrelator.test.1d.formatOnVm = "" esx.problem.vobdtestcorrelator.test.1d.formatOnHost = "" esx.problem.vobdtestcorrelator.test.1d.formatOnComputeResource = "" esx.problem.vobdtestcorrelator.test.1d.formatOnDatacenter = "" esx.problem.vobdtestcorrelator.test.1d.fullFormat = "Test with int argument: {1}" esx.problem.vobdtestcorrelator.test.1s.category = "info" esx.problem.vobdtestcorrelator.test.1s.description = "Test with string argument." esx.problem.vobdtestcorrelator.test.1s.formatOnVm = "" esx.problem.vobdtestcorrelator.test.1s.formatOnHost = "" esx.problem.vobdtestcorrelator.test.1s.formatOnComputeResource = "" esx.problem.vobdtestcorrelator.test.1s.formatOnDatacenter = "" esx.problem.vobdtestcorrelator.test.1s.fullFormat = "Test with string argument: {1}" esx.problem.test.test2.category = "error" esx.problem.test.test2.description = "Test with both int and string arguments" esx.problem.test.test2.formatOnVm = "" esx.problem.test.test2.formatOnHost = "" esx.problem.test.test2.formatOnComputeResource = "" esx.problem.test.test2.formatOnDatacenter = "" esx.problem.test.test2.fullFormat = "Test with both {1} and {2}" esx.audit.test.test1s.category = "error" esx.audit.test.test1s.description = "Test with a string argument" esx.audit.test.test1s.formatOnVm = "" esx.audit.test.test1s.formatOnHost = "" esx.audit.test.test1s.formatOnComputeResource = "" esx.audit.test.test1s.formatOnDatacenter = "" esx.audit.test.test1s.fullFormat = "Test with {1}" esx.problem.test.test0.category = "error" esx.problem.test.test0.description = "Test with no arguments" esx.problem.test.test0.formatOnVm = "" esx.problem.test.test0.formatOnHost = "" esx.problem.test.test0.formatOnComputeResource = "" esx.problem.test.test0.formatOnDatacenter = "" esx.problem.test.test0.fullFormat = "Test with no arguments" esx.audit.test.test1d.category = "error" esx.audit.test.test1d.description = "Test with an int argument" esx.audit.test.test1d.formatOnVm = "" 
esx.audit.test.test1d.formatOnHost = "" esx.audit.test.test1d.formatOnComputeResource = "" esx.audit.test.test1d.formatOnDatacenter = "" esx.audit.test.test1d.fullFormat = "Test with {1}" esx.problem.evc.incompatible.category = "warning" esx.problem.evc.incompatible.description = "The host cannot support the applied EVC mode." esx.problem.evc.incompatible.formatOnVm = "" esx.problem.evc.incompatible.formatOnHost = "" esx.problem.evc.incompatible.formatOnComputeResource = "" esx.problem.evc.incompatible.formatOnDatacenter = "" esx.problem.evc.incompatible.fullFormat = "The host cannot support the applied EVC mode."