-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
- Loading branch information
There are no files selected for viewing
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Large diffs are not rendered by default.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
{"id":1,"orgId":1,"name":"Graphite","type":"graphite","typeLogoUrl":"public/app/plugins/datasource/graphite/img/graphite_logo.png","access":"proxy","url":"http://172.17.0.1:8020","password":"","user":"","database":"","basicAuth":false,"isDefault":true,"jsonData":{"graphiteVersion":"1.1"},"readOnly":false} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
{"id":2,"orgId":1,"name":"InfluxDB","type":"influxdb","typeLogoUrl":"public/app/plugins/datasource/influxdb/img/influxdb_logo.svg","access":"proxy","url":"http://172.17.0.1:8086","password":"","user":"","database":"telegraf","basicAuth":false,"isDefault":false,"jsonData":{"httpMode":"GET"},"readOnly":false} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
# # config file version
apiVersion: 1

# Grafana dashboard provisioning: load fio dashboards from disk.
providers:
  - name: 'fio'
    orgId: 1
    folder: ''
    folderUid: ''
    type: file
    options:
      path: /var/lib/grafana/dashboards/fio_dashboard
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
# # config file version
apiVersion: 1

# Grafana dashboard provisioning: load vdbench dashboards from disk.
providers:
  - name: 'vdbench'
    orgId: 1
    folder: ''
    folderUid: ''
    type: file
    options:
      path: /var/lib/grafana/dashboards/vdbench_dashboard
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
# # config file version
apiVersion: 1

# Grafana dashboard provisioning: load vSAN dashboards into the
# "vSAN Monitoring" folder.
providers:
  - name: 'default'
    orgId: 1
    folder: 'vSAN Monitoring'
    folderUid: ''
    type: file
    options:
      path: /var/lib/grafana/dashboards/vsan_dashboards
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,13 @@ | ||
#!/usr/bin/ruby
# Best-effort cleanup of the transient test-VM (tvm) folder: power off
# everything under it ("kill *") and then destroy the folder contents via RVC.
require_relative "rvc-util.rb"
require_relative "util.rb"

@tvm_folder_path_escape = _get_tvm_folder_path_escape[0]
@tvm_cleanup_log = "#{$log_path}/prevalidation/tvm-cleanup.log"
begin
  # NOTE(review): `puts x, @tvm_cleanup_log` also prints the log *path* to
  # stdout; a logging helper from util.rb may have been intended — confirm.
  puts `rvc #{$vc_rvc} --path #{@tvm_folder_path_escape} -c "kill *" -c 'exit' -q`, @tvm_cleanup_log
  puts `rvc #{$vc_rvc} --path #{@tvm_folder_path_escape} -c "destroy ." -c 'exit' -q 2> /dev/null`, @tvm_cleanup_log
rescue StandardError => e
  # Was `rescue Exception`, which also swallows SystemExit/SignalException;
  # StandardError keeps the best-effort behavior without masking those.
  puts "dont worry, nothing critical", @tvm_cleanup_log
  puts "#{e.class}: #{e.message}", @tvm_cleanup_log
end
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,21 @@ | ||
#!/usr/bin/ruby
# Best-effort teardown of the test VMs: for every host (in parallel), power
# off and destroy all "#{$vm_prefix}-*" VMs, then destroy the VM folder.
require_relative "rvc-util.rb"
require_relative "util.rb"
require 'shellwords' # Shellwords is used below; don't rely on transitive requires

@vm_cleanup_log = "#{$log_path}/vm-cleanup.log"
vm_folder_moid = _get_folder_moid("#{$vm_prefix}-#{$cluster_name}-vms", _get_folder_moid($fd_name, ""))
begin
  tnode = []
  _get_hosts_list.each do |host|
    host_moid = _get_moid("hs", host).join(":")
    # One worker thread per host; each powers off then destroys its VMs.
    tnode << Thread.new {
      puts `govc find -type m -i -dc "#{Shellwords.escape($dc_name)}" . -runtime.host "#{host_moid}" -name "#{$vm_prefix}-*" | xargs govc vm.power -dc "#{Shellwords.escape($dc_name)}" -off -moid`, @vm_cleanup_log
      puts `govc find -type m -i -dc "#{Shellwords.escape($dc_name)}" . -runtime.host "#{host_moid}" -name "#{$vm_prefix}-*" | xargs govc object.destroy -dc "#{Shellwords.escape($dc_name)}" `, @vm_cleanup_log
    }
  end
  tnode.each { |t| t.join }
  puts `govc object.destroy -dc "#{Shellwords.escape($dc_name)}" "#{vm_folder_moid}" 2>/dev/null`, @vm_cleanup_log
rescue StandardError => e
  # Was `rescue Exception`, which also swallows SystemExit/SignalException.
  puts "dont worry, nothing critical", @vm_cleanup_log
  puts "#{e.class}: #{e.message}", @vm_cleanup_log
end
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,75 @@ | ||
require 'yaml'
require_relative '/opt/automation/lib/rvc-util.rb'

# Resilience test: @times rounds of (1) vMotion every VM off a random source
# host, (2) hard-reset that host through its iDRAC, (3) wait until vCenter
# reports it "connected" again, (4) capture the vSAN resync dashboard.
@sleep_after_vmotion = 0
@times = 10

@hosts_list = _get_hosts_list
host_entry = YAML.load_file("/root/servers.yml") # host => iDRAC address map — TODO confirm schema
@vm_prefix = ""
@host_vms_map = {}
@srcHost = ""
# prepare host => [vm1, vm2] map
@hosts_list.each do |host|
  vms = `rvc #{$vc_rvc} -c "find #{$cl_path_escape}/hosts/#{host}/vms" -c "exit" -q | awk -F " " '{print $NF}'`.split("\n")
  @host_vms_map[host] = vms
end

# Print the cluster's current resync status via RVC.
def captureVsanStatus
  puts "Getting resync status"
  print `rvc #{$vc_rvc} -c "vsan.resync_dashboard #{$cl_path_escape}" -c "exit" -q`
end

# vMotion +vms+ onto +destHost+; returns false when there is nothing to move.
def migrateVm(vms, srcHost, destHost)
  puts "Dest Host: #{destHost}"
  puts "Moving #{vms.size} VMs:"
  puts vms.join("\n")
  return false if vms == []
  puts `rvc #{$vc_rvc} -c "vm.migrate -o #{$cl_path_escape}/hosts/#{destHost} #{vms.join(' ')}" -c "exit" -q`
  return true
end

# Pick a random host that still has at least one VM on it.
def pickSrcHost
  @srcHost = @hosts_list[rand(@hosts_list.size)]
  @srcHost = @hosts_list[rand(@hosts_list.size)] while @host_vms_map[@srcHost].size == 0
  return @srcHost
end

@times.times do # was `for i in 1..@times`; i was unused and `for` leaks it
  puts "Starts moving VMs"
  puts "Source Host: #{pickSrcHost} with #{@host_vms_map[@srcHost].size} VMs"
  candidate_destHosts = []

  while @host_vms_map[@srcHost].size != 0
    if candidate_destHosts == []
      temp_hosts = @hosts_list.clone
      temp_hosts.delete(@srcHost)
      candidate_destHosts = temp_hosts
    end
    puts "Candidate Dest Hosts: #{candidate_destHosts}"
    vms_to_move = rand(@host_vms_map[@srcHost].size) + 1
    vms = @host_vms_map[@srcHost][0...vms_to_move]
    destHost = candidate_destHosts[rand(candidate_destHosts.size)]
    if migrateVm(vms, @srcHost, destHost)
      @host_vms_map[@srcHost] -= vms
      # keep the cached inventory paths pointing at the new owning host
      vms.each do |vm|
        vm.gsub!("/#{@srcHost}/", "/#{destHost}/")
      end
      @host_vms_map[destHost] += vms
      candidate_destHosts.delete(destHost)
    end
  end
  sleep(@sleep_after_vmotion) if @sleep_after_vmotion != 0
  idrac = host_entry[@srcHost]
  # SECURITY: hard-coded iDRAC password passed on the command line (visible in
  # `ps` output) — move it to an env var / secrets store.
  `sshpass -p 'ca$hc0w' ssh -o "StrictHostKeyChecking no" vmware@#{idrac} "racadm serveraction hardreset"`
  puts "rebooting Host #{@srcHost}, wait for 5 mins first"
  sleep(300)
  while `rvc #{$vc_rvc} -c "info #{$cl_path_escape}/hosts/#{@srcHost}" -c "exit" -q | grep "connection state" | awk -F " " '{print $NF}'`.chomp != "connected"
    puts "Host not coming back, sleeping 120s"
    sleep(120)
  end
  captureVsanStatus
end
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,58 @@ | ||
#!/usr/bin/ruby | ||
|
||
require "rubygems" | ||
require "json" | ||
|
||
# Linear-interpolation percentile of +values+ (bytes), scaled to MiB and
# rounded to 2 decimals. +percentile+ is a fraction in 0.0..1.0.
# Fixed: percentile == 1.0 used to read one past the end (nil arithmetic).
def percentile_by_value(values, percentile)
  values_sorted = values.sort
  rank = percentile * (values_sorted.length - 1) + 1
  k = rank.floor - 1
  f = rank.modulo(1)
  upper = values_sorted[k + 1] || values_sorted[k] # guard for percentile == 1.0
  return ((values_sorted[k] + (f * (upper - values_sorted[k]))) / (1024 * 1024)).round(2)
end
|
||
# For each observerData folder under each given directory, average the DOM
# client read/write latencies per host and print them.
if ARGV.empty?
  puts "Usage"
  exit(1)
else
  ARGV.each do |dir|
    if File.directory?(dir)
      Dir.entries(dir).select { |entry| File.directory?(File.join(dir, entry)) and !(entry == '.' || entry == '..') and entry =~ /observerData/ }.each do |ioFolder| # enter io folder
        jsonFile_list = `find "#{dir}/#{ioFolder}"/jsonstats/dom/ -type f -name 'domobj-client-*' |grep -v thumb `
        jsonFile_list = jsonFile_list.split("\n").sort
        file_arr = []
        w_bytes_arr = []
        r_bytes_arr = []
        jsonFile_list.each do |file| # get each server's latency numbers
          file_arr << file
          json = File.read(file) # was open(file).read, which leaked the handle
          begin
            parsed = JSON.parse(json)
          rescue JSON::ParserError => e
            p "N/A"
            exit 1
          end
          # drop the first sample; /1000 presumably converts µs -> ms — confirm
          arr_r = parsed["stats"]["readLatency"]["avgs"][1..-1]
          arr_w = parsed["stats"]["writeLatency"]["avgs"][1..-1]
          r_bytes_arr << (arr_r.inject { |sum, el| sum + el }.to_f / (arr_r.size * 1000)).round(2)
          w_bytes_arr << (arr_w.inject { |sum, el| sum + el }.to_f / (arr_w.size * 1000)).round(2)
        end
        puts "Read Latency: #{r_bytes_arr}, Avg: #{(r_bytes_arr.inject { |sum, el| sum + el }.to_f / r_bytes_arr.size).round(2)}"
        # BUG FIX: write average previously divided by r_bytes_arr.size
        puts "Write Latency: #{w_bytes_arr}, Avg: #{(w_bytes_arr.inject { |sum, el| sum + el }.to_f / w_bytes_arr.size).round(2)}"
      end
    else
      puts "#{dir} doesn't exist!"
      exit(1)
    end
  end
end
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
#!/usr/bin/ruby | ||
|
||
require "rubygems" | ||
require "json" | ||
|
||
# Linear-interpolation percentile of +values+ (bytes), scaled to MiB and
# rounded to 2 decimals. +percentile+ is a fraction in 0.0..1.0.
# Fixed: percentile == 1.0 used to read one past the end (nil arithmetic).
def percentile_by_value(values, percentile)
  values_sorted = values.sort
  rank = percentile * (values_sorted.length - 1) + 1
  k = rank.floor - 1
  f = rank.modulo(1)
  upper = values_sorted[k + 1] || values_sorted[k] # guard for percentile == 1.0
  return ((values_sorted[k] + (f * (upper - values_sorted[k]))) / (1024 * 1024)).round(2)
end
|
||
# For each observerData folder under each given directory, average the DOM
# client read/write byte rates per host (scaled /1024/1024, presumably to
# MiB/s — confirm) and print per-host values plus totals.
if ARGV.empty?
  puts "Usage"
  exit(1)
else
  ARGV.each do |dir|
    if File.directory?(dir)
      Dir.entries(dir).select { |entry| File.directory?(File.join(dir, entry)) and !(entry == '.' || entry == '..') and entry =~ /observerData/ }.each do |ioFolder| # enter io folder
        jsonFile_list = `find "#{dir}/#{ioFolder}"/jsonstats/dom/ -type f -name 'domobj-client-*' |grep -v thumb `
        jsonFile_list = jsonFile_list.split("\n").sort
        file_arr = [] # was initialized twice; once is enough
        w_bytes_arr = []
        r_bytes_arr = []
        jsonFile_list.each do |file| # get each server's throughput numbers
          file_arr << file
          json = File.read(file) # was open(file).read, which leaked the handle
          begin
            parsed = JSON.parse(json)
          rescue JSON::ParserError => e
            p "N/A"
            exit 1
          end
          arr_r = parsed["stats"]["readBytes"]["avgs"][1..-1] # drop first sample
          arr_w = parsed["stats"]["writeBytes"]["avgs"][1..-1]
          r_bytes_arr << (arr_r.inject { |sum, el| sum + el }.to_f / (arr_r.size * 1024 * 1024)).round(2)
          w_bytes_arr << (arr_w.inject { |sum, el| sum + el }.to_f / (arr_w.size * 1024 * 1024)).round(2)
        end
        puts "Read throughput: #{r_bytes_arr}, Total: #{r_bytes_arr.inject { |sum, el| sum + el }.round(2)}"
        puts "Write throughput: #{w_bytes_arr}, Total: #{w_bytes_arr.inject { |sum, el| sum + el }.round(2)}"
      end
    else
      puts "#{dir} doesn't exist!"
      exit(1)
    end
  end
end
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,58 @@ | ||
#!/usr/bin/ruby | ||
|
||
require "rubygems" | ||
require "json" | ||
|
||
# Linear-interpolation percentile of +values+ (bytes), scaled to MiB and
# rounded to 2 decimals. +percentile+ is a fraction in 0.0..1.0.
# Fixed: percentile == 1.0 used to read one past the end (nil arithmetic).
def percentile_by_value(values, percentile)
  values_sorted = values.sort
  rank = percentile * (values_sorted.length - 1) + 1
  k = rank.floor - 1
  f = rank.modulo(1)
  upper = values_sorted[k + 1] || values_sorted[k] # guard for percentile == 1.0
  return ((values_sorted[k] + (f * (upper - values_sorted[k]))) / (1024 * 1024)).round(2)
end
|
||
# For each observerData folder under each given directory, average the DOM
# component-manager (compmgr) read/write latencies per host and print them.
if ARGV.empty?
  puts "Usage"
  exit(1)
else
  ARGV.each do |dir|
    if File.directory?(dir)
      Dir.entries(dir).select { |entry| File.directory?(File.join(dir, entry)) and !(entry == '.' || entry == '..') and entry =~ /observerData/ }.each do |ioFolder| # enter io folder
        jsonFile_list = `find "#{dir}/#{ioFolder}"/jsonstats/dom/ -type f -name 'domobj-compmgr-*' |grep -v thumb `
        jsonFile_list = jsonFile_list.split("\n").sort
        file_arr = []
        w_bytes_arr = []
        r_bytes_arr = []
        jsonFile_list.each do |file| # get each server's latency numbers
          file_arr << file
          json = File.read(file) # was open(file).read, which leaked the handle
          begin
            parsed = JSON.parse(json)
          rescue JSON::ParserError => e
            p "N/A"
            exit 1
          end
          # drop the first sample; /1000 presumably converts µs -> ms — confirm
          arr_r = parsed["stats"]["readLatency"]["avgs"][1..-1]
          arr_w = parsed["stats"]["writeLatency"]["avgs"][1..-1]
          r_bytes_arr << (arr_r.inject { |sum, el| sum + el }.to_f / (arr_r.size * 1000)).round(2)
          w_bytes_arr << (arr_w.inject { |sum, el| sum + el }.to_f / (arr_w.size * 1000)).round(2)
        end
        puts "Read Latency: #{r_bytes_arr}, Avg: #{(r_bytes_arr.inject { |sum, el| sum + el }.to_f / r_bytes_arr.size).round(2)}"
        # BUG FIX: write average previously divided by r_bytes_arr.size
        puts "Write Latency: #{w_bytes_arr}, Avg: #{(w_bytes_arr.inject { |sum, el| sum + el }.to_f / w_bytes_arr.size).round(2)}"
      end
    else
      puts "#{dir} doesn't exist!"
      exit(1)
    end
  end
end
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
#!/usr/bin/ruby | ||
|
||
require "rubygems" | ||
require "json" | ||
|
||
# Linear-interpolation percentile of +values+ (bytes), scaled to MiB and
# rounded to 2 decimals. +percentile+ is a fraction in 0.0..1.0.
# Fixed: percentile == 1.0 used to read one past the end (nil arithmetic).
def percentile_by_value(values, percentile)
  values_sorted = values.sort
  rank = percentile * (values_sorted.length - 1) + 1
  k = rank.floor - 1
  f = rank.modulo(1)
  upper = values_sorted[k + 1] || values_sorted[k] # guard for percentile == 1.0
  return ((values_sorted[k] + (f * (upper - values_sorted[k]))) / (1024 * 1024)).round(2)
end
|
||
# For each observerData folder under each given directory, average the DOM
# component-manager (compmgr) read/write byte rates per host (scaled
# /1024/1024, presumably to MiB/s — confirm) and print values plus totals.
if ARGV.empty?
  puts "Usage"
  exit(1)
else
  ARGV.each do |dir|
    # NOTE(review): a stray debug `puts dir` was removed here to match the
    # output format of the three sibling analysis scripts.
    if File.directory?(dir)
      Dir.entries(dir).select { |entry| File.directory?(File.join(dir, entry)) and !(entry == '.' || entry == '..') and entry =~ /observerData/ }.each do |ioFolder| # enter io folder
        jsonFile_list = `find "#{dir}/#{ioFolder}"/jsonstats/dom/ -type f -name 'domobj-compmgr-*' |grep -v thumb `
        jsonFile_list = jsonFile_list.split("\n").sort
        file_arr = []
        w_bytes_arr = []
        r_bytes_arr = []
        jsonFile_list.each do |file| # get each server's throughput numbers
          file_arr << file
          json = File.read(file) # was open(file).read, which leaked the handle
          begin
            parsed = JSON.parse(json)
          rescue JSON::ParserError => e
            p "N/A"
            exit 1
          end
          arr_r = parsed["stats"]["readBytes"]["avgs"][1..-1] # drop first sample
          arr_w = parsed["stats"]["writeBytes"]["avgs"][1..-1]
          r_bytes_arr << (arr_r.inject { |sum, el| sum + el }.to_f / (arr_r.size * 1024 * 1024)).round(2)
          w_bytes_arr << (arr_w.inject { |sum, el| sum + el }.to_f / (arr_w.size * 1024 * 1024)).round(2)
        end
        puts "Read throughput: #{r_bytes_arr}, Total: #{r_bytes_arr.inject { |sum, el| sum + el }.round(2)}"
        puts "Write throughput: #{w_bytes_arr}, Total: #{w_bytes_arr.inject { |sum, el| sum + el }.round(2)}"
      end
    else
      puts "#{dir} doesn't exist!"
      exit(1)
    end
  end
end