
Commit

new-version
kauedesousa committed Jan 25, 2024
1 parent 260f73b commit d657f2b
Showing 6 changed files with 101 additions and 35 deletions.
13 changes: 13 additions & 0 deletions NEWS.md
@@ -1,3 +1,16 @@
ClimMob-analysis v3.0 (2024-01-25)
=========================

### Improvements

* Adds analysis of variance for variety performance
* Adds pseudo ranking when network is poorly connected

### Bug fixes

* Fixes changes in reference for the log-worth plot


ClimMob-analysis v2.1 (2022-12-15)
=========================

84 changes: 72 additions & 12 deletions modules/01_functions.R
@@ -35,6 +35,59 @@ library("janitor")
library("lubridate")
library("ggchicklet")


#' Add pseudo ranking
#'
#' Adds pseudo values to weakly connected ranking networks
#' @param x a PlackettLuce rankings object
#' @return a PlackettLuce rankings object with pseudo rankings added
force_pseudo_rank = function(x) {

# get membership in the network
members = PlackettLuce::connectivity(x)$membership
# put the members in order
members = sort(members)
# rankings into a matrix
r = unclass(x)

performance = coefficients(PlackettLuce::PlackettLuce(r))

# get the worst item per cluster
members = split(members, members)

members = lapply(members, function(z){
p = performance[names(z)]
worst = which.min(p)
names(p)[worst]
})

members = as.character(unlist(members))

# create pseudo rankings for these members so that they are placed
# after all observed ranks and the clusters become connected
# first rank value available after the observed ranks
max_rank = max(r) + 1

# rows with at least one observed ranking, where pseudo ranks can be added
to_input = rowSums(r) != 0

# candidate values: either stay unranked (0) or take a rank after the observed ones
to_sample = c(rep(0, ceiling(length(members)/2)),
              max_rank:(max_rank + ceiling(length(members)/2)))

# fill the empty cells of the selected items with pseudo ranks;
# apply() returns the rows as columns, so transpose before assigning back
r[to_input, members] = t(apply(r[to_input, members], 1, function(y){

where = y == 0

y[where] = sample(to_sample, size = length(y[where]))

y

}))

# return the augmented rankings
PlackettLuce::as.rankings(r)

}
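
# A minimal, illustrative sketch (hypothetical data, not part of this commit):
# how force_pseudo_rank() might be applied to a small set of rankings with two
# disconnected item clusters ({A, B} and {C, D} never meet). Assumes the
# PlackettLuce package is attached, as above.
r_demo = matrix(c(1, 2, 0, 0,
                  2, 1, 0, 0,
                  0, 0, 1, 2,
                  0, 0, 2, 1),
                nrow = 4, byrow = TRUE,
                dimnames = list(NULL, c("A", "B", "C", "D")))
R_demo = PlackettLuce::as.rankings(r_demo)
PlackettLuce::connectivity(R_demo, verbose = FALSE)$no # two clusters
R_demo = force_pseudo_rank(R_demo)
mod_demo = PlackettLuce::PlackettLuce(R_demo)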


#' Get colour palette
#' @param x an integer
#' @examples
@@ -577,6 +630,12 @@ decode_pars = function(x) {
tr = toupper(tr)
}

if (any(grepl("generalappreciation", tr))) {
i = which(grepl("generalappreciation", tr))[1]
questions$traitOrder[i] = "referenceTrait"
tr = toupper(tr)
}

if (any(grepl("yield", tr))) {
i = which(grepl("yield", tr))[1]
questions$traitOrder[i] = "referenceTrait"
@@ -794,7 +853,8 @@ multcompPL = function(mod, items = NULL, threshold = 0.05, adjust = "none", ...)
#' @param ci.level the confidence interval level
#' @param multcomp logical to add group letters
#' @param levels an optional vector with factor levels to plot
plot_logworth = function(x, ci.level = 0.95, ref = NULL, multcomp = TRUE, levels = NULL, ...) {
plot_logworth = function(x, ci.level = 0.95, ref = NULL,
multcomp = TRUE, levels = NULL, ...) {

frame = data.frame()

@@ -806,7 +866,7 @@ plot_logworth = function(x, ci.level = 0.95, ref = NULL, multcomp = TRUE, levels
}

if (is.null(levels)) {
levels = unique(frame$items)
levels = union(ref, sort(unique(frame$items)))
}

items = factor(frame$items, levels = levels)
@@ -838,29 +898,29 @@ plot_logworth = function(x, ci.level = 0.95, ref = NULL, multcomp = TRUE, levels
pdat$items = factor(pdat$items, levels = levels)

p = ggplot(data = pdat,
aes(x = items,
y = est,
ymax = tops,
ymin = tails,
aes(y = items,
x = est,
xmax = tops,
xmin = tails,
label = group)) +
geom_hline(yintercept = 0,
geom_vline(xintercept = 0,
colour = "#E5E7E9",
linewidth = 0.8) +
geom_point() +
geom_errorbar(width = 0.1) +
geom_text(vjust = 1.2, hjust = 1.2) +
geom_text(hjust = 1.2, vjust = 1.2) +
theme_bw() +
facet_wrap(~ ref, strip.position = "bottom") +
theme(panel.grid.major = element_blank(),
axis.text.x = element_text(angle = 45, vjust = 1, hjust=1,
size = 10, color = "grey20"),
strip.background.x = element_blank(),
axis.text.y = element_text(size = 10, color = "grey20"),
axis.text.x = element_text(size = 10, color = "grey20"),
text = element_text(color = "grey20"),
legend.position = "bottom",
legend.title = element_blank(),
strip.background.x = element_blank(),
strip.background.y = element_blank(),
strip.placement = "outside") +
labs(x = "", y = "Log-worth")
labs(y = "", x = "Log-worth")

p

11 changes: 3 additions & 8 deletions modules/05_spatial_overview.R
@@ -60,20 +60,15 @@ get_testing_sites_map = function(cmdata, output_path, backward_path){
minimap = TRUE,
map_provider = "OpenStreetMap.Mapnik")

tempmap = paste0(getwd(), "/tempmap/")

dir.create(tempmap, recursive = TRUE, showWarnings = FALSE)

try(mapview::mapshot(trial_map,
url = paste0(tempmap, "/trial_map.html"),
file = paste0(tempmap, "/trial_map.png")),
url = paste0(output_path, "/trial_map.html"),
file = paste0(output_path, "/trial_map.png")),
silent = TRUE)

}

result = list(geoTRUE = TRUE,
mapDIR = tempmap,
map_path = paste0(tempmap, "/trial_map.png"),
map_path = paste0(fullpath, "/", output_path, "/trial_map.png"),
map = trial_map,
coords = lonlat)

23 changes: 11 additions & 12 deletions modules/06_PlackettLuce_models.R
@@ -41,7 +41,14 @@ get_PlackettLuce_models = function(cmdata, rank_dat) {
validate.rankings = TRUE)
})

# handle rankings with poor connectivity
# TRUE when the ranking network has more than one cluster
connection = lapply(R, function(x) connectivity(x, verbose = FALSE)$no)

connection = as.vector(unlist(connection) > 1)

# force a pseudo rank
R[connection] = lapply(R[connection], force_pseudo_rank)

# fit the model
mod = lapply(R, function(x){
PlackettLuce(x)
@@ -51,19 +58,11 @@
logworth_plot = list()

for(m in seq_along(mod)) {
lwp =
try(plot_logworth(mod[[m]], ref = reference_tech, ci.level = 0.5) +
lwp = try(plot_logworth(mod[[m]],
ref = reference_tech,
ci.level = 0.5) +
labs(title = paste0(rank_dat$trait_names[m],
" (n = ", length(mod[[m]]$rankings),")")) +
coord_flip() +
theme(axis.text.x = element_text(angle = 0,
vjust = 0.5,
hjust = 0.5),
strip.background.x = element_blank(),
strip.placement = "outside",
strip.text = element_text(size = 10, color = "grey20"),
legend.text = element_text(size = 10, color = "grey20"),
axis.title = element_text(size = 10, color = "grey20")),
" (n = ", length(mod[[m]]$rankings),")")),
silent = TRUE)

if ("try-error" %in% class(lwp)) next
1 change: 0 additions & 1 deletion modules/08_PlackettLuce_tree.R
@@ -151,7 +151,6 @@ get_PlackettLuce_tree = function(cmdata, rank_dat, agroclimate = NULL) {
validate.rankings = TRUE,
group = TRUE)


#..........................................................
# PlackettLuce tree ####
# data frame of explanatory variables
4 changes: 2 additions & 2 deletions report/mainreport.Rmd
@@ -29,9 +29,9 @@ This report presents the results from your tricot experiment entitled `r project

# What you should expect from this report

This report will provide to you an overview of your tricot experiment data and results, with insights of the performance of the technologies tested in the tricot trial. This is a standard automated report that tries to accommodate the most important outputs from a tricot trial. After reading through this report, you should be able to: (i) identify the technology that outperforms the others in your trial, and under which conditions, and (ii) support a decision on advancement of a technology to the next stage of your product development program (e.g. breeding program, market assessment).
This report provides the results from your tricot experiment, with insights into the performance of the technologies tested in the trial. This is a standard automated report that covers the most important outputs from a tricot trial. After reading through this report, you should be able to: (i) identify the technology that outperforms the others in your trial, and under which conditions, and (ii) support a decision on the advancement of a technology to the next stage of your product development program (e.g. breeding program, market assessment).

However, since this report was done in an automated process, is likely that it will not provide to you the full overview on the performance of the tested technologies as the automation may not capture all the factors that influence the performance of a technology in the target environment (under real-world conditions). If you would like to have an in-deep analysis, or to merge data from different projects or is planning a peer-review publication with your tricot data, send a proposal/request to Kauê de Sousa ([email protected]) and Jacob van Etten ([email protected]). Even being implemented with high-quality statistical analysis, the automated process for the production of this report limits the ability to test specific hypothesis with the data to match the needs of international peer-review journals. Therefore, we recommend to extend the analysis beyond the results described here.
However, since this report is generated by an automated process, it is unlikely to give you the full picture of the performance of the tested technologies, as the automation may not capture all the factors that influence the performance of a technology in the target environment (under real-world conditions). If you would like an in-depth analysis, to merge data from different projects, or are planning a peer-reviewed publication with your tricot data, send a proposal/request to the ClimMob Team <[email protected]>.

# Methods

