Dylan Beaudette. Maintainer, author.
Jay Skovlin. Author.
Stephen Roecker. Author.
Andrew Brown. Author.
USDA-NRCS Soil Survey Staff. Contributor.
DESCRIPTION
Beaudette D, Skovlin J, Roecker S, Brown A (2022). sharpshootR: A Soil Survey Toolkit. R package version 2.0, https://CRAN.R-project.org/package=sharpshootR.
@Manual{,
  title = {sharpshootR: A Soil Survey Toolkit},
  author = {Dylan Beaudette and Jay Skovlin and Stephen Roecker and Andrew Brown},
  year = {2022},
  note = {R package version 2.0},
  url = {https://CRAN.R-project.org/package=sharpshootR},
}
This package contains a mish-mash of functionality and sample data related to the daily business of soil survey operations with the USDA-NRCS. Many of the functions are highly specialized and inherit default arguments from the names used by the various NCSS (National Cooperative Soil Survey) databases.
Install the stable version from CRAN:
install.packages('sharpshootR', dep=TRUE)
Install the development version from Github:
remotes::install_github("ncss-tech/sharpshootR", dependencies=FALSE, upgrade=FALSE, build=FALSE)
Related tutorials and documentation: http://ncss-tech.github.io/AQP/
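Once installed, the package index is a convenient way to browse the functions listed below (standard R, nothing specific to sharpshootR):
library(sharpshootR)

# list all documented functions and sample data sets
help(package = 'sharpshootR')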
Dylan Beaudette, Jay Skovlin and Stephen Roecker (2021). sharpshootR: A Soil Survey Toolkit. R package version 1.8. https://CRAN.R-project.org/package=sharpshootR
aggregateColorPlot
aspect.plot
diagnosticPropertyPlot
plotAvailWater
plotProfileDendrogram
plotSoilRelationGraph
plotSoilRelationChordGraph
plotTransect
SoilTaxonomyDendrogram
vizGeomorphicComponent
vizHillslopePosition
vizFlatsPosition
vizTerracePosition
vizMountainPosition
vizAnnualClimate
plotWB
plotWB_lines
library(sharpshootR)
data(loafercreek, package = 'soilDB')

# generalize horizon names using REGEX rules
n <- c('Oi', 'A', 'BA','Bt1','Bt2','Bt3','Cr','R')
p <- c('O', '^A$|Ad|Ap|AB','BA$|Bw',
       'Bt1$|^B$','^Bt$|^Bt2$','^Bt3|^Bt4|CBt$|BCt$|2Bt|2CB$|^C$','Cr','R')
loafercreek$genhz <- generalize.hz(loafercreek$hzname, n, p)

# remove non-matching generalized horizon names
loafercreek$genhz[loafercreek$genhz == 'not-used'] <- NA
loafercreek$genhz <- factor(loafercreek$genhz)

# aggregate color data, this function is from the `aqp` package
a <- aggregateColor(loafercreek, 'genhz', k = 8)

# plot
par(mar=c(3,4,4,1))
aggregateColorPlot(a, print.n.hz = TRUE)
Developed by Dylan Beaudette, Jay Skovlin, Stephen Roecker, Andrew Brown.
The CDEC snow course list, updated September 2019
data(CDEC.snow.courses)
A data frame with 259 observations on the following 9 variables:
course_number: course number
name: connotative course label
id: course ID
elev_feet: course elevation in feet
latitude: latitude
longitude: longitude
april.1.Avg.inches: average inches of snow as of April 1st
agency: responsible agency
watershed: watershed label
Data were scraped from http://cdec.water.ca.gov/misc/SnowCourses.html, 2019.
data(CDEC.snow.courses)
head(CDEC.snow.courses)
#> course_number name id elev_feet latitude longitude
#> 2 1 PARKS CREEK PRK 6700 41.367 -122.550
#> 3 2 LITTLE SHASTA LSH 6200 41.808 -122.195
#> 4 3 SWEETWATER SWT 5850 41.382 -122.535
#> 6 5 MIDDLE BOULDER 1 MBL 6600 41.217 -122.807
#> 7 417 BOX CAMP BXC 6450 41.597 -123.165
#> 8 311 MIDDLE BOULDER 3 MB3 6200 41.225 -122.811
#> april.1.Avg.inches agency watershed
#> 2 35.1 Mount Shasta Ranger District SHASTA R
#> 3 16.6 Goosenest Ranger District SHASTA R
#> 4 13.1 Mount Shasta Ranger District SHASTA R
#> 6 31.5 Salmon/Scott River Ranger District SCOTT R
#> 7 35.2 Salmon/Scott River Ranger District SCOTT R
#> 8 27.4 Salmon/Scott River Ranger District SCOTT R
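Elevation and the April 1 average are both included, so a quick scatter plot summarizes the network; a minimal sketch using base graphics:
data(CDEC.snow.courses, package = 'sharpshootR')

# April 1 average (inches of snow) as a function of course elevation
plot(april.1.Avg.inches ~ elev_feet, data = CDEC.snow.courses,
     xlab = 'Course Elevation (feet)', ylab = 'April 1 Average (inches)', las = 1)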
Source: R/CDEC_StationInfo.R
CDEC_StationInfo.Rd
Query CDEC Website for Sensor Details
CDEC_StationInfo(s)
Arguments
s: character, a single CDEC station ID (e.g. 'HHM')
Value
A list object containing site metadata, sensor metadata, and possibly comments about the site.
See also
CDECquery
Author
D.E. Beaudette
# \donttest{
if(requireNamespace("curl") &
   curl::has_internet()
) {
  
  # CDEC API needs a long timeout
  options(timeout = 60000)
  
  CDEC_StationInfo('HHM')
  
}
#> result: a list with elements $site.meta, $sensor.meta, and $comments for station HHM
# }
A (relatively) simple interface to the CDEC website.
CDECquery(id, sensor, interval = "D", start, end)
Arguments
id: station ID (e.g. 'spw'), single value or vector of station IDs, see details
sensor: the sensor ID, single value or vector of sensor numbers, see details
interval: character, 'D' for daily, 'H' for hourly, 'M' for monthly, 'E' for event; see Details
start: starting date, in the format 'YYYY-MM-DD'
end: ending date, in the format 'YYYY-MM-DD'
Value
A data.frame object with the following fields: datetime, year, month, value.
Details
Sensors that report data on an interval other than monthly ('M'), daily ('D'), or hourly ('H') can be queried with an event interval ('E'). Soil moisture and temperature sensors are an example of this type of reporting. See examples below.
1. Station IDs can be found here: http://cdec.water.ca.gov/staInfo.html
2a. Sensor IDs can be found using this URL: http://cdec.water.ca.gov/dynamicapp/staMeta?station_id=, followed by the station ID.
2b. Sensor details can be accessed using CDEC_StationInfo with the station ID.
3. Reservoir capacities can be found here: http://cdec.water.ca.gov/misc/resinfo.html
4. A new interactive map of CDEC stations can be found here: http://cdec.water.ca.gov
References
http://cdec.water.ca.gov/queryCSV.html
Author
D.E. Beaudette
# \donttest{
if(requireNamespace("curl") &
   curl::has_internet() &
   require(latticeExtra)) {
  
  ### query an 'event' type sensor
  # Bryte test site (BYT)
  # single request: air temperature and soil temperature at depth 1 (25cm)
  # measurement interval is 20 minutes
  x <- CDECquery('BYT', c(4, 194), 'E', '2016-01-01', '2017-01-01')
  
  # data are in long format, check number of records for each sensor
  table(x$sensor_type)
  
  # plot grouped data
  xyplot(value ~ datetime, groups=sensor_type, data=x, type=c('g', 'l'),
         auto.key=list(columns=2, points=FALSE, lines=TRUE))
  
}
# }
Source: R/CDECsnowQuery.R
CDECsnowQuery.Rd
Get snow survey data (California only) from the CDEC website.
CDECsnowQuery(course, start_yr, end_yr)
Arguments
course: integer, course number (e.g. 129)
start_yr: integer, the starting year (e.g. 2010)
end_yr: integer, the ending year (e.g. 2013)
Value
a data.frame object, see examples
Details
This function downloads data from the CDEC website, therefore an internet connection is required. The SWE column contains adjusted SWE if available (Adjusted column), otherwise the reported SWE is used (Water column). See the tutorial for examples.
Note
Snow course locations, ID numbers, and other information can be found here: http://cdec.water.ca.gov/misc/SnowCourses.html
References
http://cdec.water.ca.gov/cgi-progs/snowQuery
Author
D.E. Beaudette
# \donttest{
if(requireNamespace("curl") &
   curl::has_internet()
) {
  
  # get data for course number 129
  x <- CDECsnowQuery(course=129, start_yr=2010, end_yr=2011)
  
}
# }
Evaluation of frost-free days and related metrics from daily climate records.
FFD(
  d,
  returnDailyPr = TRUE,
  minDays = 165,
  frostTemp = 32,
  endSpringDOY = 182,
  startFallDOY = 213
)
Arguments
d: data.frame with columns 'datetime', 'year', and 'value'; 'value' being daily minimum temperature, see details
returnDailyPr: optionally return list with daily summaries
minDays: min number of days of non-NA data in spring | fall, required for a reasonable estimate of FFD
frostTemp: critical temperature that defines "frost" (same units as d$value)
endSpringDOY: day of year that marks end of "spring" (typically Jan 1 -- June 30)
startFallDOY: day of year that marks start of "fall" (typically Aug 1 -- Dec 31)
Value
a data.frame when returnDailyPr=FALSE, otherwise a list with the following elements:
summary: FFD summary statistics as a data.frame
fm: frost matrix
Pr.frost: Pr(frost|day): daily probability of frost
Details
The default frostTemp=32 is suitable for use with minimum daily temperatures in degrees Fahrenheit. Use frostTemp = 0 for temperatures in degrees Celsius.
Author
D.E. Beaudette
# 11 years of data from highland meadows
data('HHM', package = 'sharpshootR')
x.ffd <- FFD(HHM, returnDailyPr = FALSE, frostTemp = 32)

str(x.ffd)
#> 'data.frame': 1 obs. of 10 variables:
#> $ ffd.50 : num 80
#> $ ffd.80 : num 70
#> $ ffd.90 : num 60
#> $ spring.50: num 165
#> $ spring.80: num 168
#> $ spring.90: num 169
#> $ fall.50 : num 245
#> $ fall.80 : num 238
#> $ fall.90 : num 228
#> $ n.yrs : num 6
11 years of climate data from the Highland Meadows weather station, as maintained by CA DWR.
data("HHM")+
data("HHM")
A data frame with 3469 observations on the following 12 variables:
station_id: a character vector
dur_code: a character vector
sensor_num: a numeric vector
sensor_type: a character vector
value: a numeric vector
flag: a character vector
units: a character vector
datetime: a POSIXct
year: a numeric vector
month: a factor with levels January, February, March, April, May, June, July, August, September, October, November, December
water_year: a numeric vector
water_day: a numeric vector
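A quick look at the period of record and the overall pattern in these daily values; a minimal sketch using base graphics (the value column holds daily minimum air temperature in degrees F, as used in the FFD() example):
data("HHM", package = 'sharpshootR')

# period of record
range(HHM$datetime)

# daily minimum air temperature over time
plot(value ~ datetime, data = HHM, type = 'l',
     xlab = '', ylab = 'Daily Minimum Air Temperature (deg F)')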
Source: R/HenryTimeLine.R
HenryTimeLine.Rd
This function generates a simple chart of start/end dates for non-NA sensor data returned by soilDB::fetchHenry(). Data are organized according to sensor name + sensor depth.
HenryTimeLine(sensor_data, ...)
Arguments
sensor_data: soiltemp, soilVWC, or related data returned by soilDB::fetchHenry()
...: additional arguments to latticeExtra::segplot
Value
a lattice graphics object
Note
This function does not symbolize sections of missing data between the first and last record.
Author
D.E. Beaudette
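A minimal sketch, assuming access to the Henry Mount soil climate database via soilDB; the project code below is hypothetical, any fetchHenry() result with sensor data will work:
library(soilDB)
library(sharpshootR)

# hypothetical soil climate monitoring project
x <- fetchHenry(project = 'CA792', what = 'soiltemp', gran = 'day')

# start/end dates of non-NA records, grouped by sensor name + depth
HenryTimeLine(x$soiltemp, main = 'Soil Temperature Sensors')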
Uses latitude and longitude coordinates to return the PLSS section geometry from the BLM PLSS web service.
LL2PLSS(x, y, returnlevel = "I")- -
x | -longitude coordinates |
-
---|---|
y | -lattitude coordinates |
-
returnlevel | -'S' for "Section" or 'I' for "Intersection" (subsections) |
-
list
of of PLSS codes and coordinates.
This function takes xy coordinates and returns the PLSS section geometry to the quarter-quarter section. returnlevel
options are defaulted to 'I' which returns smallest intersected sectional aliquot geometry, 'S' will return the section geometry of the coordinates. See https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer for details.
LL2PLSS(x, y, returnlevel = c("I", "S"))
This function requires the following packages: httr
, jsonlite
, and sp
.
sf
object with geometry and PLSS definition.
This function accepts geographic coordinates and returns the PLSS fabric geometry to the quarter-quarter section. returnlevel
defaults to 'I' which returns smallest intersected sectional aliquot geometry, 'S' will return the section geometry of the coordinates. See https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer for details.
This function requires the following packages: httr
, jsonlite
, and sp
.
D.E. Beaudette, Jay Skovlin, A.G. Brown
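A minimal sketch, assuming internet access to the BLM web service; the coordinates are hypothetical (central Sierra Nevada foothills, WGS84 longitude / latitude):
library(sharpshootR)

if (requireNamespace("curl") && curl::has_internet()) {
  
  # smallest intersected sectional aliquot containing this point
  p <- LL2PLSS(x = -120.54, y = 38.06)
  
  # PLSS definition + geometry
  p
}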
Generate a plot representing percentiles of cumulative precipitation, given a historic record, and criteria for selecting a year of data for comparison.
PCP_plot(
  x,
  this.year,
  this.day = NULL,
  method = "exemplar",
  q.color = "RoyalBlue",
  c.color = "firebrick",
  ...
)
Arguments
x: result from CDECquery, for now; will need to generalize to other sources
this.year: a single water year, e.g. 2020
this.day: optional integer representing days since start of selected water year
method: 'exemplar' or 'daily', currently 'exemplar' is the only method available
q.color: color of cumulative precipitation percentiles
c.color: color of selected year
...: additional arguments to plot
Value
nothing, this function is called to create graphical output
Details
This is very much a work in progress. Further examples at https://ncss-tech.github.io/AQP/sharpshootR/CDEC.html, and https://ncss-tech.github.io/AQP/sharpshootR/cumulative-PPT.html.
Author
D.E. Beaudette
# \donttest{
if(requireNamespace("curl") &
   curl::has_internet()
) {
  
  s <- 'SPW'
  
  # get station metadata
  s.info <- CDEC_StationInfo(s)
  
  # format title for cumulative PPT
  title.text <- sprintf("%s [%s]", s.info$site.meta$Name, s)
  
  # get data
  x <- CDECquery(id=s, sensor=45, interval='D', start='2000-01-01', end='2030-01-01')
  
  # plot
  par(mar=c(4.5, 4.5, 2.5, 1.5))
  PCP_plot(x[1:(nrow(x)-60), ], ylab='Cumulative PPT (inches)', main=title.text, this.year = 2020)
  
}
# }
Fetch latitude and longitude (centroid) coordinates for coded PLSS information from the BLM PLSS web service.
PLSS2LL(p, plssid = "plssid")- -
p | -data.frame with chunks of PLSS coordinates |
-
---|---|
plssid | -Column name containing PLSS ID (default: |
-
PLSS2LL(p, plssid = "plssid")
data.frame
with chunks of PLSS definition
A data.frame
of PLSS codes and coordinates.
This function expects that the dataframe will have a 'plssid' column generated by the formatPLSS
function. Requires the following packages: httr
, and jsonlite
.
column name containing PLSS ID
A data.frame
of PLSS codes and coordinates.
This function expects that the dataframe will have a 'plssid' column generated by the formatPLSS
function. Requires the following packages: httr
, and jsonlite
.
D.E. Beaudette, Jay Skovlin, A.G. Brown
Source: R/SoilTaxonomyDendrogram.R
SoilTaxonomyDendrogram.Rd
Plot a dendrogram based on the first 4 levels of Soil Taxonomy, with soil profiles hanging below. A dissimilarity matrix is computed using Gower's distance metric for nominal (KST.order = FALSE) or ordinal (KST.order = TRUE) scale variables, based on soil order, suborder, greatgroup, and subgroup taxa.
SoilTaxonomyDendrogram(
  spc,
  KST.order = TRUE,
  rotationOrder = NULL,
  level = c(soilorder = "soilorder", suborder = "suborder", greatgroup = "greatgroup",
    subgroup = "subgroup"),
  cluster.method = c("divisive", "agglomerative"),
  cluster.args = list(),
  name = "hzname",
  name.style = "center-center",
  id.style = "side",
  max.depth = max(spc),
  n.depth.ticks = 6,
  scaling.factor = 0.015,
  cex.names = 0.75,
  cex.id = 0.75,
  axis.line.offset = -4,
  width = 0.1,
  y.offset = 0.5,
  shrink = FALSE,
  font.id = 2,
  cex.taxon.labels = 0.66,
  dend.color = par("fg"),
  dend.width = 1,
  ...
)
Arguments
spc: a SoilProfileCollection object, typically returned by soilDB::fetchOSD
KST.order: logical, encode / cluster taxa via ordinal factors, based on ordering within Keys to Soil Taxonomy
rotationOrder: character vector of profile IDs with desired ordering of leaves in the dendrogram from left to right; exact ordering is not always possible
level: character, one or more site-level columns in spc. Default: "soilorder", "suborder", "greatgroup" and "subgroup"
cluster.method: either "divisive" (cluster::diana(); default) or "agglomerative" (cluster::agnes())
cluster.args: optional, additional arguments for the cluster::diana() or cluster::agnes() cluster methods
name: column name containing horizon names
name.style: passed to aqp::plotSPC
id.style: passed to aqp::plotSPC
max.depth: depth at which profiles are truncated for plotting
n.depth.ticks: suggested number of ticks on the depth axis
scaling.factor: scaling factor used to convert depth units into plotting units
cex.names: character scaling for horizon names
cex.id: character scaling for profile IDs
axis.line.offset: horizontal offset for depth axis
width: width of profiles
y.offset: vertical offset between dendrogram and profiles
shrink: logical, should long horizon names be shrunk by 80%?
font.id: font style applied to profile id, default is 2 (bold)
cex.taxon.labels: character scaling for taxonomic information
dend.color: dendrogram line color
dend.width: dendrogram line width
...: additional arguments to aqp::plotSPC
Value
An invisibly-returned list containing:
dist: pair-wise dissimilarity matrix
order: final ordering of hclust leaves
Details
This function looks for specific site-level attributes named "soilorder", "suborder", "greatgroup", and "subgroup", or their NASIS physical column name analogues "taxorder", "taxsuborder", "taxgrtgroup", and "taxsubgrp". See https://github.com/ncss-tech/sharpshootR/blob/master/misc/soilTaxonomyDendrogram-examples.R for some examples.
The rotationOrder argument uses ape::rotateConstr() to reorder leaves within the hclust representation of the ST hierarchy. Perfect sorting is not always possible.
Author
D.E. Beaudette
# built-in data, same as results from soilDB::fetchOSD()
data("OSDexamples")

# examples using first 8 profiles

# KST-style ordering
SoilTaxonomyDendrogram(
  OSDexamples$SPC[1:8, ], width = 0.3, name.style = 'center-center',
  KST.order = TRUE
)

# classic ordering, based on nominal scale variables (unordered factors)
SoilTaxonomyDendrogram(
  OSDexamples$SPC[1:8, ], width = 0.3, name.style = 'center-center',
  KST.order = FALSE
)
Generate a plot from summaries generated by aqp::aggregateColor().
aggregateColorPlot(
  x,
  print.label = TRUE,
  label.font = 1,
  label.cex = 0.65,
  label.orientation = c("v", "h"),
  buffer.pct = 0.02,
  print.n.hz = FALSE,
  rect.border = "black",
  horizontal.borders = FALSE,
  horizontal.border.lwd = 2,
  x.axis = TRUE,
  y.axis = TRUE,
  ...
)
Arguments
x: a list, results from aqp::aggregateColor()
print.label: logical, print Munsell color labels inside of rectangles, only if they fit
label.font: font specification for color labels
label.cex: font size for color labels
label.orientation: label orientation, v for vertical or h for horizontal
buffer.pct: extra space between labels and color rectangles
print.n.hz: optionally print the number of horizons below Munsell color labels
rect.border: color for rectangle border
horizontal.borders: optionally add horizontal borders between bands of color
horizontal.border.lwd: line width for horizontal borders
x.axis: logical, add a scale and label to x-axis?
y.axis: logical, add group labels to y-axis?
...: additional arguments passed to plot
Value
nothing, function called for graphical output
Details
Tutorial at http://ncss-tech.github.io/AQP/sharpshootR/aggregate-soil-color.html.
Author
D.E. Beaudette
# \donttest{

if(require(aqp) &
   require(soilDB)) {
  
  data(loafercreek, package = 'soilDB')
  
  # generalize horizon names using REGEX rules
  n <- c('Oi', 'A', 'BA','Bt1','Bt2','Bt3','Cr','R')
  p <- c('O', '^A$|Ad|Ap|AB','BA$|Bw',
         'Bt1$|^B$','^Bt$|^Bt2$','^Bt3|^Bt4|CBt$|BCt$|2Bt|2CB$|^C$','Cr','R')
  loafercreek$genhz <- generalize.hz(loafercreek$hzname, n, p)
  
  # remove non-matching generalized horizon names
  loafercreek$genhz[loafercreek$genhz == 'not-used'] <- NA
  loafercreek$genhz <- factor(loafercreek$genhz)
  
  # aggregate color data, this function is from the `aqp` package
  a <- aggregateColor(loafercreek, 'genhz')
  
  # plot
  op <- par(no.readonly = TRUE)
  
  par(mar=c(4,4,1,1))
  
  # vertical labels, the default
  aggregateColorPlot(a, print.n.hz = TRUE)
  
  # horizontal labels
  aggregateColorPlot(a, print.n.hz = TRUE, label.orientation = 'h')
  
  par(op)
  
}
#> Loading required package: aqp
#> This is aqp 2.0
#> Loading required package: soilDB
# }
SSURGO Data Associated with the Amador Soil Series
data(amador)
A subset of data taken from the "component" table of SSURGO
USDA-NRCS SSURGO Database
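A quick look at the structure of the included table; the columns follow the SSURGO component table:
data(amador, package = 'sharpshootR')

# map unit keys, component names, and component percentages
str(amador)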
Plot a graphical summary of multiple aspect measurements on a circular diagram.
aspect.plot(p, q=c(0.05, 0.5, 0.95), p.bins = 60, p.bw = 30, stack=TRUE,
            p.axis = seq(0, 350, by = 10), plot.title = NULL,
            line.col='RoyalBlue', line.lwd=1, line.lty=2,
            arrow.col=line.col, arrow.lwd=1, arrow.lty=1,
            arrow.length=0.15,
            ...)
Arguments
p: a vector of aspect angles in degrees, measured clock-wise from North
q: a vector of desired quantiles
p.bins: number of bins to use for circular histogram
p.bw: bandwidth used for circular density estimation
stack: logical, should the individual points be stacked into p.bins number of bins and plotted
p.axis: a sequence of integers (degrees) describing the circular axis
plot.title: an informative title
line.col: density line color
line.lwd: density line width
line.lty: density line style
arrow.col: arrow color
arrow.lwd: arrow line width
arrow.lty: arrow line style
arrow.length: arrow head length
...: further arguments passed to circular::plot.circular
Value
invisibly returns circular stats
Details
Spread and central tendency are depicted with a combination of circular histogram and kernel density estimate. The circular mean, and relative confidence in that mean, are depicted with an arrow: longer arrow lengths correspond to greater confidence in the mean.
Note
Manual adjustment of p.bw may be required in order to get an optimal circular density plot. This function requires the package circular, version 0.4-7 or later.
Author
D.E. Beaudette
# simulate some data
p.narrow <- runif(n=25, min=215, max=280)
p.wide <- runif(n=25, min=0, max=270)

# set figure margins to 0, 2-column plot
op <- par(no.readonly = TRUE)
par(mar = c(0,0,0,0), mfcol = c(1,2))

# plot, save circular stats
x <- aspect.plot(p.narrow, p.bw=10, plot.title='Soil A', pch=21, col='black', bg='RoyalBlue')
y <- aspect.plot(p.wide, p.bw=10, plot.title='Soil B', pch=21, col='black', bg='RoyalBlue')

# reset output device options
par(op)

x
#> Circular Data:
#> Type = angles
#> Units = degrees
#> Template = geographics
#> Modulo = 2pi
#> Zero = 1.570796
#> Rotation = clock
#> 5% 50% 95%
#> 274.1839 245.3117 217.7417
#> attr(,"uniformity")
#> Rayleigh Uniformity p Value
#> 9.521243e-01 7.135418e-10
Source: R/colorMixtureVenn.R
colorMixtureVenn.Rd
Create a Venn Diagram of Simulated Color Mixtures
Arguments
chips: character vector of standard Munsell color notation (e.g. "10YR 3/4")
w: vector of proportions, can sum to any number, must be same length as chips
mixingMethod: approach used to simulate a mixture, see aqp::mixMunsell for details
ellipse: logical, use alternative ellipse-style (4 or 5 colors only)
labels: logical, print mixture labels
names: logical, print names outside of the "sets"
scaling factor for set names
Value
nothing returned, function is called to create graphical output
if(requireNamespace("venn") & requireNamespace("gower")) {

chips <- c('10YR 8/1', '2.5YR 3/6', '10YR 2/2')
names(chips) <- c("tan", "dark red", "dark brown")

colorMixtureVenn(chips)
colorMixtureVenn(chips, names = TRUE)

colorMixtureVenn(chips, w = c(1, 1, 1), names = TRUE)
colorMixtureVenn(chips, w = c(10, 5, 1), names = TRUE)

}
#> Loading required namespace: venn
Source: R/component.adj.matrix.R
component.adj.matrix.Rd
Create an adjacency matrix from SSURGO component data
component.adj.matrix(
  d,
  mu = "mukey",
  co = "compname",
  wt = "comppct_r",
  method = c("community.matrix", "occurrence"),
  standardization = "max",
  metric = "jaccard",
  rm.orphans = TRUE,
  similarity = TRUE,
  return.comm.matrix = FALSE
)
Arguments
d: a data.frame, typically of SSURGO data
mu: name of the column containing the map unit ID (typically 'mukey')
co: name of the column containing the component ID (typically 'compname')
wt: name of the column containing the component weight percent (typically 'comppct_r')
method: one of either community.matrix or occurrence; see details
standardization: community matrix standardization method, passed to vegan::decostand
metric: community matrix dissimilarity metric, passed to vegan::vegdist
rm.orphans: logical, should map units with a single component be omitted? (typically yes)
similarity: logical, return a similarity matrix? (if FALSE, a distance matrix is returned)
return.comm.matrix: logical, return pseudo-community matrix? (if TRUE no adjacency matrix is created)
Value
a similarity matrix / adjacency matrix suitable for use with igraph functions or anything else that can accommodate a similarity matrix.
Details
Pending...
Author
D.E. Beaudette
+if (requireNamespace("igraph")) {
+ # load sample data set
+ data(amador)
+
+ # convert into adjacency matrix
+ m <- component.adj.matrix(amador)
+
+ # plot network diagram, with Amador soil highlighted
+ plotSoilRelationGraph(m, s = 'amador')
+}
+#> Loading required namespace: igraph
+
+
Perform sampling at a constant density over all polygons within a SpatialPolygonsDataFrame object.
constantDensitySampling(x, polygon.id='pID', parallel=FALSE, cores=NULL,
                        n.pts.per.ac=1, min.samples=5, sampling.type='regular')
Arguments
x: a SpatialPolygonsDataFrame object in a projected CRS with units of meters
polygon.id: name of attribute in x that contains a unique ID for each polygon
parallel: invoke parallel back-end
cores: number of CPU cores to use for parallel operation
n.pts.per.ac: requested sampling density in points per acre (results will be close)
min.samples: minimum requested number of samples per polygon
sampling.type: sampling type
Value
a SpatialPointsDataFrame object
Note
This function expects that x has coordinates associated with a projected CRS and units of meters.
Author
D.E. Beaudette
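A minimal, self-contained sketch of the expected workflow; the square polygon, its pID attribute, and the UTM CRS below are hypothetical, any SpatialPolygonsDataFrame in a meter-based projected CRS should work:
library(sp)
library(sharpshootR)

# hypothetical 200 m x 200 m square (~10 ac) in a meter-based projected CRS
p <- Polygon(cbind(c(0, 200, 200, 0, 0), c(0, 0, 200, 200, 0)))
pp <- Polygons(list(p), ID = 'a')
s.polys <- SpatialPolygons(list(pp), proj4string = CRS('+proj=utm +zone=10 +datum=NAD83 +units=m'))

# attribute table supplying the unique polygon ID referenced by `polygon.id`
spdf <- SpatialPolygonsDataFrame(s.polys, data = data.frame(pID = 'a', row.names = 'a'))

# approximately 1 point per acre, regular (grid) sampling within each polygon
s <- constantDensitySampling(spdf, polygon.id = 'pID', n.pts.per.ac = 1)

# number of sample points returned
length(s)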
Simple interface to the hydromad "leaky bucket" soil moisture model, with accommodation for typical inputs from common soil data and climate sources. Critical points along the water retention curve are specified using volumetric water content (VWC): satiation (saturation), field capacity (typically 1/3 bar suction), and permanent wilting point (typically 15 bar suction).
dailyWB(x, daily.data, id, MS.style = "default", S_0 = 0.5, M = 0, etmult = 1)
Arguments
x: a data.frame
daily.data: data.frame of daily climate data
id: character, name of column in x that is used to identify records
MS.style: moisture state classification style, see estimateSoilMoistureState
S_0: fraction of water storage filled at time = 0 (range: 0-1)
M: fraction of area covered by deep-rooted vegetation
etmult: multiplier for PET
Value
a data.frame
References
Farmer, D., M. Sivapalan, Farmer, D. (2003). Climate, soil and vegetation controls upon the variability of water balance in temperate and semiarid landscapes: downward approach to water balance analysis. Water Resources Research 39(2), p 1035.
Bai, Y., T. Wagener, P. Reed (2009). A top-down framework for watershed model evaluation and selection under uncertainty. Environmental Modelling and Software 24(8), pp. 901-916.
Source: R/dailyWB_SSURGO.R
dailyWB_SSURGO.Rd
Perform daily water balance modeling using SSURGO and DAYMET
Pending.
dailyWB_SSURGO(
  x,
  cokeys = NULL,
  start = 1988,
  end = 2018,
  modelDepth = 100,
  MS.style = "default",
  a.ss = 0.1,
  S_0 = 0.5,
  bufferRadiusMeters = 1
)
Arguments
x: sf object representing a single point
cokeys: vector of component keys to use
start: starting year (limited to DAYMET holdings)
end: ending year (limited to DAYMET holdings)
modelDepth: soil depth used for water balance, see details
MS.style: moisture state classification style, see estimateSoilMoistureState
a.ss: recession coefficients for subsurface flow from saturated zone, should be > 0 (range: 0-1)
S_0: fraction of water storage filled at time = 0 (range: 0-1)
bufferRadiusMeters: spatial buffer (meters) applied to x for the look-up of SSURGO data
Value
data.frame of daily water balance results
References
Farmer, D., M. Sivapalan, Farmer, D. (2003). Climate, soil and vegetation controls upon the variability of water balance in temperate and semiarid landscapes: downward approach to water balance analysis. Water Resources Research 39(2), p 1035.
Author
D.E. Beaudette
Source: R/diagnosticPropertyPlot.R
diagnosticPropertyPlot.Rd
Generate a graphical description of the presence/absence of soil diagnostic properties.
diagnosticPropertyPlot(
  f,
  v,
  k,
  grid.label = "pedon_id",
  dend.label = "pedon_id",
  sort.vars = TRUE
)
Arguments
f: a SoilProfileCollection object
v: character vector of site-level attribute names of logical type
k: an integer, number of groups to highlight
grid.label: the name of a site-level attribute (usually unique) annotating the y-axis of the grid
dend.label: the name of a site-level attribute (usually unique) annotating dendrogram terminal leaves
sort.vars: sort variables according to natural clustering (TRUE), or use the supplied ordering in v
Value
a list is silently returned by this function, containing:
rd: a data.frame containing IDs and grouping code
profile.order: a vector containing the order of soil profiles (row-order in figure), according to diagnostic property values
var.order: a vector containing the order of variables (column-order in figure), according to their distribution among profiles
Details
This function attempts to display several pieces of information within a single figure. First, soil profiles are sorted according to the presence/absence of diagnostic features named in v. Second, these diagnostic features are sorted according to their distribution among soil profiles. Third, a binary grid is established with row-ordering of profiles based on step 1 and column-ordering based on step 2. Blue cells represent the presence of a diagnostic feature. Soils with similar diagnostic features should 'clump' together. See examples below.
Author
D.E. Beaudette and J.M. Skovlin
Examples

# \donttest{

if(require(aqp) &
   require(soilDB) &
   require(latticeExtra)
) {

  # sample data, an SPC
  data(gopheridge, package='soilDB')

  # get depth class
  sdc <- getSoilDepthClass(gopheridge, name = 'hzname')
  site(gopheridge) <- sdc

  # diagnostic properties to consider, no need to convert to factors
  v <- c('lithic.contact', 'paralithic.contact', 'argillic.horizon',
         'cambic.horizon', 'ochric.epipedon', 'mollic.epipedon', 'very.shallow',
         'shallow', 'mod.deep', 'deep', 'very.deep')

  # base graphics
  x <- diagnosticPropertyPlot(gopheridge, v, k=5)

  # lattice graphics
  x <- diagnosticPropertyPlot2(gopheridge, v, k=3)

  # check output
  str(x)

}
#> Loading required package: latticeExtra
#> Loading required package: lattice
#> List of 3
#>  $ rd           :'data.frame': 52 obs. of 3 variables:
#>   ..$ peiid   : chr [1:52] "1137354" "1147151" "1147190" "242808" ...
#>   ..$ pedon_id: chr [1:52] "08DWB028" "07RJV098" "07RJV099" "S2007CA009002" ...
#>   ..$ g       : int [1:52] 1 2 2 1 1 1 1 1 1 1 ...
#>  $ profile.order: int [1:52] 1 4 6 7 8 9 15 16 17 21 ...
#>  $ var.order    : int [1:8] 1 3 5 8 2 4 6 7
# }
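The row/column ordering described in Details can be illustrated without soil data. The following is only a sketch of the idea, using a made-up logical matrix and base R clustering; it is not the package's internal code.

# made-up presence/absence matrix: 10 hypothetical profiles x 6 properties
set.seed(1)
m <- matrix(
  runif(60) > 0.5, nrow = 10,
  dimnames = list(paste0('pedon-', 1:10),
                  c('lithic', 'argillic', 'mollic', 'cambic', 'shallow', 'deep'))
)

# order profiles (rows) and properties (columns) via hierarchical clustering
# of binary distances, analogous to the row / column ordering in the figure
row.order <- hclust(dist(m, method = 'binary'))$order
col.order <- hclust(dist(t(m), method = 'binary'))$order

# re-ordered binary grid: TRUE cells correspond to the blue cells in the figure
m[row.order, col.order]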
This function computes Euclidean distance along points aligned to a given gradient (e.g. elevation).

Usage

dist.along.grad(coords, var, grad.order, grad.scaled.min, grad.scaled.max)

Arguments

coords: a matrix of x and y coordinates in some projected coordinate system
var: a vector of the same length as coords, describing the gradient of interest
grad.order: a vector of integers defining the ordering of coordinates along the gradient
grad.scaled.min: minimum value of the rescaled gradient values
grad.scaled.max: maximum value of the rescaled gradient values

Details

This function is primarily intended for use within plotTransect.

Value

A data.frame object containing:

scaled gradient values
cumulative distance, rescaled to the interval [0.5, nrow(coords) + 0.5]
cumulative distance computed along the gradient, e.g. transect distance
sorted gradient values
x coordinates, ordered by gradient values
y coordinates, ordered by gradient values
a vector index describing the sort order defined by the gradient values

Author

D.E. Beaudette

Note

This function is very much a work in progress; ideas welcome.
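A minimal stand-alone sketch of a direct call; the coordinates, elevations, and scaling limits below are arbitrary, and in practice this function is usually called for you by plotTransect.

library(sharpshootR)

# made-up projected coordinates (e.g. meters) and an elevation gradient
xy <- cbind(
  x = c(100, 220, 340, 460, 580),
  y = c(50, 90, 60, 120, 80)
)
elev <- c(350, 420, 380, 500, 460)

# order the points along the elevation gradient, then compute cumulative
# transect distance and gradient values rescaled to an arbitrary interval
d <- dist.along.grad(
  coords = xy,
  var = elev,
  grad.order = order(elev),
  grad.scaled.min = 0.5,
  grad.scaled.max = 5.5
)

d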
Graphically compare two related dendrograms.

Usage

dueling.dendrograms(
  p.1,
  p.2,
  lab.1 = "D1",
  lab.2 = "D2",
  cex.nodelabels = 0.75,
  arrow.length = 0.05
)

Arguments

p.1: left-hand phylo-class dendrogram
p.2: right-hand phylo-class dendrogram
lab.1: left-hand title
lab.2: right-hand title
cex.nodelabels: character expansion size for node labels
arrow.length: arrow head size

Value

Nothing is returned; the function is called to generate graphical output.

Details

Connector arrows are used to link nodes from the left-hand dendrogram to the right-hand dendrogram.

Author

D.E. Beaudette

Examples

if(require(aqp) &
   require(cluster) &
   require(latticeExtra) &
   require(ape)
) {

  # load sample dataset from aqp package
  data(sp3)

  # promote to SoilProfileCollection
  depths(sp3) <- id ~ top + bottom

  # compute dissimilarity using different sets of variables
  # note that these are rescaled to the interval [0,1]
  d.1 <- profile_compare(sp3, vars=c('clay', 'cec'), k=0, max_d=100, rescale.result=TRUE)
  d.2 <- profile_compare(sp3, vars=c('clay', 'L'), k=0, max_d=100, rescale.result=TRUE)

  # cluster via divisive hierarchical algorithm
  # convert to 'phylo' class
  p.1 <- as.phylo(as.hclust(diana(d.1)))
  p.2 <- as.phylo(as.hclust(diana(d.2)))

  # graphically compare two dendrograms
  dueling.dendrograms(p.1, p.2, lab.1='clay and CEC', lab.2='clay and L')

  # graphically check the results of ladderize() from ape package
  dueling.dendrograms(p.1, ladderize(p.1), lab.1='standard', lab.2='ladderized')

  # sanity-check: compare something to itself
  dueling.dendrograms(p.1, p.1, lab.1='same', lab.2='same')

  # graphically compare diana() to agnes() using d.2
  dueling.dendrograms(as.phylo(as.hclust(diana(d.2))),
                      as.phylo(as.hclust(agnes(d.2))), lab.1='diana', lab.2='agnes')
}
#> Loading required package: cluster
#> Loading required package: ape
#> Computing dissimilarity matrices from 10 profiles
#> [0.22 Mb]
#> Computing dissimilarity matrices from 10 profiles
#> [0.22 Mb]
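A smaller, self-contained sketch using a made-up data matrix: the only requirement suggested by the arguments above is a pair of phylo-class dendrograms, here derived from hclust objects; the choice of euclidean vs. manhattan distance is arbitrary.

library(sharpshootR)
library(ape)

# small made-up data matrix: 10 observations x 5 variables
set.seed(10)
m <- matrix(rnorm(50), nrow = 10,
            dimnames = list(letters[1:10], paste0('v', 1:5)))

# two related dendrograms: same observations, different distance metrics,
# converted from hclust to phylo class
p.1 <- as.phylo(hclust(dist(m, method = 'euclidean')))
p.2 <- as.phylo(hclust(dist(m, method = 'manhattan')))

# compare the effect of the distance metric on clustering structure
dueling.dendrograms(p.1, p.2, lab.1 = 'euclidean', lab.2 = 'manhattan')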
Source: R/estimateSoilMoistureState.R
estimateSoilMoistureState.Rd
This is a very simple classification of volumetric water content (VWC) into 5 "moisture states", based on an interpretation of water retention thresholds. Classification is performed using VWC at satiation, field capacity (typically 1/3 bar suction), permanent wilting point (typically 15 bar suction), and water surplus in mm. The inputs to this function are closely aligned with the assumptions and output from hydromad::hydromad(sma = 'bucket', ...).

Soil moisture classification rules are as follows (a stand-alone sketch of these rules follows the list):

VWC <= pwp: "very dry"
VWC > pwp AND VWC <= (mid-point between fc and pwp): "dry"
VWC > (mid-point between fc and pwp) AND VWC <= fc: "moist"
VWC > fc: "very moist"
VWC > fc AND U (surplus) > 4mm: "wet"
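The default-style rules above can be restated as a short, package-independent function. This is only an illustrative sketch: classifyVWC is a made-up helper name, and the real function's "newhall" style and internal details are not reproduced here.

# illustrative restatement of the default-style rules; not the package's
# internal implementation
classifyVWC <- function(VWC, U, fc, pwp) {
  mid <- (fc + pwp) / 2
  state <- rep('very dry', length(VWC))     # VWC <= pwp
  state[VWC > pwp & VWC <= mid] <- 'dry'
  state[VWC > mid & VWC <= fc]  <- 'moist'
  state[VWC > fc]               <- 'very moist'
  state[VWC > fc & U > 4]       <- 'wet'
  factor(state, levels = c('very dry', 'dry', 'moist', 'very moist', 'wet'), ordered = TRUE)
}

# thresholds matching the examples below: fc = 0.25, pwp = 0.15
classifyVWC(VWC = c(0.15, 0.18, 0.30, 0.30), U = c(0, 0, 0, 5), fc = 0.25, pwp = 0.15)
# very dry, dry, very moist, wet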
Usage

estimateSoilMoistureState(
  VWC,
  U,
  sat,
  fc,
  pwp,
  style = c("default", "newhall")
)
Arguments

VWC: vector of volumetric water content (VWC), range is 0-1
U: vector of surplus water (mm)
sat: satiation water content, range is 0-1
fc: field capacity water content, range is 0-1
pwp: permanent wilting point water content, range is 0-1
style: VWC classification style

Value

vector of moisture states (ordered factor)

Author

D.E. Beaudette
++-# "very moist" -estimateSoilMoistureState(VWC = 0.3, U = 0, sat = 0.35, fc = 0.25, pwp = 0.15) -#> [1] very moist -#> Levels: very dry < dry < moist < very moist < saturated / runoff-# "very dry" -estimateSoilMoistureState(VWC = 0.15, U = 0, sat = 0.35, fc = 0.25, pwp = 0.15) -#> [1] very dry -#> Levels: very dry < dry < moist < very moist < saturated / runoff-
+# "very moist"
+estimateSoilMoistureState(VWC = 0.3, U = 0, sat = 0.35, fc = 0.25, pwp = 0.15)
+#> [1] very moist
+#> Levels: very dry < dry < moist < very moist < wet
+estimateSoilMoistureState(VWC = 0.3, U = 2, sat = 0.35, fc = 0.25, pwp = 0.15)
+#> [1] very moist
+#> Levels: very dry < dry < moist < very moist < wet
+
+"wet"
+#> [1] "wet"
+estimateSoilMoistureState(VWC = 0.3, U = 5, sat = 0.35, fc = 0.25, pwp = 0.15)
+#> [1] wet
+#> Levels: very dry < dry < moist < very moist < wet
+
+# "very dry"
+estimateSoilMoistureState(VWC = 0.15, U = 0, sat = 0.35, fc = 0.25, pwp = 0.15)
+#> [1] very dry
+#> Levels: very dry < dry < moist < very moist < wet
+
+# "dry"
+estimateSoilMoistureState(VWC = 0.18, U = 0, sat = 0.35, fc = 0.25, pwp = 0.15)
+#> [1] dry
+#> Levels: very dry < dry < moist < very moist < wet
+
+
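Because VWC and U are documented as vector arguments, the same call works on a daily series. A short sketch with made-up values; the expected states simply follow from the rules listed above.

# made-up daily series of VWC and surplus, same thresholds as the examples above
vwc <- c(0.32, 0.28, 0.22, 0.19, 0.14)
u   <- c(6, 1, 0, 0, 0)

estimateSoilMoistureState(VWC = vwc, U = u, sat = 0.35, fc = 0.25, pwp = 0.15)
# expected, per the rules above: wet, very moist, moist, dry, very dry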