Commit 51e1713b authored by sonitaR

uploaded files to work on Friday

parent 5ec55224
#####################################################
# MULTIVARIATE STATISTICS #
#####################################################
#####################################################
# PART 4. BIODIVERSITY INDICES #
#####################################################
#####################################################
# E5. FUNCTIONAL DIVERSITY EXERCISES #
#####################################################
#####################################################################################################
## PART 1 - Ordination of species according to morphological traits #
#####################################################################################################
rm(list=ls())
# 1.1. ORDINATION OF SPECIES BASED ON MORPHOLOGICAL TRAITS RELATED TO SWIMMING PERFORMANCE
setwd("c:/multivar")
traits<-read.delim("TraitsRevision.txt",h=T,dec=".")
traits <- data.frame(traits[,-1], row.names=traits[,1])
library(ggplot2)
library(grid)
library(vegan)
library(ade4)
library(ggthemes)
# A. RUN THE ORDINATION
traits<-scale(traits)
d<-dist(traits,method = "euclidean")
pcoa<-dudi.pco(d,scannf = F,full = T)
# B. EXTRACT EIGEN VALUES
eig<-pcoa$eig
eig/sum(eig)
cumsum(eig/sum(eig))
# How much variability is captured by the axes?
# How many axes should be retained?
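# (Optional aid, not part of the original exercise) one common rule of thumb is to
# keep the axes whose relative eigenvalue exceeds the broken-stick expectation;
# vegan's bstick() gives those expected proportions. A minimal sketch:
bp <- barplot(eig/sum(eig), names.arg = seq_along(eig),
              xlab = "PCoA axis", ylab = "Proportion of variance explained")
points(bp, bstick(length(eig)), type = "b", pch = 16)
# axes rising above the broken-stick points are usually worth retaining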
# C. PLOTTING
# EXTRACT SCORES FOR SPECIES AND VECTORS (in this case vectors = traits)
scrs<- as.data.frame(scores(pcoa, display = "sites"))
vf <- envfit(pcoa, traits, perm = 999)
spp.scrs <- as.data.frame(scores(vf, display = "vectors"))
# PLOTTING VECTORS FIRST
# CREATE FULL LENGTH NAMES OF TRAITS
spp.scrs <- cbind(spp.scrs,
Traits=c("Pectoral fin position","Aspect ratio of pectoral fin","Caudal peduncle throttling",
"Aspect ratio of caudal fin", "Fin surface ratio", "Head length","Narrowest point on caudal peduncle",
"Body aspect ratio","Medial caudal fin ray length","Head depth"))
VectorPlot<-ggplot(data = scrs, aes(A1, A2)) +
geom_segment(data=spp.scrs,aes(x=0,xend=A1,y=0,yend=A2),
arrow = arrow(length = unit(0, "cm")),colour="darkgrey") +
geom_text(data=spp.scrs,aes(x=A1+0.06,y=A2,label=Traits),size=6)+
xlim(-1,1.3)+
ylim(-1,1.3)+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(),
panel.background = element_rect(fill = "white"),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.text = element_blank(),
axis.line=element_blank(),
axis.ticks=element_blank())
VectorPlot
# 1.2. WARD'S CLUSTERING OF SPECIES AND PLOTTING THE SPECIES ORDINATION
# WITH CLUSTERING INFORMATION OVERLAID
par(mfrow=c(1,1))
clust <- hclust(d, method="ward.D2")
# Determining the best number of clusters
# HERE WE USE THE PACKAGE NbClust TO SELECT THE BEST NUMBER OF CLUSTERS.
# THIS PACKAGE HANDLES THE AMBIGUITY AMONG THE DIFFERENT INDICES: IT COMPUTES
# MANY OF THEM AND THEN RECOMMENDS THE NUMBER OF CLUSTERS SUPPORTED BY
# THE MAJORITY OF INDICES
library(NbClust)
nb <- NbClust(traits,distance="euclidean", min.nc=2, max.nc=10,
method = "ward.D2", index = "all",alphaBeale = 0.01)
par(mfrow=c(1,1))
hist(nb$Best.nc[1,], breaks = max(na.omit(nb$Best.nc[1,])),
xlab="Number of clusters",col = "lightgrey", main='')
# The majority of indices tell us that the soundest decision is to cut the tree into 2 clusters
clusters <- cutree(clust, k=2)
clusters<-as.factor(clusters)
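# (Optional check, not in the original script) visualise the Ward dendrogram with the
# chosen two-cluster cut overlaid, to see which species fall in each group
plot(clust, hang = -1, cex = 0.8, main = "Ward clustering of species")
rect.hclust(clust, k = 2, border = c("black", "orange"))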
# extracting the species names for plotting
speciesnames<-rownames(scrs)
# FINAL ORDINATION PLOT ###############################################
OrdiPLot<-ggplot(data = scrs, aes(A1, A2,color=clusters))+
geom_point(aes(x=A1, y=A2,shape=clusters),size=3.5)+
scale_shape_manual(values=c(15,17),guide=F)+
geom_text(aes(x=A1, y=A2+0.15,label=speciesnames),size=6)+
scale_color_manual(values=c("black","orange"),guide=FALSE)+
labs(x="PC 1 (47.8 %)",y="PC 2 (15.0 %)")+
theme(legend.position="none",
axis.text.x = element_text(size=13),
axis.text.y = element_text(size=13),
axis.title.x=element_text(size=15,vjust=-0.3),
axis.title.y=element_text(size=15,vjust=1),
plot.margin = unit(c(0.5,0.5,0.5,0.5), "cm"))
OrdiPLot
# USE GGPLOT THEME ASSISTANT TO IMPROVE THE PLOT #######################
# THE END ##############################################################
#####################################################
# MULTIVARIATE STATISTICS #
#####################################################
#####################################################
# PART 4. BIODIVERSITY INDICES #
#####################################################
#####################################################
# E5. FUNCTIONAL DIVERSITY EXERCISES #
#####################################################
#####################################################################################################
## PART 2 - Computation of functional diversity indices using Villeger's multidimFD function #
#####################################################################################################
# Note that these functions and this code are based on the freely accessible
# R scripts page of Dr. Sébastien Villéger, based at MARBEC (France)
# http://villeger.sebastien.free.fr/Rscripts.html
# 2.1. COMPUTATION OF FD USING MULTIDIM FD
rm(list=ls())
setwd("C:/multivar")
source("quality_funct_space.R")
source("plot_funct_space.R")
source("multidimFD.R")
# Import traits matrix
traits <-read.delim("TraitsRevision.txt", h=T, dec=".", row.names = 1)
# Import feeding frequency matrix
# This could also be an abundance matrix (and in general it is),
# but the novelty of our paper lies in using bite data
# For more details refer to Bejarano et al 2017:
# http://onlinelibrary.wiley.com/doi/10.1111/1365-2435.12828/full
bites <-read.delim("BitesRevision.txt", h=T, dec=".", row.names = 1)
bites <-as.matrix(bites)
# checking that species names are the same in the two matrices
sum(row.names(traits) %in% colnames(bites)) == ncol(bites)
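# (Our addition, a slightly stricter check) list any names that fail to match in
# either direction; both calls should return character(0)
setdiff(row.names(traits), colnames(bites))  # trait species absent from bites
setdiff(colnames(bites), row.names(traits))  # bite species absent from traits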
# computing all functional spaces based on PCoA (up to 10 axes)
# NOTE: you need an internet connection for the quality_funct_space function to work correctly
qual_funct_space<-quality_funct_space(traits,
traits_weights=NULL,
nbdim=10, metric="Euclidean",
dendro=F, plot="quality_funct_space")
qual_funct_space$meanSD
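# (Hedged visual aid, not part of Villeger's script) meanSD measures how faithfully each
# space reproduces the original trait distances (lower = better); plotting it helps
# justify how many axes to keep
barplot(qual_funct_space$meanSD, las = 2,
        xlab = "Functional space", ylab = "Mean squared deviation")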
# extracting species coordinates in the 4D space
coord_4D<-qual_funct_space$details_funct_space$mat_coord[,1:4]
# saving them
save(coord_4D, file="coord_4D")
# computing functional diversity indices
FD_baskets<-multidimFD(coord_4D,bites, check_species_pool=F, verb=TRUE)
# printing results = rounded FD indices values
IndicesAll<-round(FD_baskets,3)
Indices<-IndicesAll[,c(1,19,21:22)]
Indices
# exporting as a comma-separated text file (can also be opened in Excel)
setwd("C:/multivar")
write.table(Indices, file="ObservedIndicesMultidimFD.txt", sep=",")
# 2.2. CREATING FIGURES INDICATING ENVIRONMENTAL FILTERING (ONE EXAMPLE)
# Extract the species coordinates in the functional space
library(FD)
R<-dbFD(traits,print.pco=T)
newdat<-data.frame(R$x.axes)
# Global convex hull
vert.global<-data.frame(chull(newdat$A1,newdat$A2))
vert.global.coord<-newdat[c(vert.global[,1]),1:2]
######################################################################################################
# LOW WAVE EXPOSURE #
######################################################################################################
# Identity of species feeding at low exposure
sp_low<-which(bites["cam.low",]!=0)
set.low<-data.frame(newdat[sp_low,])
vert.low <- data.frame(chull(set.low$A1, set.low$A2))
vert.low.coord<-set.low[c(vert.low[,1]),1:2]
# Species not feeding at low exposure
sp_0low<-which(bites["cam.low",]==0)
set.0low<-data.frame(newdat[sp_0low,])
# Grazers feeding at low exposure
sp_graz<-which(bites["cam.low.grazer",]!=0)
set.graz<-data.frame(newdat[sp_graz,1:2])
vert.graz <- data.frame(chull(set.graz$A1, set.graz$A2))
vert.graz.coord<-set.graz[c(vert.graz[,1]),1:2]
# Scrapers feeding at low exposure
sp_scrap<-which(bites["cam.low.scrapers",]!=0)
set.scrap<-data.frame(newdat[sp_scrap,1:2])
vert.scrap <- data.frame(chull(set.scrap$A1, set.scrap$A2))
vert.scrap.coord<-set.scrap[c(vert.scrap[,1]),1:2]
# Relative frequency of bites at low exposure
nm_sp_low<-row.names(newdat)[which(bites["cam.low",]>0)]
bites_sp_low<-bites["cam.low",nm_sp_low]
rel.bites_low<-bites_sp_low/sum(bites_sp_low)
## Plot
library(ggplot2)
plotLow<-ggplot(data=newdat,(aes(x=A1,y=A2)))+
geom_polygon(data=vert.global.coord,aes(x=A1,y=A2),
fill=NA,color="lightgrey",lty=2)+
geom_polygon(data=vert.low.coord,aes(x=A1,y=A2),
fill="gray90",color="lightgrey",alpha=0.7,lwd=0.2)+
geom_polygon(data=vert.graz.coord,aes(x=A1,y=A2),
fill="#4D8963",color="#4D8963",alpha=0.3)+
geom_polygon(data=vert.scrap.coord,aes(x=A1,y=A2),
fill="#41AAC4",color="#41AAC4",alpha=0.3)+
geom_point(data=set.0low,aes(x=A1,y=A2),pch=4,cex=2)+
geom_point(data=set.low,aes(x=A1,y=A2,size=rel.bites_low),color="black",pch=16)+
scale_size_continuous(name = "Intensity",limits=c(0,0.55),breaks = c(0.1,0.2,0.3,0.4,0.5), range = c(1,10))+
scale_x_continuous(breaks = c(-3.0,-1.5,0,1.5,3.0))+
scale_y_continuous(breaks = c(-3.0,-1.5,0,1.5))+
guides(size=F)+
theme_bw()+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(),
title = element_text(vjust=1))+
labs(title= "LOW",x = "PC 1", y = "PC 2")
plotLow
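# (Hypothetical sketch, not part of the original script) the same hull extraction could
# be repeated for the other exposure levels, e.g. rows "cam.mod" and "cam.high" of the
# bites matrix; hull_for() is our own helper name
hull_for <- function(camera) {
  sp  <- which(bites[camera, ] != 0)   # species feeding at this exposure
  set <- data.frame(newdat[sp, 1:2])   # their coordinates in trait space
  set[chull(set$A1, set$A2), ]         # convex hull vertices, in plotting order
}
vert.mod.coord  <- hull_for("cam.mod")
vert.high.coord <- hull_for("cam.high")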
rows alin amac anid anif anig apyr cbic cble ccar cmic csor cstr hlon nlit nuni sarg scha scor sdim sdol sfor sfre sglo snig sovi spra spsi spue spun spus squo srub ssch sspi svul zsco zvel
com.low 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 1 0 1 1 0 1 1 1 1 1
com.mod 1 0 1 1 1 1 0 1 0 1 1 1 1 1 0 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 1 1 1 1 0
com.high 0 1 0 1 1 0 1 1 0 0 1 1 0 1 0 0 1 1 1 0 1 1 0 1 1 0 1 1 1 1 1 1 1 1 1 1 0
cam.low 0 0 0 0 0 2.966065292 0 0 6.25 10.95505618 148.9823497 486.772625 0 31.36562998 0 8.611111111 18.16603535 2.708333333 37.60421505 0 0.555555556 0 0 40.88955461 1.123595506 0 0 0 0 0 3.324468085 0 28.88678651 14.16666667 5.049398625 328.1209201 19.20532646
cam.low.grazer 0 0 0 0 0 2.966065292 0 0 0 0 0 486.772625 0 0 0 8.611111111 0 2.708333333 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5.049398625 328.1209201 19.20532646
cam.low.scrapers 0 0 0 0 0 0 0 0 0 0 148.9823497 0 0 0 0 0 18.16603535 0 37.60421505 0 0.555555556 0 0 40.88955461 1.123595506 0 0 0 0 0 3.324468085 0 28.88678651 14.16666667 0 0 0
cam.low.browser 0 0 0 0 0 0 0 0 6.25 0 0 0 0 31.36562998 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
cam.low.bioeroder 0 0 0 0 0 0 0 0 0 10.95505618 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
cam.mod 0 0 0 89.88995627 126.4268869 0 0 10.45770482 0 0 156.5449202 750.4024272 0 33.46362278 0 0 21.52141608 35.79993483 0 0 4.742268041 0 0 29.11246165 0 0 16.25675911 0.444444444 3.409090909 4.090909091 0 0 18.90969884 103.9851845 18.31521739 253.7399268 0
cam.mod.grazer 0 0 0 89.88995627 126.4268869 0 0 0 0 0 0 750.4024272 0 0 0 0 0 35.79993483 0 0 0 0 0 0 0 0 0 0.444444444 3.409090909 4.090909091 0 0 0 0 18.31521739 253.7399268 0
cam.mod.scrapers 0 0 0 0 0 0 0 10.45770482 0 0 156.5449202 0 0 0 0 0 21.52141608 0 0 0 4.742268041 0 0 29.11246165 0 0 16.25675911 0 0 0 0 0 18.90969884 103.9851845 0 0 0
cam.mod.browser 0 0 0 0 0 0 0 0 0 0 0 0 0 33.46362278 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
cam.mod.bioeroder 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
cam.high 0 0 0 612.389634 85.30493428 0 0 0.505050505 0 0 216.7820377 1139.266415 0 7.233093417 0 0 10.46445639 0 0 0 9.386200717 0 0 18.76596231 0 0 77.20504009 0 2.916666667 0 12.22222222 0 152.5304561 221.1181095 0.333333333 6.401606426 0
cam.high.grazer 0 0 0 612.389634 85.30493428 0 0 0 0 0 0 1139.266415 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2.916666667 0 0 0 0 0 0.333333333 6.401606426 0
cam.high.scrapers 0 0 0 0 0 0 0 0.505050505 0 0 216.7820377 0 0 0 0 0 10.46445639 0 0 0 9.386200717 0 0 18.76596231 0 0 77.20504009 0 0 0 12.22222222 0 152.5304561 221.1181095 0 0 0
cam.high.browser 0 0 0 0 0 0 0 0 0 0 0 0 0 7.233093417 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
cam.high.bioeroder 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
#Library files for courses provided by: Highland Statistics Ltd.
#To cite these functions, use:
#Mixed effects models and extensions in ecology with R. (2009).
#Zuur, AF, Ieno, EN, Walker, N, Saveliev, AA, and Smith, GM. Springer.
#Copyright Highland Statistics LTD.
#####################################################################
#VIF FUNCTION.
#To use: corvif(YourDataFile)
corvif <- function(dataz) {
dataz <- as.data.frame(dataz)
#correlation part
#cat("Correlations of the variables\n\n")
#tmp_cor <- cor(dataz,use="complete.obs")
#print(tmp_cor)
#vif part
form <- formula(paste("fooy ~ ",paste(strsplit(names(dataz)," "),collapse=" + ")))
dataz <- data.frame(fooy=1,dataz)
lm_mod <- lm(form,dataz)
cat("\n\nVariance inflation factors\n\n")
print(myvif(lm_mod))
}
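#Hedged usage example (ours, not from the original file), using the built-in mtcars data;
#in practice pass a data frame holding only your covariates
#To use, type: corvif(mtcars[, c("disp", "hp", "wt", "qsec")])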
#Support function for corvif. Will not be called by the user
myvif <- function(mod) {
v <- vcov(mod)
assign <- attributes(model.matrix(mod))$assign
if (names(coefficients(mod)[1]) == "(Intercept)") {
v <- v[-1, -1]
assign <- assign[-1]
} else warning("No intercept: vifs may not be sensible.")
terms <- labels(terms(mod))
n.terms <- length(terms)
if (n.terms < 2) stop("The model contains fewer than 2 terms")
if (length(assign) > dim(v)[1] ) {
#correlations of the covariates, needed for the collinearity check below
tmp_cor <- cor(model.matrix(mod)[, -1], use = "pairwise.complete.obs")
diag(tmp_cor)<-0
if (any(tmp_cor==1.0)){
return("Sample size is too small, 100% collinearity is present")
} else {
return("Sample size is too small")
}
}
R <- cov2cor(v)
detR <- det(R)
result <- matrix(0, n.terms, 3)
rownames(result) <- terms
colnames(result) <- c("GVIF", "Df", "GVIF^(1/2Df)")
for (term in 1:n.terms) {
subs <- which(assign == term)
result[term, 1] <- det(as.matrix(R[subs, subs])) * det(as.matrix(R[-subs, -subs])) / detR
result[term, 2] <- length(subs)
}
if (all(result[, 2] == 1)) {
result <- data.frame(GVIF=result[, 1])
} else {
result[, 3] <- result[, 1]^(1/(2 * result[, 2]))
}
invisible(result)
}
#END VIF FUNCTIONS
##################################################################
##################################################################
#Here are some functions that we took from the pairs help file and
#modified, or wrote ourselves. To cite these, use the r citation: citation()
panel.cor <- function(x, y, digits=1, prefix="", cex.cor = 6)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r1=cor(x,y,use="pairwise.complete.obs")
r <- abs(cor(x, y,use="pairwise.complete.obs"))
txt <- format(c(r1, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) { cex <- 0.9/strwidth(txt) } else {
cex = cex.cor}
text(0.5, 0.5, txt, cex = cex * r)
}
##################################################################
panel.smooth2=function (x, y, col = par("col"), bg = NA, pch = par("pch"),
cex = 1, col.smooth = "black", span = 2/3, iter = 3, ...)
{
points(x, y, pch = pch, col = col, bg = bg, cex = cex)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
col = 1, ...)
}
##################################################################
panel.lines2=function (x, y, col = par("col"), bg = NA, pch = par("pch"),
cex = 1, ...)
{
points(x, y, pch = pch, col = col, bg = bg, cex = cex)
ok <- is.finite(x) & is.finite(y)
if (any(ok)){
tmp=lm(y[ok]~x[ok])
abline(tmp)}
}
##################################################################
panel.hist <- function(x, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col="white", ...)
}
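#Hedged usage example (ours, not from Zuur et al.): the panel functions above are meant
#to be plugged into pairs(), e.g. with the numeric columns of the built-in iris data
#To use, type: pairs(iris[, 1:4],
#                    upper.panel = panel.cor,
#                    diag.panel  = panel.hist,
#                    lower.panel = panel.smooth2)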
##################################################################
##################################################################
##################################################################
##################################################################
#Functions for variograms
#To cite these functions, use:
#Mixed effects models and extensions in ecology with R. (2009).
#Zuur, AF, Ieno, EN, Walker, N, Saveliev, AA, and Smith, GM. Springer.
#Make a variogram for one variable
#To use, type: MyVariogram(XUTM, YUTM, E , MyDistance=10)
# XUTM is x coordinates
# YUTM is y coordinates
# E is variable used in sample variogram
# MyDistance is the cutoff value for the distances
MyVariogram <- function(x,y,z, MyDistance) {
library(gstat)
mydata <- data.frame(z, x, y)
coordinates(mydata) <- c("x", "y")
Var <- variogram(z ~ 1, mydata, cutoff = MyDistance)
data.frame(Var$np, Var$dist, Var$gamma)
}
#Function for making multiple variograms in an xyplot
#To use, type: MultiVariogram(Z, MyVar,XUTM, YUTM, MyDistance=10)
# Z is a data frame with all the data
# MyVar is a character string with the variable names that will be used in the xyplot
# XUTM is x coordinates
# YUTM is y coordinates
# MyDistance is the cutoff value for the distances
MultiVariogram <- function(Z, MyVar, x, y, MyDistance) {
#Z is the data frame with data
#MyVar is a list of variables for which variograms are calculated
#x, y: spatial coordinates
#MyDistance: limit for distances in the variogram
library(lattice)
VarAll<- c(NA,NA,NA,NA)
for (i in MyVar){
vi <- MyVariogram(x,y,Z[,i], MyDistance)
vii <- cbind(vi, i)
VarAll <- rbind(VarAll,vii)
}
VarAll <- VarAll[-1,]
P <- xyplot(Var.gamma ~ Var.dist | factor(i), col = 1, type = "p", pch = 16,
data = VarAll,
xlab = "Distance",
ylab = "Semi-variogram",
strip = function(bg='white', ...)
strip.default(bg='white', ...),
scales = list(alternating = T,
x = list(relation = "same"),
y = list(relation = "same"))
)
print(P)
}
#End variogram code
##########################################################
#Function for multi-panel Cleveland dotplot.
#The input file must contain no categorical variables
Mydotplot <- function(DataSelected){
library(lattice)
P <- dotplot(as.matrix(DataSelected),
groups=FALSE,
strip = strip.custom(bg = 'white',
par.strip.text = list(cex = 1.2)),
scales = list(x = list(relation = "free", draw = TRUE),
y = list(relation = "free", draw = FALSE)),
col=1, cex = 0.5, pch = 16,
xlab = list(label = "Value of the variable", cex = 1.5),
ylab = list(label = "Order of the data from text file", cex = 1.5))
print(P)
}
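#Hedged usage example (ours): e.g. with the numeric columns of the built-in iris data
#To use, type: Mydotplot(iris[, 1:4])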
#Add more code here:
Mybwplot <- function(Z, MyVar, TargetVar){
#Multipanel boxplots
#Z: data set
#MyVar: character string with the variable names to plot
#TargetVar: variable for the x-axis; must be a factor
library(lattice)
AllY <- as.vector(as.matrix(Z[,MyVar]))
AllX <- rep(Z[,TargetVar], length(MyVar))
ID <- rep(MyVar, each = nrow(Z))
P <- bwplot(AllY ~ factor(AllX) | ID, horizontal = FALSE,
ylab = "", xlab = "",
scales = list(alternating = T,cex.lab = 1.5,
x = list(relation = "same",rot =90, abbreviate = TRUE, cex = 1.5),
y = list(relation = "free", draw = FALSE)),
strip = strip.custom(bg = 'white',
par.strip.text = list(cex = 1.2)),
cex = .5,
par.settings = list(
box.rectangle = list(col = 1),
box.umbrella = list(col = 1),
plot.symbol = list(cex = .5, col = 1)))
print(P)
}
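#Hedged usage example (ours): e.g. boxplots of two iris measurements per species
#To use, type: Mybwplot(iris, c("Sepal.Length", "Sepal.Width"), "Species")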
#######################################################
MyxyplotBin <- function(Z, MyV, NameY1) {
AllX <- as.vector(as.matrix(Z[,MyV]))
AllY <- rep(Z[,NameY1] , length(MyV))
AllID <- rep(MyV, each = nrow(Z))
library(mgcv)
library(lattice)
P <- xyplot(AllY ~ AllX | factor(AllID), col = 1,
strip = function(bg='white', ...) strip.default(bg='white', ...),
scales = list(alternating = T,
x = list(relation = "free"),
y = list(relation = "same")),
xlab = "Covariate",
ylab = "Probability of presence",
panel=function(x,y){
panel.grid(h=-1, v= 2)
panel.points(x,y,col=1)
tmp<-gam(y~s(x, k = 4), family = binomial)
MyData <- data.frame(x = seq(min(x), max(x), length = 25))
p1 <- predict(tmp, newdata = MyData, type ="response")
panel.lines(MyData$x,p1, col = 1, lwd = 3)
})
print(P)
}
#######################################################
#######################################################
Myxyplot <- function(Z, MyV, NameY1,MyYlab="") {
AllX <- as.vector(as.matrix(Z[,MyV]))
AllY <- rep(Z[,NameY1] , length(MyV))
AllID <- rep(MyV, each = nrow(Z))
library(mgcv)
library(lattice)
P <- xyplot(AllY ~ AllX|factor(AllID), col = 1,
xlab = list("Explanatory variables", cex = 1.5),
#ylab = list("Response variable", cex = 1.5),
#ylab = list("Pearson residuals", cex = 1.5),
ylab = list(MyYlab, cex = 1.5),
#layout = c(2,2), #Modify
strip = function(bg='white', ...)
strip.default(bg='white', ...),
scales = list(alternating = T,
x = list(relation = "free"),
y = list(relation = "same")),
panel=function(x, y){
panel.grid(h=-1, v= 2)
panel.points(x, y, col = 1)
panel.loess(x, y, span = 0.8,col = 1, lwd = 2)})
print(P)
}
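#Hedged usage example (ours): scatterplots of three iris covariates against sepal length
#To use, type: Myxyplot(iris, c("Sepal.Width", "Petal.Length", "Petal.Width"),
#                       "Sepal.Length", MyYlab = "Sepal length")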
#######################################################
MyxyplotPolygon <- function(Z, MyV, NameY1) {
AllX <- as.vector(as.matrix(Z[,MyV]))
AllY <- rep(Z[,NameY1] , length(MyV))
AllID <- rep(MyV, each = nrow(Z))
library(mgcv)
library(lattice)
Z <- xyplot(AllY ~ AllX|factor(AllID), col = 1,
xlab = list(label = "Explanatory variables", cex = 1.5),
ylab = "",
strip = function(bg='white',cex.lab = 1.5,...)
strip.default(bg='white', ...),
scales = list(alternating = T,
x = list(relation = "free"),
y = list(relation = "same")),
panel=function(x, y){
t1 <- gam(y~s(x))
MD1 <- data.frame(x=seq(from = min(x, na.rm = TRUE),
to = max(x, na.rm = TRUE),
length = 100))
P1 <- predict(t1, se.fit = TRUE)
I1 <- order(x)
xs <- sort(x)
panel.lines(xs, P1$fit[I1], col = 1)