Merge branch 'master' of github.com:JuliaStats/Distributions.jl
Resolve conflicts in:
	src/univariate/discrete/poisson.jl
richardreeve committed Aug 3, 2015
2 parents 9e8cfb4 + 2457917 commit b9b0b9c
Showing 54 changed files with 685 additions and 753 deletions.
11 changes: 5 additions & 6 deletions src/edgeworth.jl
@@ -12,10 +12,10 @@ kurtosis(d::EdgeworthAbstract) = kurtosis(d.dist) / d.n
immutable EdgeworthZ{D<:UnivariateDistribution} <: EdgeworthAbstract
dist::D
n::Float64

function EdgeworthZ{T<:UnivariateDistribution}(d::T, n::Real)
n > zero(n) ||
error("n must be positive")
@compat new(d, Float64(n))
@check_args(EdgeworthZ, n > zero(n))
new(d, n)
end
end
EdgeworthZ(d::UnivariateDistribution,n::Real) = EdgeworthZ{typeof(d)}(d,n)
@@ -78,9 +78,8 @@ immutable EdgeworthSum{D<:UnivariateDistribution} <: EdgeworthAbstract
dist::D
n::Float64
function EdgeworthSum{T<:UnivariateDistribution}(d::T, n::Real)
n > zero(n) ||
error("n must be positive")
@compat new(d, Float64(n))
@check_args(EdgeworthSum, n > zero(n))
new(d, n)
end
end
EdgeworthSum(d::UnivariateDistribution, n::Real) = EdgeworthSum{typeof(d)}(d,n)
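
Note: the recurring change in this commit is replacing hand-rolled guards such as n > zero(n) || error("n must be positive") with the @check_args macro, which presumably raises an ArgumentError naming the distribution when the condition fails. The definition below is only a minimal sketch of that pattern, not necessarily the macro actually shipped in Distributions.jl:

    # Illustrative sketch of a @check_args-style macro; the package's real
    # definition may differ. It evaluates the condition in the caller's scope
    # and throws an ArgumentError naming the distribution and the failed check.
    macro check_args(D, cond)
        quote
            $(esc(cond)) || throw(ArgumentError(string(
                $(string(D)), ": the condition ", $(string(cond)), " is not satisfied.")))
        end
    end

    # With this sketch, @check_args(EdgeworthZ, n > zero(n)) and n = -1 would throw
    # ArgumentError("EdgeworthZ: the condition n > zero(n) is not satisfied.")
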
2 changes: 1 addition & 1 deletion src/matrix/inversewishart.jl
@@ -69,7 +69,7 @@ function _logpdf(d::InverseWishart, X::DenseMatrix{Float64})
-0.5 * ((df + p + 1) * logdet(Xcf) + trace(Xcf \ Ψ)) - d.c0
end

@compat _logpdf{T<:Real}(d::InverseWishart, X::DenseMatrix{T}) = _logpdf(d, Float64(X))
_logpdf{T<:Real}(d::InverseWishart, X::DenseMatrix{T}) = _logpdf(d, convert(Matrix{Float64}, X))


#### Sampling
4 changes: 2 additions & 2 deletions src/matrix/wishart.jl
@@ -55,7 +55,7 @@ function mode(d::Wishart)
end
end

function meanlogdet(d::Wishart)
function meanlogdet(d::Wishart)
p = dim(d)
df = d.df
v = logdet(d.S) + p * logtwo
@@ -81,7 +81,7 @@ function _logpdf(d::Wishart, X::DenseMatrix{Float64})
0.5 * ((df - (p + 1)) * logdet(Xcf) - trace(d.S \ X)) - d.c0
end

@compat _logpdf{T<:Real}(d::Wishart, X::DenseMatrix{T}) = _logpdf(d, Float64(X))
_logpdf{T<:Real}(d::Wishart, X::DenseMatrix{T}) = _logpdf(d, convert(Matrix{Float64}, X))

#### Sampling

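
Note: in both the Wishart and InverseWishart _logpdf fallbacks above, Float64(X) is replaced by convert(Matrix{Float64}, X), since Float64(...) is a scalar conversion and does not apply element-wise to a matrix. An illustrative example of the intended conversion:

    X = [1 2; 3 4]                 # integer matrix
    convert(Matrix{Float64}, X)    # [1.0 2.0; 3.0 4.0], element type Float64
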
2 changes: 1 addition & 1 deletion src/multivariate/dirichlet.jl
@@ -8,7 +8,7 @@ immutable Dirichlet <: ContinuousMultivariateDistribution
lmnB::Float64 = 0.0
for i in 1:length(alpha)
ai = alpha[i]
ai > 0 || throw(ArgumentError("alpha must be a positive vector."))
ai > 0 || throw(ArgumentError("Dirichlet: alpha must be a positive vector."))
alpha0 += ai
lmnB += lgamma(ai)
end
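
Note: the only change here is that the error message is prefixed with the distribution name, which makes the failure easier to attribute. Assuming the usual vector constructor, something like the following would trigger it (illustrative):

    using Distributions
    # A non-positive entry in alpha now raises
    # ArgumentError("Dirichlet: alpha must be a positive vector.")
    Dirichlet([1.0, -2.0, 3.0])
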
2 changes: 1 addition & 1 deletion src/testutils.jl
@@ -435,7 +435,7 @@ end
function test_stats(d::DiscreteUnivariateDistribution, vs::AbstractVector)
# using definition (or an approximation)

@compat vf = Float64[Float64(v) for v in vs]
vf = Float64[v for v in vs]
p = pdf(d, vf)
xmean = dot(p, vf)
xvar = dot(p, abs2(vf .- xmean))
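
Note: the explicit Float64(v) call was redundant because a typed comprehension Float64[...] already converts each element to Float64, so the @compat wrapper can be dropped. Illustrative example:

    vs = [1, 2, 3]                 # integer support points
    vf = Float64[v for v in vs]    # [1.0, 2.0, 3.0], element type Float64
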
10 changes: 2 additions & 8 deletions src/univariate/continuous/arcsine.jl
@@ -2,13 +2,8 @@ immutable Arcsine <: ContinuousUnivariateDistribution
a::Float64
b::Float64

function Arcsine(a::Float64, b::Float64)
a < b || error("a must be less than b.")
new(a, b)
end

@compat Arcsine(a::Real, b::Real) = Arcsine(Float64(a), Float64(b))
@compat Arcsine(b::Real) = Arcsine(0.0, Float64(b))
Arcsine(a::Real, b::Real) = (@check_args(Arcsine, a < b); new(a, b))
Arcsine(b::Real) = (@check_args(Arcsine, b > zero(b)); new(0.0, b))
Arcsine() = new(0.0, 1.0)
end

@@ -51,4 +46,3 @@ quantile(d::Arcsine, p::Float64) = location(d) + abs2(sin(halfπ * p)) * scale(d
### Sampling

rand(d::Arcsine) = quantile(d, rand())
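
Note: the quantile shown in the hunk header, location(d) + abs2(sin(halfπ * p)) * scale(d), is the inverse of the arcsine cdf, and rand(d) = quantile(d, rand()) is plain inversion sampling. For the standard Arcsine(0, 1), quantile(0.5) = sin(π/4)^2 = 0.5. A small numerical check, assuming the cdf and quantile methods provided by the package (illustrative):

    using Distributions
    d = Arcsine(0.0, 1.0)
    quantile(d, 0.5)                            # 0.5, the median of Arcsine(0, 1)
    isapprox(cdf(d, quantile(d, 0.25)), 0.25)   # true: quantile inverts the cdf
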

8 changes: 3 additions & 5 deletions src/univariate/continuous/beta.jl
@@ -2,18 +2,16 @@ immutable Beta <: ContinuousUnivariateDistribution
α::Float64
β::Float64

function Beta(a::Real, b::Real)
(a > zero(a) && b > zero(b)) || error("α and β must be positive")
@compat new(Float64(a), Float64(b))
function Beta(α::Real, β::Real)
@check_args(Beta, α > zero(α) && β > zero(β))
new(α, β)
end

Beta(α::Real) = Beta(α, α)
Beta() = new(1.0, 1.0)
end

@distr_support Beta 0.0 1.0


#### Parameters

params(d::Beta) = (d.α, d.β)
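
Note: Beta(α) remains the symmetric special case Beta(α, α), and the new @check_args guard rejects non-positive parameters. A small usage check (illustrative):

    using Distributions
    mean(Beta(2.0))      # 0.5, since Beta(α) == Beta(α, α) is symmetric about 1/2
    # Beta(-1.0, 2.0)    # would now be rejected by the @check_args guard
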
37 changes: 19 additions & 18 deletions src/univariate/continuous/betaprime.jl
@@ -1,18 +1,20 @@
immutable BetaPrime <: ContinuousUnivariateDistribution
betad::Beta
α::Float64
β::Float64

BetaPrime(α::Float64, β::Float64) = new(Beta(α, β))
BetaPrime(α::Float64) = new(Beta(α))
BetaPrime() = new(Beta())
function BetaPrime(α::Real, β::Real)
@check_args(BetaPrime, α > zero(α) && β > zero(β))
new(α, β)
end
BetaPrime(α::Real) = BetaPrime(α, α)
BetaPrime() = new(1.0, 1.0)
end

@distr_support BetaPrime 0.0 Inf

show(io::IO, d::BetaPrime) = ((α, β) = params(d); show_oneline(io, d, [(:α, α), (:β, β)]))

#### Parameters

params(d::BetaPrime) = params(d.betad)
params(d::BetaPrime) = (d.α, d.β)


#### Statistics
@@ -46,21 +48,20 @@ end

pdf(d::BetaPrime, x::Float64) = exp(logpdf(d, x))

cdf(d::BetaPrime, x::Float64) = cdf(d.betad, x / (1.0 + x))
ccdf(d::BetaPrime, x::Float64) = ccdf(d.betad, x / (1.0 + x))
logcdf(d::BetaPrime, x::Float64) = logcdf(d.betad, x / (1.0 + x))
logccdf(d::BetaPrime, x::Float64) = logccdf(d.betad, x / (1.0 + x))
cdf(d::BetaPrime, x::Float64) = betacdf(d.α, d.β, x / (1.0 + x))
ccdf(d::BetaPrime, x::Float64) = betaccdf(d.α, d.β, x / (1.0 + x))
logcdf(d::BetaPrime, x::Float64) = betalogcdf(d.α, d.β, x / (1.0 + x))
logccdf(d::BetaPrime, x::Float64) = betalogccdf(d.α, d.β, x / (1.0 + x))

quantile(d::BetaPrime, p::Float64) = (x = betainvcdf(d.α, d.β, p); x / (1.0 - x))
cquantile(d::BetaPrime, p::Float64) = (x = betainvccdf(d.α, d.β, p); x / (1.0 - x))
invlogcdf(d::BetaPrime, p::Float64) = (x = betainvlogcdf(d.α, d.β, p); x / (1.0 - x))
invlogccdf(d::BetaPrime, p::Float64) = (x = betainvlogccdf(d.α, d.β, p); x / (1.0 - x))

quantile(d::BetaPrime, p::Float64) = (x = quantile(d.betad, p); x / (1.0 - x))
cquantile(d::BetaPrime, p::Float64) = (x = cquantile(d.betad, p); x / (1.0 - x))
invlogcdf(d::BetaPrime, p::Float64) = (x = invlogcdf(d.betad, p); x / (1.0 - x))
invlogccdf(d::BetaPrime, p::Float64) = (x = invlogccdf(d.betad, p); x / (1.0 - x))


#### Sampling

function rand(d::BetaPrime)
function rand(d::BetaPrime)
(α, β) = params(d)
rand(Gamma(α)) / rand(Gamma(β))
end
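
Note: the cdf/quantile pairs above rely on the relation between the two families: if Y ~ Beta(α, β), then X = Y / (1 - Y) ~ BetaPrime(α, β), so cdf values transfer through x -> x / (1 + x) and quantiles through the inverse map y -> y / (1 - y). A quick numerical check of that relation (illustrative):

    using Distributions
    α, β, p = 2.0, 3.0, 0.7
    y = quantile(Beta(α, β), p)                  # Beta quantile
    x = y / (1.0 - y)                            # mapped to the BetaPrime scale
    isapprox(cdf(BetaPrime(α, β), x), p)         # true
    isapprox(quantile(BetaPrime(α, β), p), x)    # true
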

71 changes: 38 additions & 33 deletions src/univariate/continuous/biweight.jl
@@ -1,53 +1,58 @@
immutable Biweight <: ContinuousUnivariateDistribution
location::Float64
scale::Float64
function Biweight(l::Real, s::Real)
s > zero(s) || error("scale must be positive")
@compat new(Float64(l), Float64(s))
end
end
μ::Float64
σ::Float64

Biweight(location::Real) = Biweight(location, 1.0)
Biweight() = Biweight(0.0, 1.0)
Biweight(μ::Real, σ::Real) = (@check_args(Biweight, σ > zero(σ)); new(μ, σ))
Biweight(μ::Real) = new(μ, 1.0)
Biweight() = new(0.0, 1.0)
end

@distr_support Biweight d.location-d.scale d.location+d.scale
@distr_support Biweight d.μ - d.σ d.μ + d.σ

## Parameters
params(d::Biweight) = (d.location, d.scale)
params(d::Biweight) = (d.μ, d.σ)

## Properties
mean(d::Biweight) = d.location
median(d::Biweight) = d.location
mode(d::Biweight) = d.location
mean(d::Biweight) = d.μ
median(d::Biweight) = d.μ
mode(d::Biweight) = d.μ

var(d::Biweight) = d.scale*d.scale/7.0
var(d::Biweight) = d.σ^2 / 7.0
skewness(d::Biweight) = 0.0
kurtosis(d::Biweight) = 1/21-3
kurtosis(d::Biweight) = -2.9523809523809526 # = 1/21-3

## Functions
function pdf(d::Biweight, x::Real)
u = abs(x - d.location)/d.scale
u >= 1 ? 0.0 : 0.9375*(1-u*u)^2/d.scale
function pdf(d::Biweight, x::Float64)
u = abs(x - d.μ) / d.σ
u >= 1.0 ? 0.0 : 0.9375 * (1 - u^2)^2 / d.σ
end

function cdf(d::Biweight, x::Real)
u = (x - d.location)/d.scale
u <= -1 ? 0.0 : u >= 1 ? 1.0 : 0.0625*(1+u)^3*@horner(u,8.0,-9.0,3.0)
function cdf(d::Biweight, x::Float64)
u = (x - d.μ) / d.σ
u <= -1.0 ? 0.0 :
u >= 1.0 ? 1.0 :
0.0625 * (u + 1.0)^3 * @horner(u,8.0,-9.0,3.0)
end
function ccdf(d::Biweight, x::Real)
u = (d.location - x)/d.scale
u <= -1 ? 1.0 : u >= 1 ? 0.0 : 0.0625*(1+u)^3*@horner(u,8.0,-9.0,3.0)

function ccdf(d::Biweight, x::Float64)
u = (d.μ - x) / d.σ
u <= -1.0 ? 1.0 :
u >= 1.0 ? 0.0 :
0.0625 * (u + 1.0)^3 * @horner(u,8.0,-9.0,3.0)
end

@quantile_newton Biweight

function mgf(d::Biweight, t::Real)
a = d.scale*t
a2 = a*a
a == 0 ? one(a) : 15.0*exp(d.location*t)*(-3.0*cosh(a)+(a+3.0/a)*sinh(a))/(a2*a2)
function mgf(d::Biweight, t::Float64)
a = d.σ*t
a2 = a^2
a == 0 ? 1.0 :
15.0 * exp(d.μ * t) * (-3.0 * cosh(a) + (a + 3.0/a) * sinh(a)) / (a2^2)
end
function cf(d::Biweight, t::Real)
a = d.scale*t
a2 = a*a
a == 0 ? complex(one(a)) : -15.0*cis(d.location*t)*(3.0*cos(a)+(a-3.0/a)*sin(a))/(a2*a2)

function cf(d::Biweight, t::Float64)
a = d.σ * t
a2 = a^2
a == 0 ? 1.0+0.0im :
-15.0 * cis(d.μ * t) * (3.0 * cos(a) + (a - 3.0/a) * sin(a)) / (a2^2)
end
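
Note: the Horner form above is the polynomial cdf (1/16)(1 + u)^3(8 - 9u + 3u^2) on -1 <= u <= 1. As a quick sanity check, u = 0 gives 0.0625 * 1 * 8 = 0.5 (the median at μ) and u = 1 gives 0.0625 * 8 * (8 - 9 + 3) = 1, the upper end of the support [μ - σ, μ + σ]. A small numerical check (illustrative):

    using Distributions
    d = Biweight(2.0, 3.0)
    cdf(d, 2.0)    # 0.5: cdf at the location parameter μ
    cdf(d, 5.0)    # 1.0: cdf at μ + σ, the upper end of the support
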
46 changes: 21 additions & 25 deletions src/univariate/continuous/cauchy.jl
@@ -1,13 +1,9 @@
immutable Cauchy <: ContinuousUnivariateDistribution
μ::Float64
β::Float64
σ::Float64

function Cauchy(μ::Real, β::Real)
β > zero(β) || error("Cauchy: scale must be positive")
@compat new(Float64(μ), Float64(β))
end

@compat Cauchy(μ::Real) = new(Float64(μ), 1.0)
Cauchy(μ::Real, σ::Real) = (@check_args(Cauchy, σ > zero(σ)); new(μ, σ))
Cauchy(μ::Real) = new(μ, 1.0)
Cauchy() = new(0.0, 1.0)
end

@@ -16,54 +12,54 @@ end
#### Parameters

location(d::Cauchy) = d.μ
scale(d::Cauchy) = d.β
scale(d::Cauchy) = d.σ

params(d::Cauchy) = (d.μ, d.β)
params(d::Cauchy) = (d.μ, d.σ)


#### Statistics

mean(d::Cauchy) = NaN
median(d::Cauchy) = location(d)
mode(d::Cauchy) = location(d)
median(d::Cauchy) = d.μ
mode(d::Cauchy) = d.μ

var(d::Cauchy) = NaN
skewness(d::Cauchy) = NaN
kurtosis(d::Cauchy) = NaN

entropy(d::Cauchy) = log(scale(d)) + log4π
entropy(d::Cauchy) = log4π + log(d.σ)


#### Functions

zval(d::Cauchy, x::Float64) = (x - d.μ) / d.β
xval(d::Cauchy, z::Float64) = d.μ + z * d.β
zval(d::Cauchy, x::Float64) = (x - d.μ) / d.σ
xval(d::Cauchy, z::Float64) = d.μ + z * d.σ

pdf(d::Cauchy, x::Float64) = 1.0 / (π * scale(d) * (1 + zval(d, x)^2))
logpdf(d::Cauchy, x::Float64) = - (logπ + log(scale(d)) + log1psq(zval(d, x)))
pdf(d::Cauchy, x::Float64) = 1.0 / (π * scale(d) * (1.0 + zval(d, x)^2))
logpdf(d::Cauchy, x::Float64) = - (log1psq(zval(d, x)) + logπ + log(d.σ))

function cdf(d::Cauchy, x::Float64)
μ, β = params(d)
invπ * atan2(x - μ, β) + 0.5
μ, σ = params(d)
invπ * atan2(x - μ, σ) + 0.5
end

function ccdf(d::Cauchy, x::Float64)
μ, β = params(d)
invπ * atan2(μ - x, β) + 0.5
μ, σ = params(d)
invπ * atan2(μ - x, σ) + 0.5
end

function quantile(d::Cauchy, p::Float64)
μ, β = params(d)
μ + β * tan(π * (p - 0.5))
μ, σ = params(d)
μ + σ * tan(π * (p - 0.5))
end

function cquantile(d::Cauchy, p::Float64)
μ, β = params(d)
μ + β * tan(π * (0.5 - p))
μ, σ = params(d)
μ + σ * tan(π * (0.5 - p))
end

mgf(d::Cauchy, t::Real) = t == zero(t) ? 1.0 : NaN
cf(d::Cauchy, t::Real) = exp(im * (t * d.μ) - d.β * abs(t))
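
Note: the cdf/quantile pair above is the standard Cauchy inversion: cdf(x) = atan((x - μ)/σ)/π + 1/2 and quantile(p) = μ + σ tan(π(p - 1/2)). For example, quantile(0.75) = μ + σ because tan(π/4) = 1. A small numerical check (illustrative):

    using Distributions
    d = Cauchy(1.0, 2.0)
    quantile(d, 0.75)             # 3.0 = 1.0 + 2.0 * tan(pi/4)
    isapprox(cdf(d, 3.0), 0.75)   # true
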
cf(d::Cauchy, t::Real) = exp(im * (t * d.μ) - d.σ * abs(t))


#### Fitting

