Skip to content

Commit

Permalink
Remove support for Julia 0.3.x
Browse files Browse the repository at this point in the history
  • Loading branch information
simonbyrne committed Feb 10, 2016
1 parent 77eea93 commit 73758ec
Show file tree
Hide file tree
Showing 32 changed files with 99 additions and 104 deletions.
1 change: 0 additions & 1 deletion .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ os:
- linux
- osx
julia:
- 0.3
- 0.4
- nightly
notifications:
Expand Down
4 changes: 1 addition & 3 deletions REQUIRE
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
julia 0.3
ArrayViews 0.4.12
julia 0.4
PDMats 0.4 0.5
StatsFuns 0.1.1
StatsBase 0.7.0
Compat 0.4.0
4 changes: 1 addition & 3 deletions src/Distributions.jl
Original file line number Diff line number Diff line change
@@ -1,12 +1,10 @@
VERSION >= v"0.4.0-dev+6521" && __precompile__(true)
__precompile__(true)

module Distributions

using ArrayViews
using PDMats
using StatsFuns
using StatsBase
using Compat

import Base.Random
import Base: size, eltype, length, full, convert, show, getindex, scale, scale!, rand, rand!
Expand Down
4 changes: 2 additions & 2 deletions src/common.jl
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ abstract Distribution{F<:VariateForm,S<:ValueSupport} <: Sampleable{F,S}
typealias UnivariateDistribution{S<:ValueSupport} Distribution{Univariate,S}
typealias MultivariateDistribution{S<:ValueSupport} Distribution{Multivariate,S}
typealias MatrixDistribution{S<:ValueSupport} Distribution{Matrixvariate,S}
@compat typealias NonMatrixDistribution Union{UnivariateDistribution, MultivariateDistribution}
typealias NonMatrixDistribution Union{UnivariateDistribution, MultivariateDistribution}

typealias DiscreteDistribution{F<:VariateForm} Distribution{F,Discrete}
typealias ContinuousDistribution{F<:VariateForm} Distribution{F,Continuous}
Expand All @@ -64,4 +64,4 @@ abstract SufficientStats
abstract IncompleteDistribution

typealias DistributionType{D<:Distribution} Type{D}
@compat typealias IncompleteFormulation Union{DistributionType,IncompleteDistribution}
typealias IncompleteFormulation Union{DistributionType,IncompleteDistribution}
2 changes: 1 addition & 1 deletion src/edgeworth.jl
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ immutable EdgeworthMean{D<:UnivariateDistribution} <: EdgeworthAbstract
# although n would usually be an integer, no methods require this
n > zero(n) ||
error("n must be positive")
@compat new(d, Float64(n))
new(d, Float64(n))
end
end
EdgeworthMean(d::UnivariateDistribution,n::Real) = EdgeworthMean{typeof(d)}(d,n)
Expand Down
2 changes: 1 addition & 1 deletion src/genericrand.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ rand(s::Sampleable{Univariate}, dims::Int...) =

function _rand!(s::Sampleable{Multivariate}, A::DenseMatrix)
for i = 1:size(A,2)
_rand!(s, view(A,:,i))
_rand!(s, slice(A,:,i))
end
return A
end
Expand Down
8 changes: 4 additions & 4 deletions src/mixtures/mixturemodel.jl
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ function _mixlogpdf!(r::DenseArray, d::AbstractMixtureModel, x)
@inbounds pi = p[i]
if pi > 0.0
lpri = log(pi)
lp_i = view(Lp, :, i)
lp_i = slice(Lp, :, i)
# compute logpdf in batch and store
logpdf!(lp_i, component(d, i), x)

Expand All @@ -289,7 +289,7 @@ function _mixlogpdf!(r::DenseArray, d::AbstractMixtureModel, x)
fill!(r, 0.0)
@inbounds for i = 1:K
if p[i] > 0.0
lp_i = view(Lp, :, i)
lp_i = slice(Lp, :, i)
for j = 1:n
r[j] += exp(lp_i[j] - m[j])
end
Expand Down Expand Up @@ -341,7 +341,7 @@ function _cwise_pdf!(r::StridedMatrix, d::AbstractMixtureModel, X)
n = size(X, ndims(X))
size(r) == (n, K) || error("The size of r is incorrect.")
for i = 1:K
pdf!(view(r,:,i), component(d, i), X)
pdf!(slice(r,:,i), component(d, i), X)
end
r
end
Expand All @@ -351,7 +351,7 @@ function _cwise_logpdf!(r::StridedMatrix, d::AbstractMixtureModel, X)
n = size(X, ndims(X))
size(r) == (n, K) || error("The size of r is incorrect.")
for i = 1:K
logpdf!(view(r,:,i), component(d, i), X)
logpdf!(slice(r,:,i), component(d, i), X)
end
r
end
Expand Down
6 changes: 3 additions & 3 deletions src/multivariate/dirichlet.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ immutable Dirichlet <: ContinuousMultivariateDistribution
new(fill(alpha, d), alpha0, lgamma(alpha) * d - lgamma(alpha0))
end

@compat Dirichlet(d::Integer, alpha::Real) = Dirichlet(d, Float64(alpha))
Dirichlet(d::Integer, alpha::Real) = Dirichlet(d, Float64(alpha))
end

immutable DirichletCanon
Expand Down Expand Up @@ -144,7 +144,7 @@ end

# sampling

@compat function _rand!{T<:Real}(d::Union{Dirichlet,DirichletCanon}, x::AbstractVector{T})
function _rand!{T<:Real}(d::Union{Dirichlet,DirichletCanon}, x::AbstractVector{T})
s = 0.0
n = length(x)
α = d.alpha
Expand All @@ -164,7 +164,7 @@ immutable DirichletStats <: SufficientStats
slogp::Vector{Float64} # (weighted) sum of log(p)
tw::Float64 # total sample weights

@compat DirichletStats(slogp::Vector{Float64}, tw::Real) = new(slogp, Float64(tw))
DirichletStats(slogp::Vector{Float64}, tw::Real) = new(slogp, Float64(tw))
end

length(ss::DirichletStats) = length(s.slogp)
Expand Down
8 changes: 4 additions & 4 deletions src/multivariate/multinomial.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,11 @@ immutable Multinomial <: DiscreteMultivariateDistribution
if !isprobvec(p)
throw(ArgumentError("p = $p is not a probability vector."))
end
@compat new(round(Int, n), p)
new(round(Int, n), p)
end

@compat Multinomial(n::Integer, p::Vector{Float64}, ::NoArgCheck) = new(round(Int, n), p)
@compat Multinomial(n::Integer, k::Integer) = new(round(Int, n), fill(1.0 / k, k))
Multinomial(n::Integer, p::Vector{Float64}, ::NoArgCheck) = new(round(Int, n), p)
Multinomial(n::Integer, k::Integer) = new(round(Int, n), fill(1.0 / k, k))
end

# Parameters
Expand Down Expand Up @@ -132,7 +132,7 @@ immutable MultinomialStats <: SufficientStats
scnts::Vector{Float64} # sum of counts
tw::Float64 # total sample weight

@compat MultinomialStats(n::Int, scnts::Vector{Float64}, tw::Real) = new(n, scnts, Float64(tw))
MultinomialStats(n::Int, scnts::Vector{Float64}, tw::Real) = new(n, scnts, Float64(tw))
end

function suffstats{T<:Real}(::Type{Multinomial}, x::Matrix{T})
Expand Down
2 changes: 1 addition & 1 deletion src/multivariate/mvlognormal.jl
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ immutable MvLogNormal <: AbstractMvLogNormal
end

#Constructors mirror the ones for MvNormal
@compat MvLogNormal(μ::Union{Vector,ZeroVector},Σ::AbstractPDMat) = MvLogNormal(MvNormal(μ,Σ))
MvLogNormal(μ::Union{Vector,ZeroVector},Σ::AbstractPDMat) = MvLogNormal(MvNormal(μ,Σ))
MvLogNormal(Σ::AbstractPDMat) = MvLogNormal(MvNormal(ZeroVector(Float64,dim(Σ)),Σ))
MvLogNormal(μ::Vector,Σ::Matrix) = MvLogNormal(MvNormal(μ,Σ))
MvLogNormal(μ::Vector,σ::Vector) = MvLogNormal(MvNormal(μ,σ))
Expand Down
20 changes: 10 additions & 10 deletions src/multivariate/mvnormal.jl
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,9 @@ insupport{T<:Real}(d::AbstractMvNormal, x::AbstractVector{T}) =
mode(d::AbstractMvNormal) = mean(d)
modes(d::AbstractMvNormal) = [mean(d)]

@compat entropy(d::AbstractMvNormal) = 0.5 * (length(d) * (Float64(log2π) + 1.0) + logdetcov(d))
entropy(d::AbstractMvNormal) = 0.5 * (length(d) * (Float64(log2π) + 1.0) + logdetcov(d))

@compat mvnormal_c0(g::AbstractMvNormal) = -0.5 * (length(g) * Float64(log2π) + logdetcov(g))
mvnormal_c0(g::AbstractMvNormal) = -0.5 * (length(g) * Float64(log2π) + logdetcov(g))

sqmahal{T<:Real}(d::AbstractMvNormal, x::DenseMatrix{T}) = sqmahal!(Array(Float64, size(x, 2)), d, x)

Expand All @@ -60,7 +60,7 @@ _pdf!{T<:Real}(r::DenseArray, d::AbstractMvNormal, x::AbstractMatrix{T}) = exp!(
#
###########################################################

@compat immutable MvNormal{Cov<:AbstractPDMat,Mean<:Union{Vector{Float64},ZeroVector{Float64}}} <: AbstractMvNormal
immutable MvNormal{Cov<:AbstractPDMat,Mean<:Union{Vector{Float64},ZeroVector{Float64}}} <: AbstractMvNormal
μ::Mean
Σ::Cov
end
Expand All @@ -86,11 +86,11 @@ MvNormal{Cov<:AbstractPDMat}(Σ::Cov) = MvNormal{Cov,ZeroVector{Float64}}(ZeroVe

MvNormal(μ::Vector{Float64}, Σ::Matrix{Float64}) = MvNormal(μ, PDMat(Σ))
MvNormal(μ::Vector{Float64}, σ::Vector{Float64}) = MvNormal(μ, PDiagMat(abs2(σ)))
@compat MvNormal(μ::Vector{Float64}, σ::Real) = MvNormal(μ, ScalMat(length(μ), abs2(Float64(σ))))
MvNormal(μ::Vector{Float64}, σ::Real) = MvNormal(μ, ScalMat(length(μ), abs2(Float64(σ))))

MvNormal(Σ::Matrix{Float64}) = MvNormal(PDMat(Σ))
MvNormal(σ::Vector{Float64}) = MvNormal(PDiagMat(abs2(σ)))
@compat MvNormal(d::Int, σ::Real) = MvNormal(ScalMat(d, abs2(Float64(σ))))
MvNormal(d::Int, σ::Real) = MvNormal(ScalMat(d, abs2(Float64(σ))))

### Show

Expand Down Expand Up @@ -150,7 +150,7 @@ immutable MvNormalKnownCov{Cov<:AbstractPDMat}
end

MvNormalKnownCov{Cov<:AbstractPDMat}(C::Cov) = MvNormalKnownCov{Cov}(C)
@compat MvNormalKnownCov(d::Int, σ::Real) = MvNormalKnownCov(ScalMat(d, abs2(Float64(σ))))
MvNormalKnownCov(d::Int, σ::Real) = MvNormalKnownCov(ScalMat(d, abs2(Float64(σ))))
MvNormalKnownCov(σ::Vector{Float64}) = MvNormalKnownCov(PDiagMat(abs2(σ)))
MvNormalKnownCov(Σ::Matrix{Float64}) = MvNormalKnownCov(PDMat(Σ))

Expand All @@ -166,7 +166,7 @@ function suffstats{Cov<:AbstractPDMat}(g::MvNormalKnownCov{Cov}, x::AbstractMatr
size(x,1) == length(g) || throw(DimensionMismatch("Invalid argument dimensions."))
invΣ = inv(g.Σ)
sx = vec(sum(x, 2))
@compat tw = Float64(size(x, 2))
tw = Float64(size(x, 2))
MvNormalKnownCovStats{Cov}(invΣ, sx, tw)
end

Expand Down Expand Up @@ -216,7 +216,7 @@ function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64})
m = s * inv(n)
z = x .- m
s2 = A_mul_Bt(z, z)
@compat MvNormalStats(s, m, s2, Float64(n))
MvNormalStats(s, m, s2, Float64(n))
end

function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64}, w::Array{Float64})
Expand All @@ -229,8 +229,8 @@ function suffstats(D::Type{MvNormal}, x::AbstractMatrix{Float64}, w::Array{Float
m = s * inv(tw)
z = similar(x)
for j = 1:n
xj = view(x,:,j)
zj = view(z,:,j)
xj = slice(x,:,j)
zj = slice(z,:,j)
swj = sqrt(w[j])
for i = 1:d
@inbounds zj[i] = swj * (xj[i] - m[i])
Expand Down
2 changes: 1 addition & 1 deletion src/multivariate/mvnormalcanon.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

### Generic types

@compat immutable MvNormalCanon{P<:AbstractPDMat,V<:Union{Vector{Float64},ZeroVector{Float64}}} <: AbstractMvNormal
immutable MvNormalCanon{P<:AbstractPDMat,V<:Union{Vector{Float64},ZeroVector{Float64}}} <: AbstractMvNormal
μ::V # the mean vector
h::V # potential vector, i.e. inv(Σ) * μ
J::P # precision matrix, i.e. inv(Σ)
Expand Down
8 changes: 4 additions & 4 deletions src/multivariate/mvtdist.jl
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ immutable GenericMvTDist{Cov<:AbstractPDMat} <: AbstractMvTDist

function GenericMvTDist{Cov}(df::Float64, dim::Int, zmean::Bool, μ::Vector{Float64}, Σ::Cov)
df > zero(df) || error("df must be positive")
@compat new(Float64(df), dim, zmean, μ, Σ)
new(Float64(df), dim, zmean, μ, Σ)
end
end

Expand Down Expand Up @@ -51,15 +51,15 @@ DiagTDist(df::Float64, μ::Vector{Float64}, σ::Vector{Float64}) = GenericMvTDis

IsoTDist(df::Float64, μ::Vector{Float64}, C::ScalMat) = GenericMvTDist(df, μ, C)
IsoTDist(df::Float64, C::ScalMat) = GenericMvTDist(df, C)
@compat IsoTDist(df::Float64, μ::Vector{Float64}, σ::Real) = GenericMvTDist(df, μ, ScalMat(length(μ), abs2(Float64(σ))))
@compat IsoTDist(df::Float64, d::Int, σ::Real) = GenericMvTDist(df, ScalMat(d, abs2(Float64(σ))))
IsoTDist(df::Float64, μ::Vector{Float64}, σ::Real) = GenericMvTDist(df, μ, ScalMat(length(μ), abs2(Float64(σ))))
IsoTDist(df::Float64, d::Int, σ::Real) = GenericMvTDist(df, ScalMat(d, abs2(Float64(σ))))

## convenient function to construct distributions of proper type based on arguments

mvtdist(df::Float64, μ::Vector{Float64}, C::AbstractPDMat) = GenericMvTDist(df, μ, C)
mvtdist(df::Float64, C::AbstractPDMat) = GenericMvTDist(df, C)

@compat mvtdist(df::Float64, μ::Vector{Float64}, σ::Real) = IsoTDist(df, μ, Float64(σ))
mvtdist(df::Float64, μ::Vector{Float64}, σ::Real) = IsoTDist(df, μ, Float64(σ))
mvtdist(df::Float64, d::Int, σ::Float64) = IsoTDist(d, σ)
mvtdist(df::Float64, μ::Vector{Float64}, σ::Vector{Float64}) = DiagTDist(df, μ, σ)
mvtdist(df::Float64, μ::Vector{Float64}, Σ::Matrix{Float64}) = MvTDist(df, μ, Σ)
Expand Down
4 changes: 2 additions & 2 deletions src/multivariate/vonmisesfisher.jl
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ immutable VonMisesFisher <: ContinuousMultivariateDistribution
end
end

@compat VonMisesFisher{T<:Real}(μ::Vector{T}, κ::Real) = VonMisesFisher(Float64(μ), Float64(κ))
VonMisesFisher{T<:Real}(μ::Vector{T}, κ::Real) = VonMisesFisher(Float64(μ), Float64(κ))

VonMisesFisher(θ::Vector{Float64}) = (κ = vecnorm(θ); VonMisesFisher(scale(θ, 1.0 / κ), κ))
VonMisesFisher{T<:Real}(θ::Vector{T}) = VonMisesFisher(Float64(θ))
Expand Down Expand Up @@ -77,7 +77,7 @@ function fit_mle(::Type{VonMisesFisher}, X::Matrix{Float64})
VonMisesFisher(μ, κ)
end

@compat fit_mle{T<:Real}(::Type{VonMisesFisher}, X::Matrix{T}) = fit_mle(VonMisesFisher, Float64(X))
fit_mle{T<:Real}(::Type{VonMisesFisher}, X::Matrix{T}) = fit_mle(VonMisesFisher, Float64(X))

function _vmf_estkappa(p::Int, ρ::Float64)
# Using the fixed-point iteration algorithm in the following paper:
Expand Down
12 changes: 6 additions & 6 deletions src/multivariates.jl
Original file line number Diff line number Diff line change
Expand Up @@ -19,17 +19,17 @@ rand(d::MultivariateDistribution, n::Int) = _rand!(sampler(d), Array(eltype(d),

## domain

@compat function insupport!{D<:MultivariateDistribution}(r::AbstractArray, d::Union{D,Type{D}}, X::AbstractMatrix)
function insupport!{D<:MultivariateDistribution}(r::AbstractArray, d::Union{D,Type{D}}, X::AbstractMatrix)
n = length(r)
size(X) == (length(d),n) ||
throw(DimensionMismatch("Inconsistent array dimensions."))
for i in 1:n
@inbounds r[i] = insupport(d, view(X, :, i))
@inbounds r[i] = insupport(d, slice(X, :, i))
end
return r
end

@compat insupport{D<:MultivariateDistribution}(d::Union{D,Type{D}}, X::AbstractMatrix) =
insupport{D<:MultivariateDistribution}(d::Union{D,Type{D}}, X::AbstractMatrix) =
insupport!(BitArray(size(X,2)), d, X)

## statistics
Expand Down Expand Up @@ -73,14 +73,14 @@ end

function _logpdf!(r::AbstractArray, d::MultivariateDistribution, X::DenseMatrix)
for i in 1 : size(X,2)
@inbounds r[i] = logpdf(d, view(X,:,i))
@inbounds r[i] = logpdf(d, slice(X,:,i))
end
return r
end

function _pdf!(r::AbstractArray, d::MultivariateDistribution, X::DenseMatrix)
for i in 1 : size(X,2)
@inbounds r[i] = pdf(d, view(X,:,i))
@inbounds r[i] = pdf(d, slice(X,:,i))
end
return r
end
Expand Down Expand Up @@ -114,7 +114,7 @@ end
function _loglikelihood(d::MultivariateDistribution, X::DenseMatrix)
ll = 0.0
for i in 1:size(X, 2)
ll += _logpdf(d, view(X,:,i))
ll += _logpdf(d, slice(X,:,i))
end
return ll
end
Expand Down
10 changes: 5 additions & 5 deletions src/samplers/binomial.jl
Original file line number Diff line number Diff line change
Expand Up @@ -212,10 +212,10 @@ function rand(s::BinomialTPESampler)
end

# 5.3
@compat x1 = Float64(y+1)
@compat f1 = Float64(s.Mi+1)
@compat z = Float64(s.n+1-s.Mi)
@compat w = Float64(s.n-y+1)
x1 = Float64(y+1)
f1 = Float64(s.Mi+1)
z = Float64(s.n+1-s.Mi)
w = Float64(s.n-y+1)

if A > (s.xM*log(f1/x1) + ((s.n-s.Mi)+0.5)*log(z/w) + (y-s.Mi)*log(w*s.r/(x1*s.q)) +
lstirling_asym(f1) + lstirling_asym(z) + lstirling_asym(x1) + lstirling_asym(w))
Expand Down Expand Up @@ -272,7 +272,7 @@ function BinomialPolySampler(n::Int, p::Float64)
BinomialPolySampler(use_btpe, geom_sampler, btpe_sampler)
end

@compat BinomialPolySampler(n::Real, p::Real) = BinomialPolySampler(round(Int, n), Float64(p))
BinomialPolySampler(n::Real, p::Real) = BinomialPolySampler(round(Int, n), Float64(p))

rand(s::BinomialPolySampler) = s.use_btpe ? rand(s.btpe_sampler) : rand(s.geom_sampler)

Expand Down
2 changes: 1 addition & 1 deletion src/samplers/obsoleted.jl
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ function DiscreteDistributionTable{T <: Real}(probs::Vector{T})
# Convert all Float64's into integers
vals = Array(Int64, n)
for i in 1:n
@compat vals[i] = round(Int, probs[i] * 64^9)
vals[i] = round(Int, probs[i] * 64^9)
end

# Allocate digit table and digit sums as table bounds
Expand Down
6 changes: 3 additions & 3 deletions src/samplers/vonmisesfisher.jl
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ _rand!(spl::VonMisesFisherSampler, x::DenseVector) = _rand!(spl, x, Array(Float6
function _rand!(spl::VonMisesFisherSampler, x::DenseMatrix)
t = Array(Float64, size(x, 1))
for j = 1:size(x, 2)
_rand!(spl, view(x,:,j), t)
_rand!(spl, slice(x,:,j), t)
end
return x
end
Expand Down Expand Up @@ -84,7 +84,7 @@ function _vmf_rotmat(u::Vector{Float64})

p = length(u)
A = zeros(p, p)
copy!(view(A,:,1), u)
copy!(slice(A,:,1), u)

# let k the be index of entry with max abs
k = 1
Expand All @@ -110,7 +110,7 @@ function _vmf_rotmat(u::Vector{Float64})

# perform QR factorization
Q = full(qrfact!(A)[:Q])
if dot(view(Q,:,1), u) < 0.0 # the first column was negated
if dot(slice(Q,:,1), u) < 0.0 # the first column was negated
for i = 1:p
@inbounds Q[i,1] = -Q[i,1]
end
Expand Down
Loading

0 comments on commit 73758ec

Please sign in to comment.