This repository has been archived by the owner on Apr 21, 2023. It is now read-only.

Commit

Bump to Spark 2.0 with Scala 2.11
matfax committed Sep 27, 2016
Parent: bb33ea4 · Commit: 1b39d03
Showing 6 changed files with 18 additions and 24 deletions.

pom.xml (18 changes: 9 additions & 9 deletions)
@@ -5,8 +5,8 @@
 <modelVersion>4.0.0</modelVersion>
 
 <groupId>com.irvingc.spark</groupId>
-<artifactId>dbscan-on-spark_2.10</artifactId>
-<version>0.2.0-SNAPSHOT</version>
+<artifactId>dbscan-on-spark_2.11</artifactId>
+<version>0.3.0-SNAPSHOT</version>
 
 <name>Distributed DBSCAN on Apache Spark</name>
 <url>http://www.irvingc.com/dbscan-on-spark</url>
@@ -30,10 +30,10 @@
 <properties>
 <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
-<scala.version>2.10.4</scala.version>
-<scala.binary.version>2.10</scala.binary.version>
-<scalatest.version>2.2.1</scalatest.version>
-<spark.version>1.6.1</spark.version>
+<scala.version>2.11.8</scala.version>
+<scala.binary.version>2.11</scala.binary.version>
+<scalatest.version>2.2.6</scalatest.version>
+<spark.version>2.0.0</spark.version>
 </properties>
 
 <repositories>
@@ -71,7 +71,7 @@
 <dependency>
 <groupId>com.meetup</groupId>
 <artifactId>archery_${scala.binary.version}</artifactId>
-<version>0.3.0</version>
+<version>0.4.0</version>
 </dependency>
 <dependency>
 <groupId>org.scalatest</groupId>
@@ -86,7 +86,7 @@
 <plugin>
 <groupId>net.alchim31.maven</groupId>
 <artifactId>scala-maven-plugin</artifactId>
-<version>3.2.0</version>
+<version>3.2.2</version>
 <executions>
 <execution>
 <goals>
@@ -99,7 +99,7 @@
 <plugin>
 <groupId>org.scalastyle</groupId>
 <artifactId>scalastyle-maven-plugin</artifactId>
-<version>0.6.0</version>
+<version>0.8.0</version>
 <configuration>
 <verbose>false</verbose>
 <failOnViolation>true</failOnViolation>
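
The artifact coordinates move from dbscan-on-spark_2.10 / 0.2.0-SNAPSHOT to dbscan-on-spark_2.11 / 0.3.0-SNAPSHOT. As a rough sketch (not part of the commit), an sbt build consuming the bumped artifact would reference it like this, assuming the snapshot is published to a resolver the build already knows about:

// sbt sketch: the new Scala 2.11 coordinates taken from the pom.xml diff above
libraryDependencies += "com.irvingc.spark" % "dbscan-on-spark_2.11" % "0.3.0-SNAPSHOT"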

Changed file 2 of 6 (file name not shown)
@@ -16,8 +16,7 @@
 */
 package org.apache.spark.mllib.clustering.dbscan
 
-import org.apache.spark.Logging
-import org.apache.spark.SparkContext.rddToPairRDDFunctions
+import org.apache.spark.internal.Logging
 import org.apache.spark.mllib.clustering.dbscan.DBSCANLabeledPoint.Flag
 import org.apache.spark.mllib.linalg.Vector
 import org.apache.spark.rdd.RDD
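
Spark 2.0 moved the Logging trait from org.apache.spark.Logging to org.apache.spark.internal.Logging and marked it private[spark]; this project can keep mixing it in because its sources live under the org.apache.spark package. The same import swap recurs in the remaining Scala files below. A minimal sketch of the pattern, with an illustrative class name that is not taken from the diff:

package org.apache.spark.mllib.clustering.dbscan

// Spark 2.x location of the Logging trait; accessible here only because this
// package sits inside the org.apache.spark namespace, as the changed files do.
import org.apache.spark.internal.Logging

// Illustrative class, not part of the commit: the mixin and the log* helpers
// work exactly as before; only the import path changed.
private[dbscan] class LoggingMigrationSketch extends Logging {
  def describe(): Unit =
    logInfo("Same Logging API as Spark 1.x, new package in Spark 2.0")
}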

Changed file 3 of 6 (file name not shown)
@@ -16,9 +16,9 @@
 */
 package org.apache.spark.mllib.clustering.dbscan
 
-import scala.annotation.tailrec
+import org.apache.spark.internal.Logging
 
-import org.apache.spark.Logging
+import scala.annotation.tailrec
 
 /**
  * Helper methods for calling the partitioner

Changed file 4 of 6 (file name not shown)
@@ -16,15 +16,11 @@
 */
 package org.apache.spark.mllib.clustering.dbscan
 
-import scala.collection.mutable.Queue
-
-import org.apache.spark.Logging
+import archery.{Box, Entry, Point, RTree}
+import org.apache.spark.internal.Logging
 import org.apache.spark.mllib.clustering.dbscan.DBSCANLabeledPoint.Flag
 
-import archery.Box
-import archery.Entry
-import archery.Point
-import archery.RTree
+import scala.collection.mutable.Queue
 
 /**
  * An implementation of DBSCAN using an R-Tree to improve its running time

Changed file 5 of 6 (file name not shown)
@@ -16,12 +16,12 @@
 */
 package org.apache.spark.mllib.clustering.dbscan
 
-import scala.collection.mutable.Queue
-
-import org.apache.spark.Logging
+import org.apache.spark.internal.Logging
 import org.apache.spark.mllib.clustering.dbscan.DBSCANLabeledPoint.Flag
 import org.apache.spark.mllib.linalg.Vectors
 
+import scala.collection.mutable.Queue
+
 /**
  * A naive implementation of DBSCAN. It has O(n2) complexity
  * but uses no extra memory. This implementation is not used

Changed file 6 of 6 (file name not shown)
@@ -16,7 +16,6 @@
 */
 package org.apache.spark.mllib.clustering.dbscan
 
-import org.apache.spark.SparkContext.rddToPairRDDFunctions
 import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.util.MLlibTestSparkContext
 import org.scalatest.Matchers
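
The deleted rddToPairRDDFunctions import (removed here and in the first changed Scala file) is unnecessary on Spark 2.0: since Spark 1.3 the conversion to PairRDDFunctions is provided implicitly by the org.apache.spark.rdd.RDD companion object, and the deprecated helper on SparkContext appears to have been dropped in 2.0, which is presumably why this commit removes the import. A small illustrative sketch, not part of the commit:

package org.apache.spark.mllib.clustering.dbscan

import org.apache.spark.SparkContext

// Illustrative object, not from the diff: key-based operations on an RDD of
// pairs compile without any explicit import because the implicit conversion
// is found in RDD's companion object via implicit scope.
private[dbscan] object PairRddSketch {
  def sumByKey(sc: SparkContext): Array[(String, Int)] = {
    val pairs = sc.parallelize(Seq(("a", 1), ("b", 2), ("a", 3)))
    pairs.reduceByKey(_ + _).collect() // no rddToPairRDDFunctions import needed
  }
}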
