Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[jvm-packages] Add feature size for LabeledPoint and DataBatch #5303

Merged
merged 6 commits into from
Apr 7, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions include/xgboost/generic_parameters.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ struct GenericParameter : public XGBoostParameter<GenericParameter> {
size_t gpu_page_size;
bool enable_experimental_json_serialization {false};
bool validate_parameters {false};
bool validate_features {true};

void CheckDeprecated() {
if (this->n_gpus != 0) {
Expand Down Expand Up @@ -75,9 +74,6 @@ struct GenericParameter : public XGBoostParameter<GenericParameter> {
DMLC_DECLARE_FIELD(validate_parameters)
.set_default(false)
.describe("Enable checking whether parameters are used or not.");
DMLC_DECLARE_FIELD(validate_features)
.set_default(false)
.describe("Enable validating input DMatrix.");
DMLC_DECLARE_FIELD(n_gpus)
.set_default(0)
.set_range(0, 1)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ object XGBoost {
Rabit.init(workerEnvs)
val mapper = (x: LabeledVector) => {
val (index, value) = x.vector.toSeq.unzip
LabeledPoint(x.label.toFloat, index.toArray, value.map(_.toFloat).toArray)
LabeledPoint(x.label.toFloat, x.vector.size, index.toArray, value.map(_.toFloat).toArray)
}
val dataIter = for (x <- it.iterator().asScala) yield mapper(x)
val trainMat = new DMatrix(dataIter, null)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ class XGBoostModel (booster: Booster) extends Serializable {
(it: Iterator[Vector]) => {
val mapper = (x: Vector) => {
val (index, value) = x.toSeq.unzip
LabeledPoint(0.0f, index.toArray, value.map(_.toFloat).toArray)
LabeledPoint(0.0f, x.size, index.toArray, value.map(_.toFloat).toArray)
}
val dataIter = for (x <- it) yield mapper(x)
val dmat = new DMatrix(dataIter, null)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,15 +38,11 @@ object DataUtils extends Serializable {

/**
* Returns feature of the point as [[org.apache.spark.ml.linalg.Vector]].
*
* If the point is sparse, the dimensionality of the resulting sparse
* vector would be [[Int.MaxValue]]. This is the only safe value, since
* XGBoost does not store the dimensionality explicitly.
*/
def features: Vector = if (labeledPoint.indices == null) {
Vectors.dense(labeledPoint.values.map(_.toDouble))
} else {
Vectors.sparse(Int.MaxValue, labeledPoint.indices, labeledPoint.values.map(_.toDouble))
Vectors.sparse(labeledPoint.size, labeledPoint.indices, labeledPoint.values.map(_.toDouble))
}
}

Expand All @@ -68,9 +64,9 @@ object DataUtils extends Serializable {
*/
def asXGB: XGBLabeledPoint = v match {
case v: DenseVector =>
XGBLabeledPoint(0.0f, null, v.values.map(_.toFloat))
XGBLabeledPoint(0.0f, v.size, null, v.values.map(_.toFloat))
case v: SparseVector =>
XGBLabeledPoint(0.0f, v.indices, v.values.map(_.toFloat))
XGBLabeledPoint(0.0f, v.size, v.indices, v.values.map(_.toFloat))
}
}

Expand Down Expand Up @@ -162,18 +158,18 @@ object DataUtils extends Serializable {
df => df.select(selectedColumns: _*).rdd.map {
case row @ Row(label: Float, features: Vector, weight: Float, group: Int,
baseMargin: Float) =>
val (indices, values) = features match {
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
case v: DenseVector => (null, v.values.map(_.toFloat))
val (size, indices, values) = features match {
case v: SparseVector => (v.size, v.indices, v.values.map(_.toFloat))
case v: DenseVector => (v.size, null, v.values.map(_.toFloat))
}
val xgbLp = XGBLabeledPoint(label, indices, values, weight, group, baseMargin)
val xgbLp = XGBLabeledPoint(label, size, indices, values, weight, group, baseMargin)
attachPartitionKey(row, deterministicPartition, numWorkers, xgbLp)
case row @ Row(label: Float, features: Vector, weight: Float, baseMargin: Float) =>
val (indices, values) = features match {
case v: SparseVector => (v.indices, v.values.map(_.toFloat))
case v: DenseVector => (null, v.values.map(_.toFloat))
val (size, indices, values) = features match {
case v: SparseVector => (v.size, v.indices, v.values.map(_.toFloat))
case v: DenseVector => (v.size, null, v.values.map(_.toFloat))
}
val xgbLp = XGBLabeledPoint(label, indices, values, weight, baseMargin = baseMargin)
val xgbLp = XGBLabeledPoint(label, size, indices, values, weight, baseMargin = baseMargin)
attachPartitionKey(row, deterministicPartition, numWorkers, xgbLp)
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
/*
Copyright (c) 2014 by Contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ml.dmlc.xgboost4j.scala.spark

import ml.dmlc.xgboost4j.java.XGBoostError
import org.apache.spark.Partitioner
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.SparkSession
import org.scalatest.FunSuite

import scala.util.Random

class FeatureSizeValidatingSuite extends FunSuite with PerTest {

  test("transform throwing exception if feature size of dataset is different with model's") {
    val modelPath = getClass.getResource("/model/0.82/model").getPath
    val model = XGBoostClassificationModel.read.load(modelPath)
    val rng = new Random(0)
    // The 0.82 model was trained with 251 features, so transform is expected to
    // reject a dataset whose assembled feature vector has a different size.
    val singleFeatureDF = ss.createDataFrame(
      Seq.fill(100)(rng.nextInt(2)).map(i => (i, i))).
      toDF("feature", "label")
    val assembler = new VectorAssembler()
      .setInputCols(singleFeatureDF.columns.filter(c => !c.contains("label")))
      .setOutputCol("features")
    val ex = intercept[Exception] {
      model.transform(assembler.transform(singleFeatureDF)).show()
    }
    assert(ex.getMessage.contains(
      "Number of columns does not match number of features in booster"))
  }

  test("train throwing exception if feature size of dataset is different on distributed train") {
    val paramMap = Map("eta" -> "1", "max_depth" -> "6", "silent" -> "1",
      "objective" -> "binary:logistic",
      "num_round" -> 5, "num_workers" -> 2, "use_external_memory" -> true, "missing" -> 0)
    import DataUtils._
    // NOTE(review): the first test uses PerTest's `ss`; this one builds its own
    // session handle — getOrCreate() presumably returns the same session. Confirm.
    val sparkSession = SparkSession.builder().getOrCreate()
    import sparkSession.implicits._
    // Route each row to the partition matching its label so the two synthetic
    // points with differing feature sizes end up on different workers, which
    // should make distributed training fail with a feature-size mismatch.
    val byLabel = new Partitioner {
      override def numPartitions: Int = 2

      override def getPartition(key: Any): Int = key.asInstanceOf[Float].toInt
    }
    val trainDF = sc.parallelize(Synthetic.trainWithDiffFeatureSize, 2)
      .map(lp => (lp.label, lp))
      .partitionBy(byLabel)
      .map(_._2)
      .zipWithIndex()
      .map { case (lp, id) => (id, lp.label, lp.features) }
      .toDF("id", "label", "features")
    intercept[XGBoostError] {
      new XGBoostClassifier(paramMap).fit(trainDF)
    }
  }

}
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,12 @@ package ml.dmlc.xgboost4j.scala.spark
import java.io.File
import java.util.Arrays

import scala.io.Source

import ml.dmlc.xgboost4j.scala.DMatrix
import scala.util.Random

import scala.util.Random
import org.apache.spark.ml.feature._
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.sql.functions._
import org.scalatest.FunSuite

class PersistenceSuite extends FunSuite with TmpFolderPerSuite with PerTest {
Expand Down Expand Up @@ -138,12 +137,21 @@ class PersistenceSuite extends FunSuite with TmpFolderPerSuite with PerTest {
val modelPath = getClass.getResource("/model/0.82/model").getPath
val model = XGBoostClassificationModel.read.load(modelPath)
val r = new Random(0)
val df = ss.createDataFrame(Seq.fill(100)(r.nextInt(2)).map(i => (i, i))).
var df = ss.createDataFrame(Seq.fill(100)(r.nextInt(2)).map(i => (i, i))).
toDF("feature", "label")
// 0.82/model was trained with 251 features. and transform will throw exception
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ditto

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ditto

// if feature size of data is not equal to 251
for (x <- 1 to 250) {
df = df.withColumn(s"feature_${x}", lit(1))
}
val assembler = new VectorAssembler()
.setInputCols(df.columns.filter(!_.contains("label")))
.setOutputCol("features")
model.transform(assembler.transform(df)).show()
df = assembler.transform(df)
for (x <- 1 to 250) {
df = df.drop(s"feature_${x}")
}
model.transform(df).show()
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,12 @@ trait TrainTestData {
Source.fromInputStream(is).getLines()
}

protected def getLabeledPoints(resource: String, zeroBased: Boolean): Seq[XGBLabeledPoint] = {
protected def getLabeledPoints(resource: String, featureSize: Int, zeroBased: Boolean):
Seq[XGBLabeledPoint] = {
getResourceLines(resource).map { line =>
val labelAndFeatures = line.split(" ")
val label = labelAndFeatures.head.toFloat
val values = new Array[Float](126)
val values = new Array[Float](featureSize)
for (feature <- labelAndFeatures.tail) {
val idAndValue = feature.split(":")
if (!zeroBased) {
Expand All @@ -45,7 +46,7 @@ trait TrainTestData {
}
}

XGBLabeledPoint(label, null, values)
XGBLabeledPoint(label, featureSize, null, values)
}.toList
}

Expand All @@ -56,14 +57,14 @@ trait TrainTestData {
val label = original.head.toFloat
val group = original.last.toInt
val values = original.slice(1, length - 1).map(_.toFloat)
XGBLabeledPoint(label, null, values, 1f, group, Float.NaN)
XGBLabeledPoint(label, values.size, null, values, 1f, group, Float.NaN)
}.toList
}
}

object Classification extends TrainTestData {
val train: Seq[XGBLabeledPoint] = getLabeledPoints("/agaricus.txt.train", zeroBased = false)
val test: Seq[XGBLabeledPoint] = getLabeledPoints("/agaricus.txt.test", zeroBased = false)
val train: Seq[XGBLabeledPoint] = getLabeledPoints("/agaricus.txt.train", 126, zeroBased = false)
val test: Seq[XGBLabeledPoint] = getLabeledPoints("/agaricus.txt.test", 126, zeroBased = false)
}

object MultiClassification extends TrainTestData {
Expand All @@ -80,30 +81,42 @@ object MultiClassification extends TrainTestData {
values(i) = featuresAndLabel(i).toFloat
}

XGBLabeledPoint(label, null, values.take(values.length - 1))
XGBLabeledPoint(label, values.length - 1, null, values.take(values.length - 1))
}.toList
}
}

object Regression extends TrainTestData {
val train: Seq[XGBLabeledPoint] = getLabeledPoints("/machine.txt.train", zeroBased = true)
val test: Seq[XGBLabeledPoint] = getLabeledPoints("/machine.txt.test", zeroBased = true)
val MACHINE_COL_NUM = 36
val train: Seq[XGBLabeledPoint] = getLabeledPoints(
"/machine.txt.train", MACHINE_COL_NUM, zeroBased = true)
val test: Seq[XGBLabeledPoint] = getLabeledPoints(
"/machine.txt.test", MACHINE_COL_NUM, zeroBased = true)
}

object Ranking extends TrainTestData {
val RANK_COL_NUM = 3
val train: Seq[XGBLabeledPoint] = getLabeledPointsWithGroup("/rank.train.csv")
val test: Seq[XGBLabeledPoint] = getLabeledPoints("/rank.test.txt", zeroBased = false)
val test: Seq[XGBLabeledPoint] = getLabeledPoints(
"/rank.test.txt", RANK_COL_NUM, zeroBased = false)

private def getGroups(resource: String): Seq[Int] = {
getResourceLines(resource).map(_.toInt).toList
}
}

object Synthetic extends {
val TRAIN_COL_NUM = 3
val TRAIN_WRONG_COL_NUM = 2
val train: Seq[XGBLabeledPoint] = Seq(
XGBLabeledPoint(1.0f, Array(0, 1), Array(1.0f, 2.0f)),
XGBLabeledPoint(0.0f, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
XGBLabeledPoint(0.0f, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
XGBLabeledPoint(1.0f, Array(0, 1), Array(1.0f, 2.0f))
XGBLabeledPoint(1.0f, TRAIN_COL_NUM, Array(0, 1), Array(1.0f, 2.0f)),
XGBLabeledPoint(0.0f, TRAIN_COL_NUM, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
XGBLabeledPoint(0.0f, TRAIN_COL_NUM, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f)),
XGBLabeledPoint(1.0f, TRAIN_COL_NUM, Array(0, 1), Array(1.0f, 2.0f))
)

val trainWithDiffFeatureSize: Seq[XGBLabeledPoint] = Seq(
XGBLabeledPoint(1.0f, TRAIN_WRONG_COL_NUM, Array(0, 1), Array(1.0f, 2.0f)),
XGBLabeledPoint(0.0f, TRAIN_COL_NUM, Array(0, 1, 2), Array(1.0f, 2.0f, 3.0f))
)
}
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,9 @@
package ml.dmlc.xgboost4j.scala.spark

import ml.dmlc.xgboost4j.scala.{DMatrix, XGBoost => ScalaXGBoost}

import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.sql._
import org.scalatest.FunSuite

import org.apache.spark.Partitioner

class XGBoostClassifierSuite extends FunSuite with PerTest {
Expand Down Expand Up @@ -308,4 +305,5 @@ class XGBoostClassifierSuite extends FunSuite with PerTest {
val xgb = new XGBoostClassifier(paramMap)
xgb.fit(repartitioned)
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -16,19 +16,13 @@

package ml.dmlc.xgboost4j.scala.spark

import java.nio.file.Files

import scala.util.Random

import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint}
import ml.dmlc.xgboost4j.scala.DMatrix
import ml.dmlc.xgboost4j.scala.{XGBoost => SXGBoost, _}
import org.apache.hadoop.fs.{FileSystem, Path}

import org.apache.spark.TaskContext
import org.apache.spark.{TaskContext}
import org.scalatest.FunSuite

import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.functions.lit

class XGBoostGeneralSuite extends FunSuite with TmpFolderPerSuite with PerTest {

Expand Down Expand Up @@ -350,17 +344,27 @@ class XGBoostGeneralSuite extends FunSuite with TmpFolderPerSuite with PerTest {
val modelPath = getClass.getResource("/model/0.82/model").getPath
val model = XGBoostClassificationModel.read.load(modelPath)
val r = new Random(0)
val df = ss.createDataFrame(Seq.fill(100000)(1).map(i => (i, i))).
var df = ss.createDataFrame(Seq.fill(100000)(1).map(i => (i, i))).
toDF("feature", "label").repartition(5)
// 0.82/model was trained with 251 features. and transform will throw exception
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Based on my comments, this claim is invalid now —

you should not use the native API to access model files produced by Spark directly.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @CodingCat, I added some log info in the native code when loading the 0.82 model from the JVM; the model did store the feature size = 251.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just to add more context here: the prediction data has only 1 feature, while the model was trained with 251 features.

// if feature size of data is not equal to 251
for (x <- 1 to 250) {
df = df.withColumn(s"feature_${x}", lit(1))
}
val assembler = new VectorAssembler()
.setInputCols(df.columns.filter(!_.contains("label")))
.setOutputCol("features")
val df1 = model.transform(assembler.transform(df)).withColumnRenamed(
df = assembler.transform(df)
for (x <- 1 to 250) {
df = df.drop(s"feature_${x}")
}
val df1 = model.transform(df).withColumnRenamed(
"prediction", "prediction1").withColumnRenamed(
"rawPrediction", "rawPrediction1").withColumnRenamed(
"probability", "probability1")
val df2 = model.transform(df1)
df1.collect()
df2.collect()
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,7 @@ class XGBoostRabitRegressionSuite extends FunSuite with PerTest {

test("test regression prediction parity w/o ring reduce") {
val training = buildDataFrame(Regression.train)
val testDM = new DMatrix(Regression.test.iterator, null)
val testDF = buildDataFrame(Classification.test)
val testDF = buildDataFrame(Regression.test)
val xgbSettings = Map("eta" -> "1", "max_depth" -> "2", "verbosity" -> "1",
"objective" -> "reg:squarederror", "num_round" -> 5, "num_workers" -> numWorkers)
val model1 = new XGBoostRegressor(xgbSettings).fit(training)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ public class Booster implements Serializable, KryoSerializable {
*/
Booster(Map<String, Object> params, DMatrix[] cacheMats) throws XGBoostError {
init(cacheMats);
setParam("validate_features", "0");
setParams(params);
}

Expand Down
Loading