-
-
Notifications
You must be signed in to change notification settings - Fork 8.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[jvm-packages]add feature size for LabelPoint and DataBatch #5303
Changes from all commits
d9b5130
064a9c2
9689053
e645613
ba24c52
7868d8d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,71 @@ | ||
/* | ||
Copyright (c) 2014 by Contributors | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package ml.dmlc.xgboost4j.scala.spark | ||
|
||
import ml.dmlc.xgboost4j.java.XGBoostError | ||
import org.apache.spark.Partitioner | ||
import org.apache.spark.ml.feature.VectorAssembler | ||
import org.apache.spark.sql.SparkSession | ||
import org.scalatest.FunSuite | ||
|
||
import scala.util.Random | ||
|
||
class FeatureSizeValidatingSuite extends FunSuite with PerTest {

  test("transform throwing exception if feature size of dataset is different with model's") {
    // The bundled 0.82/model was trained with 251 features; transform must
    // reject input whose assembled feature vector has a different length.
    val modelPath = getClass.getResource("/model/0.82/model").getPath
    val model = XGBoostClassificationModel.read.load(modelPath)
    val rng = new Random(0)
    // Build a dataset with a single feature column (size 1, not 251).
    val inputDF = ss
      .createDataFrame(Seq.fill(100)(rng.nextInt(2)).map(i => (i, i)))
      .toDF("feature", "label")
    val assembler = new VectorAssembler()
      .setInputCols(inputDF.columns.filter(!_.contains("label")))
      .setOutputCol("features")
    val caught = intercept[Exception] {
      model.transform(assembler.transform(inputDF)).show()
    }
    assert(caught.getMessage.contains(
      "Number of columns does not match number of features in booster"))
  }

  test("train throwing exception if feature size of dataset is different on distributed train") {
    // Two workers, each fed a partition whose rows carry a different feature
    // size; distributed training is expected to fail with an XGBoostError.
    val paramMap = Map(
      "eta" -> "1", "max_depth" -> "6", "silent" -> "1",
      "objective" -> "binary:logistic",
      "num_round" -> 5, "num_workers" -> 2, "use_external_memory" -> true, "missing" -> 0)
    import DataUtils._
    val sparkSession = SparkSession.builder().getOrCreate()
    import sparkSession.implicits._
    // Route rows by their (0/1) label so each partition is homogeneous in
    // feature size.
    val labelKeyedPartitioner = new Partitioner {
      override def numPartitions: Int = 2

      override def getPartition(key: Any): Int = key.asInstanceOf[Float].toInt
    }
    val repartitioned = sc
      .parallelize(Synthetic.trainWithDiffFeatureSize, 2)
      .map(lp => (lp.label, lp))
      .partitionBy(labelKeyedPartitioner)
      .map(_._2)
      .zipWithIndex()
      .map { case (lp, id) => (id, lp.label, lp.features) }
      .toDF("id", "label", "features")
    val xgb = new XGBoostClassifier(paramMap)
    intercept[XGBoostError] {
      xgb.fit(repartitioned)
    }
  }

}
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,19 +16,13 @@ | |
|
||
package ml.dmlc.xgboost4j.scala.spark | ||
|
||
import java.nio.file.Files | ||
|
||
import scala.util.Random | ||
|
||
import ml.dmlc.xgboost4j.{LabeledPoint => XGBLabeledPoint} | ||
import ml.dmlc.xgboost4j.scala.DMatrix | ||
import ml.dmlc.xgboost4j.scala.{XGBoost => SXGBoost, _} | ||
import org.apache.hadoop.fs.{FileSystem, Path} | ||
|
||
import org.apache.spark.TaskContext | ||
import org.apache.spark.{TaskContext} | ||
import org.scalatest.FunSuite | ||
|
||
import org.apache.spark.ml.feature.VectorAssembler | ||
import org.apache.spark.sql.functions.lit | ||
|
||
class XGBoostGeneralSuite extends FunSuite with TmpFolderPerSuite with PerTest { | ||
|
||
|
@@ -350,17 +344,27 @@ class XGBoostGeneralSuite extends FunSuite with TmpFolderPerSuite with PerTest { | |
val modelPath = getClass.getResource("/model/0.82/model").getPath | ||
val model = XGBoostClassificationModel.read.load(modelPath) | ||
val r = new Random(0) | ||
val df = ss.createDataFrame(Seq.fill(100000)(1).map(i => (i, i))). | ||
var df = ss.createDataFrame(Seq.fill(100000)(1).map(i => (i, i))). | ||
toDF("feature", "label").repartition(5) | ||
// 0.82/model was trained with 251 features. and transform will throw exception | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. based on my comments, this claim is invalid now, you should not use native api to access model files produced by Spark directly There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hi @CodingCat, I added some log info in native when loading 0.82/model from jvm, the model did store the feature size = 251. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just to put more context here. The prediction data has only 1 feature, while model is trained with 251 features. |
||
// if feature size of data is not equal to 251 | ||
for (x <- 1 to 250) { | ||
df = df.withColumn(s"feature_${x}", lit(1)) | ||
} | ||
val assembler = new VectorAssembler() | ||
.setInputCols(df.columns.filter(!_.contains("label"))) | ||
.setOutputCol("features") | ||
val df1 = model.transform(assembler.transform(df)).withColumnRenamed( | ||
df = assembler.transform(df) | ||
for (x <- 1 to 250) { | ||
df = df.drop(s"feature_${x}") | ||
} | ||
val df1 = model.transform(df).withColumnRenamed( | ||
"prediction", "prediction1").withColumnRenamed( | ||
"rawPrediction", "rawPrediction1").withColumnRenamed( | ||
"probability", "probability1") | ||
val df2 = model.transform(df1) | ||
df1.collect() | ||
df2.collect() | ||
} | ||
|
||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
ditto
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
ditto