Made properties public (#538) (#553)
(cherry picked from commit b4450a3)
zaleslaw authored May 9, 2023
1 parent e6804b4 commit 5d8967e
Showing 9 changed files with 33 additions and 33 deletions.
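In practice, this commit means the optimizer hyperparameters can be read back from an optimizer instance instead of being hidden as `private val`. A minimal sketch of what the new surface allows, assuming KotlinDL's optimizer package `org.jetbrains.kotlinx.dl.api.core.optimizer` (the import path is not shown in this diff); only properties that appear in the diff below are referenced, and the call site itself is illustrative:

import org.jetbrains.kotlinx.dl.api.core.optimizer.Adam

fun main() {
    // Construct an optimizer exactly as before; defaults match the diff below.
    val optimizer = Adam(learningRate = 0.001f, beta1 = 0.9f, beta2 = 0.999f)

    // Before this commit these properties were `private val` and could not be read from outside;
    // after it they are `public val`, so they can be inspected, e.g. for logging or export.
    println("lr=${optimizer.learningRate} beta1=${optimizer.beta1} beta2=${optimizer.beta2} eps=${optimizer.epsilon}")
}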
@@ -46,9 +46,9 @@ private const val ACCUMULATOR_UPDATE = "accum_update"
  * @property [epsilon] Float >= 0. Fuzz factor.
  */
 public class AdaDelta(
-    private val learningRate: Float = 0.1f,
-    private val rho: Float = 0.95f,
-    private val epsilon: Float = 1e-8f,
+    public val learningRate: Float = 0.1f,
+    public val rho: Float = 0.95f,
+    public val epsilon: Float = 1e-8f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var epsilonConstant: Constant<Float>
@@ -40,8 +40,8 @@ private const val ACCUMULATOR = "accumulator"
  * @property [initialAccumulatorValue] Decay: Float >= 0. Learning rate decay over each update.
  */
 public class AdaGrad(
-    private val learningRate: Float = 0.1f,
-    private val initialAccumulatorValue: Float = 0.01f,
+    public val learningRate: Float = 0.1f,
+    public val initialAccumulatorValue: Float = 0.01f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var initialAccumulatorValueConstant: Constant<Float>
@@ -46,10 +46,10 @@ private const val SQUARED_ACCUMULATOR = "gradient_squared_accumulator"
  * @property [l2Strength] A float value, must be greater than or equal to zero.
  */
 public class AdaGradDA(
-    private val learningRate: Float = 0.1f,
-    private val initialAccumulatorValue: Float = 0.01f,
-    private val l1Strength: Float = 0.01f,
-    private val l2Strength: Float = 0.01f,
+    public val learningRate: Float = 0.1f,
+    public val initialAccumulatorValue: Float = 0.01f,
+    public val l1Strength: Float = 0.01f,
+    public val l2Strength: Float = 0.01f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var learningRateConst: Constant<Float>
@@ -45,11 +45,11 @@ private val SECOND_BETA_POWER_NAME = defaultOptimizerVariableName("beta2_power")
  * @property [epsilon] Float >= 0. Fuzz factor.
  */
 public class Adam(
-    private val learningRate: Float = 0.001f,
-    private val beta1: Float = 0.9f,
-    private val beta2: Float = 0.999f,
-    private val epsilon: Float = 1e-07f,
-    private val useNesterov: Boolean = false,
+    public val learningRate: Float = 0.001f,
+    public val beta1: Float = 0.9f,
+    public val beta2: Float = 0.999f,
+    public val epsilon: Float = 1e-07f,
+    public val useNesterov: Boolean = false,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

@@ -46,10 +46,10 @@ private val FIRST_BETA_POWER_NAME = defaultOptimizerVariableName("beta1_power")
  * @property [epsilon] Float >= 0. Fuzz factor.
  */
 public class Adamax(
-    private val learningRate: Float = 0.001f,
-    private val beta1: Float = 0.9f,
-    private val beta2: Float = 0.999f,
-    private val epsilon: Float = 1e-07f,
+    public val learningRate: Float = 0.001f,
+    public val beta1: Float = 0.9f,
+    public val beta2: Float = 0.999f,
+    public val epsilon: Float = 1e-07f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

@@ -52,12 +52,12 @@ private const val LINEAR_ACCUMULATOR = "linear_accumulator"
  * When input is sparse shrinkage will only happen on the active weights.
  */
 public class Ftrl(
-    private val learningRate: Float = 0.001f,
-    private val l1RegularizationStrength: Float = 0.0f,
-    private val l2RegularizationStrength: Float = 0.0f,
-    private val learningRatePower: Float = -0.5f,
-    private val l2ShrinkageRegularizationStrength: Float = 0.0f,
-    private var initialAccumulatorValue: Float = 0.0f,
+    public val learningRate: Float = 0.001f,
+    public val l1RegularizationStrength: Float = 0.0f,
+    public val l2RegularizationStrength: Float = 0.0f,
+    public val learningRatePower: Float = -0.5f,
+    public val l2ShrinkageRegularizationStrength: Float = 0.0f,
+    public var initialAccumulatorValue: Float = 0.0f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     /** */
@@ -25,9 +25,9 @@ private const val MOMENTUM = "momentum"
  * @property [useNesterov] If true, applies Nesterov momentum.
  */
 public class Momentum(
-    private val learningRate: Float = 0.001f,
-    private val momentum: Float = 0.99f,
-    private val useNesterov: Boolean = true,
+    public val learningRate: Float = 0.001f,
+    public val momentum: Float = 0.99f,
+    public val useNesterov: Boolean = true,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var momentumConst: Constant<Float>
@@ -31,11 +31,11 @@ private const val MOMENTUM = "momentum"
  * @property [centered] Centered or not.
  */
 public class RMSProp(
-    private val learningRate: Float = 0.001f,
-    private val decay: Float = 0.9f,
-    private val momentum: Float = 0.0f,
-    private val epsilon: Float = 1e-10f,
-    private val centered: Boolean = false,
+    public val learningRate: Float = 0.001f,
+    public val decay: Float = 0.9f,
+    public val momentum: Float = 0.0f,
+    public val epsilon: Float = 1e-10f,
+    public val centered: Boolean = false,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

@@ -19,7 +19,7 @@ import org.tensorflow.op.train.ApplyGradientDescent
  * NOTE: It's not an equivalent for `keras.sgd`, it is a pure SGD with simple 'variable' update by subtracting 'alpha' * 'delta' from it.
  */
 public class SGD(
-    private var learningRate: Float = 0.2f,
+    public var learningRate: Float = 0.2f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {

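Note that two of the properties in this commit are `var` rather than `val` (`initialAccumulatorValue` in Ftrl and `learningRate` in SGD), so they are not only readable but also reassignable after construction. A hedged sketch, again assuming the same optimizer package; the helper below is hypothetical and not part of the library, and whether a reassigned value takes effect depends on when the optimizer's graph constants are built, so treat it as illustrative only:

import org.jetbrains.kotlinx.dl.api.core.optimizer.SGD

fun halveEveryTenEpochs(sgd: SGD, epoch: Int) {
    // `learningRate` is now `public var`, so callers can overwrite it;
    // this hypothetical step-decay helper simply halves it every tenth epoch.
    if (epoch > 0 && epoch % 10 == 0) {
        sgd.learningRate *= 0.5f
    }
}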
