class SimpleSGD[T] extends StochasticGradientDescent[T]
Linear Supertypes
- StochasticGradientDescent
- FirstOrderMinimizer
- SerializableLogging
- Serializable
- Serializable
- Minimizer
- AnyRef
- Any
Instance Constructors
- new SimpleSGD(initialStepSize: Double = 4, maxIter: Int = 100)(implicit vs: NormedModule[T, Double])
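A minimal usage sketch, not part of the Scaladoc itself: it assumes SimpleSGD is reachable as breeze.optimize.StochasticGradientDescent.SimpleSGD (where Breeze nests it), that DiffFunction is a StochasticDiffFunction, and that the implicit NormedModule for DenseVector[Double] comes from breeze.linalg; the quadratic objective and object name are purely illustrative.

  import breeze.linalg.DenseVector
  import breeze.optimize.{DiffFunction, StochasticGradientDescent}

  object SimpleSGDExample extends App {
    // Illustrative objective: f(x) = ||x - 3||^2 with gradient 2 * (x - 3)
    val f = new DiffFunction[DenseVector[Double]] {
      def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = {
        val diff = x - 3.0
        ((diff dot diff), diff * 2.0)
      }
    }

    // Defaults documented above: initialStepSize = 4, maxIter = 100
    val sgd = new StochasticGradientDescent.SimpleSGD[DenseVector[Double]]()
    val xMin = sgd.minimize(f, DenseVector.zeros[Double](5))
    println(xMin) // expected to land near DenseVector(3.0, 3.0, 3.0, 3.0, 3.0)
  }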
Type Members
- type History = Unit
Any history the derived minimization function needs to do its updates, typically an approximation to the second derivative/Hessian matrix. SimpleSGD keeps no such history, so the type is simply Unit.
- Definition Classes
- SimpleSGD → FirstOrderMinimizer
- type State = FirstOrderMinimizer.State[T, Info, History]
- Definition Classes
- FirstOrderMinimizer
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##(): Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- def adjust(newX: T, newGrad: T, newVal: Double): (Double, T)
- Attributes
- protected
- Definition Classes
- FirstOrderMinimizer
- def adjustFunction(f: StochasticDiffFunction[T]): StochasticDiffFunction[T]
- Attributes
- protected
- Definition Classes
- FirstOrderMinimizer
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def calculateObjective(f: StochasticDiffFunction[T], x: T, history: History): (Double, T)
- Attributes
- protected
- Definition Classes
- FirstOrderMinimizer
- def chooseDescentDirection(state: State, fn: StochasticDiffFunction[T]): T
- Attributes
- protected
- Definition Classes
- StochasticGradientDescent → FirstOrderMinimizer
- def clone(): AnyRef
- Attributes
- protected[java.lang]
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate() @throws( ... )
- val convergenceCheck: ConvergenceCheck[T]
- Definition Classes
- FirstOrderMinimizer
- val defaultStepSize: Double
- Definition Classes
- StochasticGradientDescent
- def determineStepSize(state: State, f: StochasticDiffFunction[T], dir: T): Double
Choose a step size scale for this iteration. Default is eta / math.pow(state.iter + 1, 2.0 / 3.0).
- Definition Classes
- StochasticGradientDescent → FirstOrderMinimizer
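To make the documented decay concrete, here is a small self-contained sketch (illustrative only) that evaluates the default schedule eta / (iter + 1)^(2/3), taking eta as the default initialStepSize of 4:

  object StepSizeSchedule extends App {
    val eta = 4.0
    for (iter <- Seq(0, 7, 26, 99)) {
      // Default schedule from determineStepSize above
      val step = eta / math.pow(iter + 1, 2.0 / 3.0)
      println(f"iter = $iter%3d  step = $step%.3f") // 4.000, 1.000, 0.444, 0.186
    }
  }

The step size starts at eta and shrinks polynomially, dropping below 1.0 after eight iterations with the defaults.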
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def getClass(): Class[_]
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- def infiniteIterations(f: StochasticDiffFunction[T], state: State): Iterator[State]
- Definition Classes
- FirstOrderMinimizer
- def initialHistory(f: StochasticDiffFunction[T], init: T): Unit
- Definition Classes
- SimpleSGD → FirstOrderMinimizer
- def initialState(f: StochasticDiffFunction[T], init: T): State
- Attributes
- protected
- Definition Classes
- FirstOrderMinimizer
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def iterations(f: StochasticDiffFunction[T], init: T): Iterator[State]
- Definition Classes
- FirstOrderMinimizer
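A hedged sketch of driving the optimizer through iterations rather than minimize, useful for logging progress; it assumes the same Breeze imports as in the constructor example and that State exposes iter, value and x (as FirstOrderMinimizer.State does). The objective and names are illustrative.

  import breeze.linalg.DenseVector
  import breeze.optimize.{DiffFunction, StochasticGradientDescent}

  object IterationsExample extends App {
    // Illustrative objective: f(x) = ||x||^2 with gradient 2x
    val f = new DiffFunction[DenseVector[Double]] {
      def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) =
        ((x dot x), x * 2.0)
    }

    val sgd = new StochasticGradientDescent.SimpleSGD[DenseVector[Double]]()
    // iterations returns a lazy Iterator[State]; the last state holds the final iterate.
    val states = sgd.iterations(f, DenseVector.fill(3)(1.0))
      .map { s => println(s"iter=${s.iter} value=${s.value}"); s }
      .toVector
    println(states.last.x)
  }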
- def logger: LazyLogger
- Attributes
- protected
- Definition Classes
- SerializableLogging
- val maxIter: Int
- Definition Classes
- StochasticGradientDescent
- def minimize(f: StochasticDiffFunction[T], init: T): T
- Definition Classes
- FirstOrderMinimizer → Minimizer
- def minimizeAndReturnState(f: StochasticDiffFunction[T], init: T): State
- Definition Classes
- FirstOrderMinimizer
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @HotSpotIntrinsicCandidate()
- final def synchronized[T0](arg0: ⇒ T0): T0
- Definition Classes
- AnyRef
- def takeStep(state: State, dir: T, stepSize: Double): T
Projects the vector x onto whatever ball is needed. Can also incorporate regularization, or whatever. The default just takes a step.
- Attributes
- protected
- Definition Classes
- StochasticGradientDescent → FirstOrderMinimizer
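As an illustration of what "just takes a step" amounts to, here is a sketch specialised to DenseVector[Double]; it is not the library source, which expresses the same update generically through the implicit NormedModule.

  import breeze.linalg.DenseVector

  object TakeStepSketch extends App {
    // New iterate = old iterate + descent direction scaled by the step size.
    def takeStep(x: DenseVector[Double], dir: DenseVector[Double], stepSize: Double): DenseVector[Double] =
      x + dir * stepSize

    println(takeStep(DenseVector(1.0, 2.0), DenseVector(-1.0, -1.0), 0.5)) // DenseVector(0.5, 1.5)
  }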
- def toString(): String
- Definition Classes
- AnyRef → Any
- def updateHistory(newX: T, newGrad: T, newValue: Double, f: StochasticDiffFunction[T], oldState: State): Unit
- Definition Classes
- SimpleSGD → FirstOrderMinimizer
- implicit val vspace: NormedModule[T, Double]
- Attributes
- protected
- Definition Classes
- StochasticGradientDescent
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @native() @throws( ... )
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws( ... )