Commit aab22ad: Renaming

pashashiz committed Oct 15, 2023 (1 parent: 4fdc3ca)
Showing 25 changed files with 126 additions and 128 deletions.
38 changes: 19 additions & 19 deletions src/main/scala/scanet/estimators/package.scala
@@ -5,7 +5,7 @@ import scanet.core.{Expr, Numeric, Session, Shape}

import scala.{math => m}
import scanet.math.syntax._
-import scanet.models.{TrainedModel_}
+import scanet.models.TrainedModel
import scanet.optimizers.Iterators.Partial
import scanet.optimizers.syntax._
import scanet.optimizers.Record
@@ -16,9 +16,9 @@ import scala.collection.immutable.Seq
package object estimators {

def accuracy[A: Numeric](
-model: TrainedModel_[A],
-ds: Dataset[Record[A]],
-batch: Int = 1000): Float = {
+model: TrainedModel[A],
+ds: Dataset[Record[A]],
+batch: Int = 1000): Float = {
import ds.sparkSession.implicits._
val brModel = ds.sparkSession.sparkContext.broadcast(model)
val (positives, total) = ds
@@ -47,31 +47,31 @@ package object estimators {
}

def RMSE[A: Numeric](
-model: TrainedModel_[A],
-ds: Dataset[Record[A]],
-batch: Int = 1000): Float =
+model: TrainedModel[A],
+ds: Dataset[Record[A]],
+batch: Int = 1000): Float =
m.sqrt(MSE(model, ds, batch)).toFloat

def MSE[A: Numeric](
-model: TrainedModel_[A],
-ds: Dataset[Record[A]],
-batch: Int = 1000): Float =
+model: TrainedModel[A],
+ds: Dataset[Record[A]],
+batch: Int = 1000): Float =
meanError(model, ds, batch) {
(predicted, expected) => (predicted - expected).sqr
}

def MAE[A: Numeric](
-model: TrainedModel_[A],
-ds: Dataset[Record[A]],
-batch: Int = 1000): Float =
+model: TrainedModel[A],
+ds: Dataset[Record[A]],
+batch: Int = 1000): Float =
meanError(model, ds, batch) {
(predicted, expected) => (predicted - expected).abs
}

private def meanError[A: Numeric](
-model: TrainedModel_[A],
-ds: Dataset[Record[A]],
-batch: Int)(
+model: TrainedModel[A],
+ds: Dataset[Record[A]],
+batch: Int)(
error: (Expr[A], Expr[A]) => Expr[A]): Float = {
import ds.sparkSession.implicits._
val brModel = ds.sparkSession.sparkContext.broadcast(model)
@@ -100,9 +100,9 @@ package object estimators {
}

def R2Score[A: Numeric](
-model: TrainedModel_[A],
-ds: Dataset[Record[A]],
-batch: Int = 1000): Float = {
+model: TrainedModel[A],
+ds: Dataset[Record[A]],
+batch: Int = 1000): Float = {
require(ds.labelsShape == Shape(1), "labels should have shape (1)")
import ds.sparkSession.implicits._
val brModel = ds.sparkSession.sparkContext.broadcast(model)
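A quick usage sketch of the renamed estimators (hedged: `trained` and `testDs` are assumed to already exist in the calling scope as a TrainedModel[Float] and a Spark Dataset[Record[Float]] held out for evaluation; the signatures themselves match the diff above):

import scanet.estimators._

// fraction of correct predictions, evaluated in batches of 1000 (the default)
val acc: Float = accuracy(trained, testDs)
// root mean squared error; sqrt of MSE as defined above
val rmse: Float = RMSE(trained, testDs, batch = 500)
// coefficient of determination; labels must have shape (1)
val r2: Float = R2Score(trained, testDs)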
4 changes: 2 additions & 2 deletions src/main/scala/scanet/models/Math.scala
@@ -12,13 +12,13 @@ object Math {

case object `x^2` extends StatelessLayer {

-override def params_(input: Shape): Params[ParamDef] =
+override def params(input: Shape): Params[ParamDef] =
Params(Weights -> ParamDef(Shape(), Initializer.Zeros, Some(Avg), trainable = true))

override def buildStateless_[E: Floating](input: Expr[E], params: Params[Expr[E]]): Expr[E] =
pow(params(Weights), 2)

-override def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
+override def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
zeros[E](Shape())

override def outputShape(input: Shape): Shape = input
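For context, `x^2` is a one-weight toy model: it ignores its input and returns the square of its single scalar weight, with zero penalty, which makes it a handy fixture for optimizer tests (the minimum of w^2 sits at w = 0, with gradient 2w). A minimal sketch of inspecting it, assuming the object lives at scanet.models.Math as the file path suggests:

import scanet.core.Shape
import scanet.models.Math.`x^2`

// declares exactly one scalar weight (Shape()), zero-initialized
val defs = `x^2`.params(Shape())
println(defs)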
63 changes: 31 additions & 32 deletions src/main/scala/scanet/models/Model.scala
@@ -15,38 +15,37 @@ abstract class Model extends Serializable {
* @param input input shape
* @return param definitions
*/
-def params_(input: Shape): Params[ParamDef]
+def params(input: Shape): Params[ParamDef]

/** Build a model
*
* @param input training set, where first dimension equals to number of samples (batch size)
* @param params initialized or calculated model params
* @return tuple where the first element is model output and second is changed params
*/
-def build_[E: Floating](input: Expr[E], params: Params[Expr[E]]): (Expr[E], Params[Expr[E]])
+def build[E: Floating](input: Expr[E], params: Params[Expr[E]]): (Expr[E], Params[Expr[E]])

/** Additional model penalty to be added to the loss
*
* @param params initialized or calculated model params
* @return penalty
*/
-def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E]
+def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E]

-def result_[E: Floating]: (Expr[E], Params[Expr[E]]) => Expr[E] =
-(input, params) => build_(input, params)._1
+def result[E: Floating]: (Expr[E], Params[Expr[E]]) => Expr[E] =
+(input, params) => build(input, params)._1

-def resultStateful_[E: Floating]: (Expr[E], Params[Expr[E]]) => (Expr[E], Params[Expr[E]]) =
-(input, params) => build_(input, params)
+def resultStateful[E: Floating]: (Expr[E], Params[Expr[E]]) => (Expr[E], Params[Expr[E]]) =
+(input, params) => build(input, params)

// do we really need that???
def outputShape(input: Shape): Shape

def withLoss(loss: Loss): LossModel = LossModel(this, loss)

private def makeGraph[E: Floating](input: Shape): Expr[E] =
-build_(
+build(
input = placeholder[E](input),
-params = params_(input).mapValues(paramDef => placeholder[E](paramDef.shape)))
+params = params(input).mapValues(paramDef => placeholder[E](paramDef.shape)))
._1

def displayResult[E: Floating](input: Shape, dir: String = ""): Unit =
@@ -56,7 +55,7 @@ abstract class Model extends Serializable {
println(makeGraph[E](input).as("result").toString)

def info(input: Shape): Seq[LayerInfo] = {
-val (weights, state) = params_(input).partitionValues(_.trainable)
+val (weights, state) = params(input).partitionValues(_.trainable)
Seq(LayerInfo(
toString,
weights.values.map(_.shape).toList,
@@ -79,47 +78,47 @@

case class LossModel(model: Model, lossF: Loss) extends Serializable {

-def build_[E: Floating](
+def build[E: Floating](
input: Expr[E],
output: Expr[E],
params: Params[Expr[E]]): Expr[E] =
-buildStateful_(input, output, params)._1
+buildStateful(input, output, params)._1

-def buildStateful_[E: Floating](
+def buildStateful[E: Floating](
input: Expr[E],
output: Expr[E],
params: Params[Expr[E]]): (Expr[E], Params[Expr[E]]) = {
-val (result, nextParams) = model.build_(input, params)
-val loss = lossF.build(result, output) plus model.penalty_(input.shape, params)
+val (result, nextParams) = model.build(input, params)
+val loss = lossF.build(result, output) plus model.penalty(input.shape, params)
(loss, nextParams)
}

-def loss_[E: Floating]: (Expr[E], Expr[E], Params[Expr[E]]) => Expr[E] =
-(input, output, params) => buildStateful_(input, output, params)._1
+def loss[E: Floating]: (Expr[E], Expr[E], Params[Expr[E]]) => Expr[E] =
+(input, output, params) => buildStateful(input, output, params)._1

-def lossStateful_[E: Floating]
+def lossStateful[E: Floating]
: (Expr[E], Expr[E], Params[Expr[E]]) => (Expr[E], Params[Expr[E]]) =
-(input, output, params) => buildStateful_(input, output, params)
+(input, output, params) => buildStateful(input, output, params)

-def grad_[E: Floating]: (Expr[E], Expr[E], Params[Expr[E]]) => Params[Expr[E]] =
+def grad[E: Floating]: (Expr[E], Expr[E], Params[Expr[E]]) => Params[Expr[E]] =
(input, output, weights) => {
-val loss = build_(input, output, weights)
+val loss = build(input, output, weights)
loss.grad(weights).returns[E]
}

-def gradStateful_[E: Floating]
+def gradStateful[E: Floating]
: (Expr[E], Expr[E], Params[Expr[E]], Params[Expr[E]]) => (Params[Expr[E]], Params[Expr[E]]) =
(input, output, weights, state) => {
-val (loss, nextState) = buildStateful_(input, output, weights ++ state)
+val (loss, nextState) = buildStateful(input, output, weights ++ state)
val grad = loss.grad(weights).returns[E]
(grad, nextState)
}

-def trained_[E: Floating](params: Params[Tensor[E]]) = new TrainedModel_(this, params)
+def trained[E: Floating](params: Params[Tensor[E]]) = new TrainedModel(this, params)

def displayLoss[E: Floating](input: Shape, dir: String = ""): Unit = {
-val params = model.params_(input)
-build_(
+val params = model.params(input)
+build(
input = placeholder[E](input),
output = placeholder[E](model.outputShape(input)),
params = params.mapValues(paramDef => placeholder[E](paramDef.shape)))
@@ -128,8 +127,8 @@ case class LossModel(model: Model, lossF: Loss) extends Serializable {
}

def displayGrad[E: Floating](input: Shape, dir: String = ""): Unit = {
-val (weights, state) = model.params_(input).partitionValues(_.trainable)
-val (grad, _) = gradStateful_[E].apply(
+val (weights, state) = model.params(input).partitionValues(_.trainable)
+val (grad, _) = gradStateful[E].apply(
placeholder[E](input),
placeholder[E](model.outputShape(input)),
weights.mapValues(paramDef => placeholder[E](paramDef.shape)),
@@ -142,13 +141,13 @@ case class LossModel(model: Model, lossF: Loss) extends Serializable {
override def toString: String = s"$lossF($model)"
}

-class TrainedModel_[E: Floating](val lossModel: LossModel, val params: Params[Tensor[E]]) {
+class TrainedModel[E: Floating](val lossModel: LossModel, val params: Params[Tensor[E]]) {

def buildResult(input: Expr[E]): Expr[E] =
buildResultStateful(input)._1

def buildResultStateful(input: Expr[E]): (Expr[E], Params[Expr[E]]) =
-lossModel.model.build_(input, params.mapValues(_.const))
+lossModel.model.build(input, params.mapValues(_.const))

def result: Expr[E] => Expr[E] = (input: Expr[E]) => buildResult(input)

@@ -161,7 +160,7 @@ class TrainedModel_[E: Floating](val lossModel: LossModel, val params: Params[Te
def buildLossStateful(
input: Expr[E],
output: Expr[E]): (Expr[E], Params[Expr[E]]) =
-lossModel.buildStateful_(input, output, params.mapValues(_.const))
+lossModel.buildStateful(input, output, params.mapValues(_.const))

def loss: (Expr[E], Expr[E]) => Expr[E] = (input, output) => buildLoss(input, output)

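Put together, the renamed surface reads cleanly end to end. A sketch of the chain, where the loss value `mse`, the expressions `inputExpr`/`labelExpr`/`weightExprs`, and the optimized `finalParams` are all assumed to come from the surrounding training code:

// Model + Loss => LossModel
val lossModel: LossModel = model.withLoss(mse)

// stateless gradient of the loss w.r.t. trainable weights (ex grad_)
val grads: Params[Expr[Float]] =
  lossModel.grad[Float].apply(inputExpr, labelExpr, weightExprs)

// freeze optimized params into an inference-ready model (ex trained_/TrainedModel_)
val trained: TrainedModel[Float] = lossModel.trained(finalParams)
val prediction: Expr[Float] = trained.buildResult(inputExpr)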
2 changes: 1 addition & 1 deletion src/main/scala/scanet/models/layer/Activate.scala
@@ -13,7 +13,7 @@ case class Activate(activation: Activation) extends NotTrainableLayer {

override def name: String = activation.toString

-override def build_[E: Floating](input: Expr[E]): Expr[E] =
+override def build[E: Floating](input: Expr[E]): Expr[E] =
activation.build(input)

override def outputShape(input: Shape): Shape = input
4 changes: 2 additions & 2 deletions src/main/scala/scanet/models/layer/Bias.scala
@@ -23,13 +23,13 @@ import scala.collection.immutable.Seq
case class Bias(features: Int, reg: Regularization = Zero, initializer: Initializer = Zeros)
extends StatelessLayer {

-override def params_(input: Shape): Params[ParamDef] =
+override def params(input: Shape): Params[ParamDef] =
Params(Weights -> ParamDef(Shape(features), initializer, Some(Avg), trainable = true))

override def buildStateless_[E: Floating](input: Expr[E], params: Params[Expr[E]]): Expr[E] =
input + params.weights

-override def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
+override def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
reg.build(params.weights)

override def outputShape(input: Shape): Shape = input
16 changes: 8 additions & 8 deletions src/main/scala/scanet/models/layer/Composed.scala
@@ -13,27 +13,27 @@ import scala.collection.immutable.Seq
*/
case class Composed(left: Layer, right: Layer) extends Layer {

-override def params_(input: Shape): Params[ParamDef] = {
+override def params(input: Shape): Params[ParamDef] = {
// todo: flatten
-val leftParams = left.params_(input).prependPath("l")
-val rightParams = right.params_(left.outputShape(input)).prependPath("r")
+val leftParams = left.params(input).prependPath("l")
+val rightParams = right.params(left.outputShape(input)).prependPath("r")
leftParams ++ rightParams
}

-override def build_[E: Floating](
+override def build[E: Floating](
input: Expr[E],
params: Params[Expr[E]]): (Expr[E], Params[Expr[E]]) = {
val leftParams = params.children("l")
val rightParams = params.children("r")
-val (leftOutput, leftState) = left.build_(input, leftParams)
-val (rightOutput, rightState) = right.build_(leftOutput, rightParams)
+val (leftOutput, leftState) = left.build(input, leftParams)
+val (rightOutput, rightState) = right.build(leftOutput, rightParams)
(rightOutput, leftState.prependPath("l") ++ rightState.prependPath("r"))
}

-override def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] = {
+override def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] = {
val leftParams = params.children("l")
val rightParams = params.children("r")
-left.penalty_(input, leftParams) plus right.penalty_(left.outputShape(input), rightParams)
+left.penalty(input, leftParams) plus right.penalty(left.outputShape(input), rightParams)
}

override def outputShape(input: Shape): Shape =
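The "l"/"r" prefixes above are what keep the two sub-layers' parameters from colliding in the merged Params tree. A sketch of the namespacing using Bias, whose public constructor with defaults is visible in this same diff:

import scanet.core.Shape
import scanet.models.layer.{Bias, Composed}

// two bias layers over 4 features; Bias preserves the input shape,
// so the right layer sees the same (32, 4) the left one produced
val composed = Composed(Bias(4), Bias(4))
val defs = composed.params(Shape(32, 4))
// the left layer's weights land under an "l"-prefixed path, the right
// layer's under "r" (exact path rendering is assumed), each of Shape(4)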
4 changes: 2 additions & 2 deletions src/main/scala/scanet/models/layer/Conv2D.scala
@@ -80,7 +80,7 @@ case class Conv2D private (
def filterHeight: Int = kernel._1
def filterWidth: Int = kernel._2

-override def params_(input: Shape): Params[ParamDef] = {
+override def params(input: Shape): Params[ParamDef] = {
require(
input.rank == 4,
s"Conv2D input should have a shape (NHWC) or (NCHW) but was $input")
@@ -101,7 +101,7 @@ case class Conv2D private (
format = format)
}

-override def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
+override def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
zeros[E](Shape())

override def outputShape(input: Shape): Shape = {
4 changes: 2 additions & 2 deletions src/main/scala/scanet/models/layer/Dense.scala
@@ -44,7 +44,7 @@ object Dense {
case class Dense private (outputs: Int, reg: Regularization, initializer: Initializer)
extends StatelessLayer {

-override def params_(input: Shape): Params[ParamDef] =
+override def params(input: Shape): Params[ParamDef] =
Params(Weights -> ParamDef(Shape(input(1), outputs), initializer, Some(Avg), trainable = true))

override def buildStateless_[E: Floating](input: Expr[E], params: Params[Expr[E]]): Expr[E] =
@@ -53,7 +53,7 @@ case class Dense private (outputs: Int, reg: Regularization, initializer: Initia
// x * w -> (samples, features) * (features, outputs) -> (samples, outputs)
input matmul params.weights

-override def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
+override def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
reg.build(params.weights)

override def outputShape(input: Shape): Shape = Shape(input.head, outputs)
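The matmul comment above pins down the shape arithmetic: with input (samples, features) = (32, 8) and outputs = 4, the weights get Shape(input(1), outputs) = Shape(8, 4) and the output is Shape(input.head, outputs) = Shape(32, 4). A sketch, assuming the companion apply accepts just the output count with defaults for regularization and initializer (its exact signature is not shown in this diff):

import scanet.core.Shape
import scanet.models.layer.Dense

val dense = Dense(4) // hypothetical factory call
assert(dense.outputShape(Shape(32, 8)) == Shape(32, 4)) // (samples, outputs)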
2 changes: 1 addition & 1 deletion src/main/scala/scanet/models/layer/Flatten.scala
@@ -9,7 +9,7 @@ import scanet.core.{Expr, Floating, Shape}
*/
case object Flatten extends NotTrainableLayer {

-override def build_[E: Floating](input: Expr[E]): Expr[E] = {
+override def build[E: Floating](input: Expr[E]): Expr[E] = {
val shape = input.shape
require(shape.rank >= 2, s"rank should be >= 2, but was ${shape.rank}")
val batch = shape(0)
10 changes: 5 additions & 5 deletions src/main/scala/scanet/models/layer/Layer.scala
@@ -35,7 +35,7 @@ trait StatelessLayer extends Layer {

def buildStateless_[E: Floating](input: Expr[E], params: Params[Expr[E]]): Expr[E]

-override def build_[E: Floating](
+override def build[E: Floating](
input: Expr[E],
params: Params[Expr[E]]): (Expr[E], Params[Expr[E]]) = {
(buildStateless_(input, params), Params.empty)
@@ -46,14 +46,14 @@ trait NotTrainableLayer extends StatelessLayer {

override def trainable: Boolean = false

-override def params_(input: Shape): Params[ParamDef] = Params.empty
-override def penalty_[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
+override def params(input: Shape): Params[ParamDef] = Params.empty
+override def penalty[E: Floating](input: Shape, params: Params[Expr[E]]): Expr[E] =
zeros[E](Shape())

-def build_[E: Floating](input: Expr[E]): Expr[E]
+def build[E: Floating](input: Expr[E]): Expr[E]

override def buildStateless_[E: Floating](input: Expr[E], params: Params[Expr[E]]): Expr[E] = {
require(params.isEmpty, s"$this layer does not require params")
-build_(input)
+build(input)
}
}
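With those defaults, a parameter-free transform only has to implement the renamed build plus outputShape. A hypothetical layer as a minimal sketch (it assumes NotTrainableLayer is extensible from user code and that .abs is provided by scanet.math.syntax, as used in the estimators file above):

import scanet.core.{Expr, Floating, Shape}
import scanet.math.syntax._
import scanet.models.layer.NotTrainableLayer

// element-wise absolute value; inherits empty params and zero penalty
case object Abs extends NotTrainableLayer {
  override def build[E: Floating](input: Expr[E]): Expr[E] = input.abs
  override def outputShape(input: Shape): Shape = input
}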
2 changes: 1 addition & 1 deletion src/main/scala/scanet/models/layer/Pool2D.scala
@@ -33,7 +33,7 @@ case class Pool2D(
reduce: Reduce = Reduce.Max)
extends NotTrainableLayer {

-override def build_[E: Floating](input: Expr[E]): Expr[E] =
+override def build[E: Floating](input: Expr[E]): Expr[E] =
pool2D[E](
input = input,
window = Seq(window._1, window._2),
(14 more changed files not shown)
