Keras - tfruns - same metric results across different flag combinations

Hello, everyone,
I am doing some keras experimentation using tfruns.

When I run hyperparameter tuning with the tuning_run() function and a set of flags, I notice that every flag combination returns exactly the same error metrics, such as loss and val_loss.

Could you help me understand where the problem is?

Code - tfruns

library(tfruns)

FLAGS <- flags(
  flag_integer('dense1', 5),
  flag_numeric('dropout1', 0.1),
  flag_integer('dense2', 5),
  flag_numeric('dropout2', 0.1),
  flag_integer('dense3', 5),
  flag_numeric('dropout3', 0.1),
  flag_numeric("lr_rate", 0.01),
  flag_integer('epochs', 100),
  flag_integer('batch_size', 10))

runs <- tuning_run("tuning_run.R", 
                 sample = 0.005,
                 flags = list(dense1=c(5, 10),
                              dropout1=c(0.1, 0.2),
                              dense2=c(5, 10),
                              dropout2=c(0.1, 0.2),
                              dense3=c(5, 10),
                              dropout3=c(0.1, 0.2),
                              lr_rate=c(0.01, 0.001),
                              epochs=c(100, 150),
                              batch_size=c(10, 15)))
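
For reference, the grid above has two values for each of nine flags, so sample = 0.005 keeps only a handful of the combinations:

2^9           # 512 combinations in the full grid
0.005 * 2^9   # ~2.6, so roughly 2-3 runs are actually sampled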

Code - tuning_run

library(tensorflow)
library(keras)
library(caret)
library(tidyverse)
library(vip)

rm(list=ls())

training_1 <- read.csv("C:/Users/Fiorentini/Desktop/forecast_calogero/forecast_yield/Data/training.csv", sep=";")

training_1<-na.omit(training_1)

testing_1 <- read.csv("C:/Users/Fiorentini/Desktop/forecast_calogero/forecast_yield/Data/testing.csv", sep=";")

testing_1<-na.omit(testing_1)

training_1<-as_tibble(training_1)
testing_1<-as_tibble(testing_1)

x_train<-training_1[,-16]
y_train<-training_1[,16]
x_test<-testing_1[,-16]
y_test<-testing_1[,16]

# Standardize predictors; reuse the training center/scale for the test set
x_train <- scale(x_train)
x_test <- scale(x_test, 
                center = attr(x_train, "scaled:center"), 
                scale = attr(x_train, "scaled:scale"))

x_train <- as.matrix(x_train)
y_train <- as.matrix(y_train)
x_test <- as.matrix(x_test)
y_test <- as.matrix(y_test)

all_scores <- c()

# Fix the seed once for reproducibility
set_random_seed(seed = 123)

# createFolds() expects the outcome vector, not the predictor matrix
kfold <- createFolds(y_train[, 1], k = 5)

for (fold in kfold){
  
  model <- keras_model_sequential()
  
  model %>%
    layer_dense(units = FLAGS$dense1, activation = "relu", input_shape = 15) %>%
    layer_dropout(rate = FLAGS$dropout1) %>%
    layer_dense(units = FLAGS$dense2, activation = "relu") %>%
    layer_dropout(rate = FLAGS$dropout2) %>%
    layer_dense(units = FLAGS$dense3, activation = "relu") %>%
    layer_dropout(rate = FLAGS$dropout3) %>%
    layer_dense(units = 1, activation = "relu")
  
  model %>%
    compile(loss = "mean_absolute_error",
            optimizer = optimizer_adam(learning_rate = FLAGS$lr_rate),
            metrics = list("mse", "mae"))
  
  # Train on everything except the held-out fold; `fold` holds the
  # row indices of the current validation fold
  history <- model %>%
    fit(x = x_train[-fold, ], 
        y = y_train[-fold, ], 
        epochs = FLAGS$epochs, 
        batch_size = FLAGS$batch_size,
        verbose = 1,
        validation_data = list(x_test, y_test))
  
  # Collect the test-set metrics for this fold
  results <- model %>% evaluate(x_test, y_test, verbose = 0)
  
  all_scores <- c(all_scores, results)
  
}
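
After the loop, the per-fold test metrics can be averaged; a sketch, assuming each evaluate() call contributes the three values loss, mse and mae in that order:

# Reshape the collected scores into one row per fold, then average
scores <- matrix(unlist(all_scores), ncol = 3, byrow = TRUE,
                 dimnames = list(NULL, c("loss", "mse", "mae")))
colMeans(scores)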

I have solved it.

I realized that the flags have to be declared inside the script that tuning_run() executes (here, tuning_keras_1.R), not in the calling script. If flags() is called in the caller instead, the training script keeps using the default flag values on every run, which is why all combinations produced identical metrics.
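
A minimal sketch of the pattern (the file and flag names here are just placeholders):

# caller.R - defines only the search space and launches the runs
library(tfruns)
runs <- tuning_run("train.R", flags = list(units = c(8, 16)))

# train.R - flags() must be called here, so each run picks up its own values
FLAGS <- flags(flag_integer("units", 8))
# ... build and fit the model using FLAGS$units ...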

The full corrected code is below.

Code - tfruns

library(tfruns)
library(tensorflow)
library(keras)
library(caret)
library(tidyverse)
library(vip)

rm(list=ls())

training_1 <- read.csv("C:/Users/Fiorentini/Desktop/forecast_calogero/forecast_yield/Data/training.csv", sep=";")

training_1<-na.omit(training_1)

testing_1 <- read.csv("C:/Users/Fiorentini/Desktop/forecast_calogero/forecast_yield/Data/testing.csv", sep=";")

testing_1<-na.omit(testing_1)

training_1<-as_tibble(training_1)
testing_1<-as_tibble(testing_1)

x_train<-training_1[,-16]
y_train<-training_1[,16]
x_test<-testing_1[,-16]
y_test<-testing_1[,16]

# Standardize predictors; reuse the training center/scale for the test set
x_train <- scale(x_train)
x_test <- scale(x_test, 
                center = attr(x_train, "scaled:center"), 
                scale = attr(x_train, "scaled:scale"))

x_train <- as.matrix(x_train)
y_train <- as.matrix(y_train)
x_test <- as.matrix(x_test)
y_test <- as.matrix(y_test)

set_random_seed(seed = 123)

# createFolds() expects the outcome vector, not the predictor matrix
kfold <- createFolds(y_train[, 1], k = 5)

runs <- tuning_run("tuning_keras_1.R", 
                 sample = 0.005,
                 flags = list(dense1=c(5, 10),
                              dropout1=c(0.1, 0.2),
                              dense2=c(5, 10),
                              dropout2=c(0.1, 0.2),
                              dense3=c(5, 10),
                              dropout3=c(0.1, 0.2),
                              lr_rate=c(0.01, 0.001),
                              epochs=c(100, 150),
                              batch_size=c(10, 15)))

Code - tuning_keras_1

FLAGS <- flags(
  flag_integer('dense1', 5),
  flag_numeric('dropout1', 0.1),
  flag_integer('dense2', 5),
  flag_numeric('dropout2', 0.1),
  flag_integer('dense3', 5),
  flag_numeric('dropout3', 0.1),
  flag_numeric("lr_rate", 0.01),
  flag_integer('epochs', 100),
  flag_integer('batch_size', 10))

for (fold in kfold){
  
  model <- keras_model_sequential()
  
  model %>%
    layer_dense(units = FLAGS$dense1, activation = "relu", input_shape = 15) %>%
    layer_dropout(rate = FLAGS$dropout1) %>%
    layer_dense(units = FLAGS$dense2, activation = "relu") %>%
    layer_dropout(rate = FLAGS$dropout2) %>%
    layer_dense(units = FLAGS$dense3, activation = "relu") %>%
    layer_dropout(rate = FLAGS$dropout3) %>%
    layer_dense(units = 1, activation = "relu")
  
  model %>%
    compile(loss = "mean_absolute_error",
            optimizer = optimizer_adam(learning_rate = FLAGS$lr_rate),
            metrics = list("mse", "mae"))
  
  # Train on everything except the held-out fold; `fold` holds the
  # row indices of the current validation fold
  model %>%
    fit(x = x_train[-fold, ], 
        y = y_train[-fold, ], 
        epochs = FLAGS$epochs, 
        batch_size = FLAGS$batch_size,
        verbose = 1,
        validation_data = list(x_test, y_test))
  
}
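
Once the runs finish, the returned runs data frame can be used to rank the sampled combinations; the metric_ and flag_ column names below are the ones tfruns records for fit() metrics and declared flags:

# Rank the sampled runs by validation loss and show the best flag settings
best <- runs[order(runs$metric_val_loss), ]
head(best[, c("flag_dense1", "flag_lr_rate", "metric_val_loss")])

# Or browse all recorded runs interactively
ls_runs(order = metric_val_loss)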
