
3. Keras model training


Sequential model

from tensorflow.keras import layers
import tensorflow as tf
import numpy as np

#  Random inputs and targets, purely for demonstration
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))

#  Build the model
model = tf.keras.Sequential()
model.add(layers.Dense(64, activation='relu'))  # first layer
model.add(layers.Dense(64, activation='relu'))  # second layer
model.add(layers.Dense(10))  # third layer (outputs logits)

#  Specify the optimizer, loss function, and metrics
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

#  Callbacks
callbacks = [
    #  Early stopping
    tf.keras.callbacks.EarlyStopping(
        #  Stop training when `val_loss` stops improving
        monitor='val_loss',
        #  "No improvement" means `val_loss` decreases by less than 1e-2
        min_delta=1e-2,
        #  Wait at least 2 epochs without improvement before stopping
        patience=2,
        verbose=1),
    #  Save weights
    tf.keras.callbacks.ModelCheckpoint(
        #  Checkpoint path; `{epoch}` is filled in with the epoch number
        filepath='mymodel_{epoch}',
        #  The next two arguments mean the current checkpoint is
        #  overwritten only when `val_loss` improves
        save_best_only=True,
        monitor='val_loss',
        #  Save only the model weights, not the full model
        save_weights_only=True,
        verbose=1),
    #  Reduce the learning rate when `val_loss` plateaus
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                         verbose=1,
                                         mode='min',  # `val_loss` should decrease, so 'min'
                                         factor=0.5,
                                         patience=3)
]

#  Training 
model.fit(data, labels,
          epochs=30,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2
         )
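
Since the checkpoint callback above saves only weights, they can later be restored into a model with the same architecture. A minimal sketch; the epoch number in the checkpoint name depends on the run, so 'mymodel_5' below is only illustrative:

# Restore weights saved by ModelCheckpoint into the same architecture
# ('mymodel_5' is a hypothetical checkpoint name; use one produced by your run)
model.load_weights('mymodel_5')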

Sequential model 2

from tensorflow.keras import layers
import tensorflow as tf
import numpy as np

#  Random inputs and targets, purely for demonstration
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))

#  Build the model
model = tf.keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=(32,)),  # first layer
    layers.Dense(64, activation='relu'),  # second layer
    layers.Dense(10)  # third layer (outputs logits)
])
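
Because input_shape is passed to the first layer in this form, the model is built immediately, so it can be inspected before training:

# The layer shapes are already known, so summary() works before fit()
model.summary()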


#  Specify the optimizer, loss function, and metrics
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

#  Callbacks
callbacks = [
    #  Early stopping
    tf.keras.callbacks.EarlyStopping(
        #  Stop training when `val_loss` stops improving
        monitor='val_loss',
        #  "No improvement" means `val_loss` decreases by less than 1e-2
        min_delta=1e-2,
        #  Wait at least 2 epochs without improvement before stopping
        patience=2,
        verbose=1),
    #  Save weights
    tf.keras.callbacks.ModelCheckpoint(
        #  Checkpoint path; `{epoch}` is filled in with the epoch number
        filepath='mymodel_{epoch}',
        #  The next two arguments mean the current checkpoint is
        #  overwritten only when `val_loss` improves
        save_best_only=True,
        monitor='val_loss',
        #  Save only the model weights, not the full model
        save_weights_only=True,
        verbose=1),
    #  Reduce the learning rate when `val_loss` plateaus
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                         verbose=1,
                                         mode='min',  # `val_loss` should decrease, so 'min'
                                         factor=0.5,
                                         patience=3)
]

#  Training 
model.fit(data, labels,
          epochs=30,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2
         )

Functional API

from tensorflow.keras import layers
import tensorflow as tf
import numpy as np

#  Random inputs and targets, purely for demonstration
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))

inputs = tf.keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)  # first layer
x = layers.Dense(64, activation='relu')(x)  # second layer
predictions = layers.Dense(10)(x)  # third layer (outputs logits)
model = tf.keras.Model(inputs=inputs, outputs=predictions)
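
Unlike Sequential, the Functional API can also express graphs with several inputs or outputs. A minimal sketch of a two-input variant (the names here are illustrative, not part of the original example):

# Hypothetical two-input model: concatenate two feature vectors, then classify
a = tf.keras.Input(shape=(32,), name='input_a')
b = tf.keras.Input(shape=(32,), name='input_b')
merged = layers.concatenate([a, b])  # shape: (batch, 64)
h = layers.Dense(64, activation='relu')(merged)
two_input_model = tf.keras.Model(inputs=[a, b], outputs=layers.Dense(10)(h))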

#  Specify the optimizer, loss function, and metrics
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

#  Callbacks
callbacks = [
    #  Early stopping
    tf.keras.callbacks.EarlyStopping(
        #  Stop training when `val_loss` stops improving
        monitor='val_loss',
        #  "No improvement" means `val_loss` decreases by less than 1e-2
        min_delta=1e-2,
        #  Wait at least 2 epochs without improvement before stopping
        patience=2,
        verbose=1),
    #  Save weights
    tf.keras.callbacks.ModelCheckpoint(
        #  Checkpoint path; `{epoch}` is filled in with the epoch number
        filepath='mymodel_{epoch}',
        #  The next two arguments mean the current checkpoint is
        #  overwritten only when `val_loss` improves
        save_best_only=True,
        monitor='val_loss',
        #  Save only the model weights, not the full model
        save_weights_only=True,
        verbose=1),
    #  Reduce the learning rate when `val_loss` plateaus
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                         verbose=1,
                                         mode='min',  # `val_loss` should decrease, so 'min'
                                         factor=0.5,
                                         patience=3)
]

#  Training 
model.fit(data, labels,
          epochs=30,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2
         )

Model subclassing

from tensorflow.keras import layers
import tensorflow as tf
import numpy as np

#  Random inputs and targets, purely for demonstration
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))

class MyModel(tf.keras.Model):

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        #  Define the layers you need
        self.dense_1 = layers.Dense(32, activation='relu')
        self.dense_2 = layers.Dense(num_classes)

    def call(self, inputs):
        #  Define the forward pass using the layers created in `__init__`
        x = self.dense_1(inputs)
        x = self.dense_2(x)
        return x
    
model = MyModel(num_classes=10)
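
A subclassed model creates its weights lazily on the first call, so to inspect it with summary() before training, run one batch through it first:

# Calling the model once builds its weights, after which summary() is available
_ = model(data[:1])
model.summary()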

#  Specify the optimizer, loss function, and metrics
model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

#  Callbacks
callbacks = [
    #  Early stopping
    tf.keras.callbacks.EarlyStopping(
        #  Stop training when `val_loss` stops improving
        monitor='val_loss',
        #  "No improvement" means `val_loss` decreases by less than 1e-2
        min_delta=1e-2,
        #  Wait at least 2 epochs without improvement before stopping
        patience=2,
        verbose=1),
    #  Save weights
    tf.keras.callbacks.ModelCheckpoint(
        #  Checkpoint path; `{epoch}` is filled in with the epoch number
        filepath='mymodel_{epoch}',
        #  The next two arguments mean the current checkpoint is
        #  overwritten only when `val_loss` improves
        save_best_only=True,
        monitor='val_loss',
        #  Save only the model weights, not the full model
        save_weights_only=True,
        verbose=1),
    #  Reduce the learning rate when `val_loss` plateaus
    tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                         verbose=1,
                                         mode='min',  # `val_loss` should decrease, so 'min'
                                         factor=0.5,
                                         patience=3)
]

#  Training 
model.fit(data, labels,
          epochs=30,
          batch_size=64,
          callbacks=callbacks,
          validation_split=0.2
         )

Plotting the model

tf.keras.utils.plot_model draws the network architecture to an image file (rendering requires the pydot and graphviz packages).

tf.keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True, dpi=500)

(Model architecture diagram produced by plot_model.)

Model training: model.fit()
Model evaluation: model.evaluate()
Model prediction: model.predict()

# Random test data matching the shapes used above, purely for demonstration
x_test = np.random.random((200, 32))
y_test = np.random.random((200, 10))

# Evaluate the model on the test data using `evaluate`
print('\n# Evaluate on test data')
results = model.evaluate(x_test, y_test, batch_size=128)
print('test loss, test acc:', results)

# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print('\n# Generate predictions for 3 samples')
predictions = model.predict(x_test[:3])
print('predictions shape:', predictions.shape)
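
Because the last Dense layer has no activation and the loss was configured with from_logits=True, predict() returns raw logits; a softmax turns them into class probabilities:

# Convert logits to per-class probabilities
probs = tf.nn.softmax(predictions).numpy()
print('probabilities for 3 samples:\n', probs)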

Copyright notice
This article was written by [X1996_]. Please keep the link to the original when reposting:
https://yzsam.com/2022/177/202206261529517435.html