"""Implementation of AutoInt."""
from ..bases import ModelMeta, TfBase
from ..feature.multi_sparse import true_sparse_field_size
from ..layers import embedding_lookup, multi_head_attention, tf_dense
from ..tfops import attention_config, dropout_config, reg_config, tf
from ..tfops.features import compute_dense_feats, compute_sparse_feats
from ..utils.validate import (
check_dense_values,
check_multi_sparse,
check_sparse_indices,
dense_field_size,
sparse_feat_size,
sparse_field_size,
)


class AutoInt(TfBase, metaclass=ModelMeta):
"""*AutoInt* algorithm.
Parameters
----------
task : {'rating', 'ranking'}
Recommendation task. See :ref:`Task`.
data_info : :class:`~libreco.data.DataInfo` object
Object that contains useful information for training and inference.
loss_type : {'cross_entropy', 'focal'}, default: 'cross_entropy'
Loss for model training.
    embed_size : int, default: 16
        Vector size of embeddings.
    n_epochs : int, default: 10
        Number of epochs for training.
    lr : float, default: 0.001
        Learning rate for training.
lr_decay : bool, default: False
Whether to use learning rate decay.
    epsilon : float, default: 1e-5
        A small constant added to the denominator to improve numerical stability in
        the Adam optimizer.
        According to the `official comment <https://github.com/tensorflow/tensorflow/blob/v1.15.0/tensorflow/python/training/adam.py#L64>`_,
        the default value of ``1e-8`` for ``epsilon`` is generally not a good choice, so ``1e-5`` is used here.
        Users can try tuning this hyperparameter if training is unstable.
reg : float or None, default: None
Regularization parameter, must be non-negative or None.
batch_size : int, default: 256
Batch size for training.
    sampler : {'random', 'unconsumed', 'popular'}, default: 'random'
        Negative sampling strategy.

        - ``'random'`` means random sampling.
        - ``'unconsumed'`` samples items that the target user did not consume before.
        - ``'popular'`` has a higher probability to sample popular items as negative samples.

        .. versionadded:: 1.1.0

num_neg : int, default: 1
Number of negative samples for each positive sample, only used in `ranking` task.
use_bn : bool, default: True
Whether to use batch normalization.
dropout_rate : float or None, default: None
Probability of an element to be zeroed. If it is None, dropout is not used.
    att_embed_size : int, list of int or tuple of (int,), default: (8, 8, 8)
        Head embedding size in each attention layer. The number of elements determines
        the number of attention layers; if it is an ``int``, a single layer is used.
num_heads : int, default: 2
Number of heads in multi-head attention.
use_residual : bool, default: True
Whether to use residual layer.
multi_sparse_combiner : {'normal', 'mean', 'sum', 'sqrtn'}, default: 'sqrtn'
Options for combining `multi_sparse` features.
seed : int, default: 42
Random seed.
lower_upper_bound : tuple or None, default: None
Lower and upper score bound for `rating` task.
tf_sess_config : dict or None, default: None
Optional TensorFlow session config, see `ConfigProto options
<https://github.com/tensorflow/tensorflow/blob/v2.10.0/tensorflow/core/protobuf/config.proto#L431>`_.

    References
    ----------
*Weiping Song et al.* `AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks
<https://arxiv.org/pdf/1810.11921.pdf>`_.
"""

    user_variables = ("embedding/user_embeds_var",)
item_variables = ("embedding/item_embeds_var",)
sparse_variables = ("embedding/sparse_embeds_var",)
dense_variables = ("embedding/dense_embeds_var",)

    def __init__(
self,
task,
data_info,
loss_type="cross_entropy",
embed_size=16,
n_epochs=10,
lr=0.001,
lr_decay=False,
epsilon=1e-5,
reg=None,
batch_size=256,
sampler="random",
num_neg=1,
use_bn=True,
dropout_rate=None,
att_embed_size=(8, 8, 8),
num_heads=2,
use_residual=True,
multi_sparse_combiner="sqrtn",
seed=42,
lower_upper_bound=None,
tf_sess_config=None,
):
super().__init__(task, data_info, lower_upper_bound, tf_sess_config)
self.all_args = locals()
self.loss_type = loss_type
self.embed_size = embed_size
self.n_epochs = n_epochs
self.lr = lr
self.lr_decay = lr_decay
self.epsilon = epsilon
self.reg = reg_config(reg)
self.batch_size = batch_size
self.sampler = sampler
self.num_neg = num_neg
self.use_bn = use_bn
self.dropout_rate = dropout_config(dropout_rate)
        # `att_head_dims` also determines the number of attention layers
        self.att_head_dims, self.att_layer_num = attention_config(att_embed_size)
self.num_heads = num_heads
self.use_residual = use_residual
self.seed = seed
self.sparse = check_sparse_indices(data_info)
self.dense = check_dense_values(data_info)
if self.sparse:
self.sparse_feature_size = sparse_feat_size(data_info)
self.sparse_field_size = sparse_field_size(data_info)
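            # If multi_sparse fields are merged by the combiner ('mean', 'sum', 'sqrtn'),
            # the number of attention fields shrinks from `sparse_field_size` to
            # `true_sparse_field_size`.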
self.multi_sparse_combiner = check_multi_sparse(
data_info, multi_sparse_combiner
)
self.true_sparse_field_size = true_sparse_field_size(
data_info, self.sparse_field_size, self.multi_sparse_combiner
)
if self.dense:
self.dense_field_size = dense_field_size(data_info)

    def build_model(self):
tf.set_random_seed(self.seed)
self.labels = tf.placeholder(tf.float32, shape=[None])
self.is_training = tf.placeholder_with_default(False, shape=[])
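        # Each feature field contributes a [batch_size, 1, embed_size] slice to
        # `concat_embed`; the slices are concatenated along the field axis below.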
self.concat_embed = []
self._build_user_item()
if self.sparse:
self._build_sparse()
if self.dense:
self._build_dense()
att_block = tf.concat(self.concat_embed, axis=1)
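        # AutoInt interacting layers: stacked multi-head self-attention over the field
        # axis, so every field embedding can attend to all other fields and capture
        # higher-order feature interactions.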
for head_dim in self.att_head_dims:
# self-attention
mha_output = multi_head_attention(
att_block, att_block, self.num_heads, head_dim
)
att_block = att_block + mha_output if self.use_residual else mha_output
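        # Flatten the per-field attention outputs into one vector per sample and
        # project it to a single logit.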
attention_layer = tf.keras.layers.Flatten()(att_block)
self.output = tf.squeeze(tf_dense(units=1)(attention_layer), axis=-1)
self.serving_topk = self.build_topk(self.output)

    def _build_user_item(self):
self.user_indices = tf.placeholder(tf.int32, shape=[None])
self.item_indices = tf.placeholder(tf.int32, shape=[None])
user_embeds = embedding_lookup(
indices=self.user_indices,
var_name="user_embeds_var",
var_shape=(self.n_users + 1, self.embed_size),
initializer=tf.glorot_uniform_initializer(),
regularizer=self.reg,
)
item_embeds = embedding_lookup(
indices=self.item_indices,
var_name="item_embeds_var",
var_shape=(self.n_items + 1, self.embed_size),
initializer=tf.glorot_uniform_initializer(),
regularizer=self.reg,
)
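        # Add a field axis so that user and item embeddings become
        # [batch_size, 1, embed_size] slices, matching the other feature fields.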
self.concat_embed.extend(
[user_embeds[:, tf.newaxis, :], item_embeds[:, tf.newaxis, :]]
)

    def _build_sparse(self):
self.sparse_indices = tf.placeholder(
tf.int32, shape=[None, self.sparse_field_size]
)
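        # `flatten=False` keeps one embedding per (combined) sparse field:
        # [batch_size, num_sparse_fields, embed_size].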
sparse_embed = compute_sparse_feats(
self.data_info,
self.multi_sparse_combiner,
self.sparse_indices,
var_name="sparse_embeds_var",
var_shape=(self.sparse_feature_size, self.embed_size),
initializer=tf.glorot_uniform_initializer(),
regularizer=self.reg,
flatten=False,
)
self.concat_embed.append(sparse_embed)

    def _build_dense(self):
self.dense_values = tf.placeholder(
tf.float32, shape=[None, self.dense_field_size]
)
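        # `flatten=False` keeps one embedding per dense field:
        # [batch_size, num_dense_fields, embed_size].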
dense_embed = compute_dense_feats(
self.dense_values,
var_name="dense_embeds_var",
var_shape=(self.dense_field_size, self.embed_size),
initializer=tf.glorot_uniform_initializer(),
regularizer=self.reg,
flatten=False,
)
self.concat_embed.append(dense_embed)