I want to assign the same weight to only part of the positive samples. However, as far as I can tell, tf.nn.weighted_cross_entropy_with_logits can only set one weight for all positive samples.
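As I understand the API, it takes a single scalar pos_weight that applies to every positive example alike, for example (a minimal sketch with made-up tensors):

    import tensorflow as tf

    labels = tf.constant([1.0, 1.0, 0.0])   # two positives, one negative
    logits = tf.constant([0.3, -1.2, 0.7])
    # One scalar pos_weight: both positives get weight 10, with no way
    # to weight one kind of positive differently from another.
    loss = tf.nn.weighted_cross_entropy_with_logits(labels, logits, pos_weight=10.0)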
For example, in CTR prediction I want to give the order samples a weight of 10, while the click and non-click samples keep a weight of 1.
Here is my unweighted code:
    def my_model(features, labels, mode, params):
        net = tf.feature_column.input_layer(features, params['feature_columns'])
        for units in params['hidden_units']:
            net = tf.layers.dense(net, units=units, activation=params["activation"])
        logits = tf.layers.dense(net, params['n_classes'], activation=None)

        predicted_classes = tf.argmax(logits, 1)
        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {
                'class_ids': predicted_classes,  # predicted_classes[:, tf.newaxis],
                'probabilities': tf.nn.softmax(logits),
                'logits': logits,
            }
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

        metrics = {'auc': tf.metrics.auc(labels=labels,
                                         predictions=tf.nn.softmax(logits)[:, 1])}
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)

        assert mode == tf.estimator.ModeKeys.TRAIN
        optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
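For reference, tf.losses.sparse_softmax_cross_entropy already accepts a per-example weights tensor, so I believe the change inside my_model would look something like this (untested sketch; 'weight' is a column name I made up, and how it gets into features is shown at the end):

    # 1) Before building the input layer, pop the per-example weight
    #    column out of `features` so it is not consumed as a model input:
    weights = tf.cast(features.pop('weight'), tf.float32)

    # 2) Pass it to the loss; each example's cross-entropy is multiplied
    #    by its weight before the reduction:
    loss = tf.losses.sparse_softmax_cross_entropy(
        labels=labels, logits=logits, weights=weights)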
Train
    train_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=data_train, y=data_train_click,
        batch_size=1024, num_epochs=1, shuffle=False)
    classifier.train(input_fn=train_input_fn)
Here, data_train_click is a series in which click samples are 1 and non-click samples are 0. I also have a series called data_train_order, in which order samples are 1 and the rest are 0.
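Putting it together, my plan would be to derive the weights from data_train_order and let pandas_input_fn carry them as an extra column of x (untested sketch; 'weight' is my own column name, and I assume data_train is a DataFrame indexed like data_train_order):

    # Weight 10 for order samples, weight 1 for click and non-click samples.
    data_train = data_train.copy()
    data_train['weight'] = 1.0 + 9.0 * data_train_order

    train_input_fn = tf.estimator.inputs.pandas_input_fn(
        x=data_train, y=data_train_click,
        batch_size=1024, num_epochs=1, shuffle=False)
    classifier.train(input_fn=train_input_fn)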