X-Git-Url: https://www.fleuret.org/cgi-bin/gitweb/gitweb.cgi?p=clueless-kmeans.git;a=blobdiff_plain;f=clusterer.cc;h=965b7bacd6fe9a9fb025dc68ab886c4a1dae5d1e;hp=47d9ac3f58fcea8f1ba2acdb359071968de6a41d;hb=HEAD;hpb=792d1a58d91f607d47ea5316ec680fb5e3454e5e

diff --git a/clusterer.cc b/clusterer.cc
index 47d9ac3..965b7ba 100644
--- a/clusterer.cc
+++ b/clusterer.cc
@@ -1,17 +1,17 @@
 /*
- * clueless-kmean is a variant of k-mean which enforces balanced
+ * clueless-kmeans is a variant of k-means which enforces balanced
  * distribution of classes in every cluster
  *
  * Copyright (c) 2013 Idiap Research Institute, http://www.idiap.ch/
  * Written by Francois Fleuret
  *
- * This file is part of clueless-kmean.
+ * This file is part of clueless-kmeans.
  *
- * clueless-kmean is free software: you can redistribute it and/or
+ * clueless-kmeans is free software: you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * version 3 as published by the Free Software Foundation.
  *
- * clueless-kmean is distributed in the hope that it will be useful,
+ * clueless-kmeans is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
@@ -34,6 +34,31 @@ Clusterer::~Clusterer() {
   deallocate_array(_cluster_var);
 }
 
+scalar_t Clusterer::distance_to_centroid(scalar_t *x, int k) {
+  scalar_t dist = 0;
+  for(int d = 0; d < _dim; d++) {
+    dist += sq(_cluster_means[k][d] - x[d]) / (2 * _cluster_var[k][d]);
+    dist += 0.5 * log(_cluster_var[k][d]);
+    ASSERT(!isnan(dist) && !isinf(dist));
+  }
+  return dist;
+}
+
+void Clusterer::initialize_clusters(int nb_points, scalar_t **points) {
+  int *used = new int[nb_points];
+  for(int k = 0; k < nb_points; k++) { used[k] = 0; }
+  for(int k = 0; k < _nb_clusters; k++) {
+    int l;
+    do { l = int(drand48() * nb_points); } while(used[l]);
+    for(int d = 0; d < _dim; d++) {
+      _cluster_means[k][d] = points[l][d];
+      _cluster_var[k][d] = 1.0;
+    }
+    used[l] = 1;
+  }
+  delete[] used;
+}
+
 scalar_t Clusterer::baseline_cluster_association(int nb_points, scalar_t **points,
                                                  int nb_classes, int *labels,
                                                  scalar_t **gamma) {
@@ -43,14 +68,7 @@ scalar_t Clusterer::baseline_cluster_association(int nb_points, scalar_t **point
   for(int n = 0; n < nb_points; n++) {
     scalar_t lowest_dist = 0;
     for(int k = 0; k < _nb_clusters; k++) {
-      scalar_t dist = 0;
-
-      for(int d = 0; d < _dim; d++) {
-        dist += sq(_cluster_means[k][d] - points[n][d]) / (2 * _cluster_var[k][d]);
-        dist += 0.5 * log(_cluster_var[k][d]);
-        ASSERT(!isnan(dist) && !isinf(dist));
-      }
-
+      scalar_t dist = distance_to_centroid(points[n], k);
       if(k == 0 || dist <= lowest_dist) {
         lowest_dist = dist;
         associated_clusters[n] = k;
@@ -77,9 +95,9 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
                                                     scalar_t **gamma) {
   glp_prob *lp;
 
-  int *ia = new int[nb_points * _nb_clusters + 1];
-  int *ja = new int[nb_points * _nb_clusters + 1];
-  scalar_t *ar = new scalar_t[nb_points * _nb_clusters + 1];
+  int *coeff_row = new int[nb_points * _nb_clusters + 1];
+  int *coeff_col = new int[nb_points * _nb_clusters + 1];
+  scalar_t *coeff_wgt = new scalar_t[nb_points * _nb_clusters + 1];
 
   lp = glp_create_prob();
 
@@ -96,31 +114,23 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
   for(int k = 1; k <= _nb_clusters; k++) {
     for(int n = 1; n <= nb_points; n++) {
       int i = n + nb_points * (k - 1);
-
-      scalar_t dist = 0;
-
-      for(int d = 0; d < _dim; d++) {
-        dist += sq(_cluster_means[k-1][d] - points[n-1][d]) / (2 * _cluster_var[k-1][d]);
-        dist += 0.5 * log(_cluster_var[k-1][d]);
-      }
-
-      glp_set_obj_coef(lp, i, dist);
+      glp_set_obj_coef(lp, i, distance_to_centroid(points[n-1], k-1));
       glp_set_col_bnds(lp, i, GLP_DB, 0.0, 1.0);
     }
   }
 
-  int l = 1;
+  int n_coeff = 1;
 
   for(int n = 1; n <= nb_points; n++) {
     for(int k = 1; k <= _nb_clusters; k++) {
-      ia[l] = n;
-      ja[l] = n + nb_points * (k - 1);
-      ar[l] = 1.0;
-      l++;
+      coeff_row[n_coeff] = n;
+      coeff_col[n_coeff] = n + nb_points * (k - 1);
+      coeff_wgt[n_coeff] = 1.0;
+      n_coeff++;
     }
   }
 
-  glp_load_matrix(lp, nb_points * _nb_clusters, ia, ja, ar);
+  glp_load_matrix(lp, nb_points * _nb_clusters, coeff_row, coeff_col, coeff_wgt);
 
   glp_simplex(lp, NULL);
 
@@ -133,9 +143,9 @@ scalar_t Clusterer::baseline_lp_cluster_association(int nb_points, scalar_t **po
     }
   }
 
-  delete[] ia;
-  delete[] ja;
-  delete[] ar;
+  delete[] coeff_row;
+  delete[] coeff_col;
+  delete[] coeff_wgt;
 
   glp_delete_prob(lp);
 
@@ -144,14 +154,46 @@ scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t **points,
                                                           int nb_classes, int *labels,
-                                                          scalar_t **gamma) {
+                                                          scalar_t **gamma,
+                                                          int absolute_proportion) {
+
+  // N points
+  // K clusters
+  // dist(n,k) distance of sample n to cluster k
+  //
+  // We want to optimize the associations
+  //
+  // \gamma(n,k) of point n to cluster k
+  //
+  // to minimize
+  //
+  // \sum_{n,k} \gamma(n,k) dist(n,k)
+  //
+  // under
+  //
+  // (A) \forall n, k, \gamma(n, k) >= 0
+  // (B) \forall n, \sum_k \gamma(n, k) = 1
+  // (C) \forall k, \sum_n \gamma(n, k) = N/K
+
   glp_prob *lp;
 
-  int nb_coeffs = nb_points * _nb_clusters + nb_points * _nb_clusters;
+  // The coefficients for the constraints are passed to the glpk
+  // functions with a sparse representation.
+
+  // ** GLPK USES INDEXES STARTING AT 1, NOT 0. **
 
-  int *ia = new int[nb_coeffs + 1];
-  int *ja = new int[nb_coeffs + 1];
-  scalar_t *ar = new scalar_t[nb_coeffs + 1];
+  int nb_coeffs;
+
+  if(absolute_proportion) {
+    nb_coeffs = nb_points * _nb_clusters + nb_points * _nb_clusters;
+  } else {
+    nb_coeffs = nb_points * _nb_clusters + nb_points * nb_classes * _nb_clusters;
+  }
+
+  int *coeff_row = new int[nb_coeffs + 1];
+  int *coeff_col = new int[nb_coeffs + 1];
+  scalar_t *coeff_wgt = new scalar_t[nb_coeffs + 1];
+
+  int n_coeff = 1;
 
   scalar_t *nb_samples_per_class = new scalar_t[nb_classes];
   for(int c = 0; c < nb_classes; c++) {
@@ -167,70 +209,100 @@ scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t
   glp_set_prob_name(lp, "uninformative_lp_cluster_association");
   glp_set_obj_dir(lp, GLP_MIN);
 
-  glp_add_rows(lp, nb_points + _nb_clusters * nb_classes);
+  // We have one column per coefficient gamma
 
-  for(int n = 1; n <= nb_points; n++) {
-    glp_set_row_bnds(lp, n, GLP_FX, 1.0, 1.0);
-  }
+  glp_add_cols(lp, nb_points * _nb_clusters);
 
-  for(int k = 1; k <= _nb_clusters; k++) {
-    for(int c = 1; c <= nb_classes; c++) {
-      int row = nb_points + (k - 1) * nb_classes + c;
-      scalar_t tau = nb_samples_per_class[c-1] / scalar_t(_nb_clusters);
-      glp_set_row_bnds(lp, row, GLP_FX, tau, tau);
-    }
-  }
+  // The column for gamma[n][k], with point 1<=n<=nb_points and cluster
+  // 1<=k<=_nb_clusters, is nb_points * (k - 1) + n;
 
-  glp_add_cols(lp, nb_points * _nb_clusters);
+  // The (A) constraints will be expressed by putting bounds directly
+  // on the variables (i.e. one per column). So we need one row per
+  // (B) constraint, and one per (C) constraint.
+
+  glp_add_rows(lp, nb_points + _nb_clusters * nb_classes);
+
+  // First, we set the weights for the objective function, and the (A)
+  // constraints on the individual gammas
 
   for(int k = 1; k <= _nb_clusters; k++) {
     for(int n = 1; n <= nb_points; n++) {
-      int r = n + nb_points * (k - 1);
+      int col = n + nb_points * (k - 1);
 
-      scalar_t dist = 0;
+      // The LP weight on the gammas for the global loss is the
+      // normalized distance of that sample to the centroid of that
+      // cluster
 
-      for(int d = 0; d < _dim; d++) {
-        dist += sq(_cluster_means[k-1][d] - points[n-1][d]) / (2 * _cluster_var[k-1][d]);
-        dist += 0.5 * log(_cluster_var[k-1][d]);
-      }
+      glp_set_obj_coef(lp, col, distance_to_centroid(points[n-1], k-1));
+
+      // The (A) constraints: Each column corresponds to one of the
+      // gammas, and it has to be in [0,1]
 
-      glp_set_obj_coef(lp, r, dist);
-      glp_set_col_bnds(lp, r, GLP_DB, 0.0, 1.0);
+      glp_set_col_bnds(lp, col, GLP_DB, 0.0, 1.0);
     }
   }
 
-  int l = 1;
+  // The (B) constraints: for each point, the sum of its gammas is
+  // equal to 1.0
 
   for(int n = 1; n <= nb_points; n++) {
+    int row = n;
+    glp_set_row_bnds(lp, row, GLP_FX, 1.0, 1.0);
     for(int k = 1; k <= _nb_clusters; k++) {
-      int row = n;
-      ia[l] = row;
-      ja[l] = nb_points * (k - 1) + n;
-      ar[l] = 1.0;
-      l++;
+      coeff_row[n_coeff] = row;
+      coeff_col[n_coeff] = nb_points * (k - 1) + n;
+      coeff_wgt[n_coeff] = 1.0;
+      n_coeff++;
    }
  }
 
-  for(int k = 1; k <= _nb_clusters; k++) {
-    for(int c = 1; c <= nb_classes; c++) {
-      int row = nb_points + (k - 1) * nb_classes + c;
-      for(int n = 1; n <= nb_points; n++) {
-        if(labels[n-1] == c - 1) {
-          ia[l] = row;
-          ja[l] = (k-1) * nb_points + n;
-          ar[l] = 1.0;
-          l++;
+  // The (C) constraints: For each pair cluster/class, the sum of the
+  // gammas for this cluster and this class is equal to the number of
+  // samples of that class, divided by the number of clusters
+
+  if(absolute_proportion) {
+    for(int k = 1; k <= _nb_clusters; k++) {
+      for(int c = 1; c <= nb_classes; c++) {
+        int row = nb_points + (k - 1) * nb_classes + c;
+        scalar_t tau = nb_samples_per_class[c-1] / scalar_t(_nb_clusters);
+        glp_set_row_bnds(lp, row, GLP_FX, tau, tau);
+        for(int n = 1; n <= nb_points; n++) {
+          if(labels[n-1] == c - 1) {
+            coeff_row[n_coeff] = row;
+            coeff_col[n_coeff] = (k-1) * nb_points + n;
+            coeff_wgt[n_coeff] = 1.0;
+            n_coeff++;
+          }
+        }
+      }
+    }
+  } else {
+    for(int k = 1; k <= _nb_clusters; k++) {
+      for(int c = 1; c <= nb_classes; c++) {
+        int row = nb_points + (k - 1) * nb_classes + c;
+        glp_set_row_bnds(lp, row, GLP_FX, 0.0, 0.0);
+        for(int n = 1; n <= nb_points; n++) {
+          coeff_row[n_coeff] = row;
+          coeff_col[n_coeff] = (k-1) * nb_points + n;
+          coeff_wgt[n_coeff] =
+            (labels[n-1] == c - 1 ? 1.0 : 0.0)
+            - scalar_t(nb_samples_per_class[c-1]) / scalar_t(nb_points);
+          n_coeff++;
         }
       }
     }
   }
 
-  ASSERT(l == nb_coeffs + 1);
+  ASSERT(n_coeff == nb_coeffs + 1);
 
-  glp_load_matrix(lp, nb_coeffs, ia, ja, ar);
+  glp_load_matrix(lp, nb_coeffs, coeff_row, coeff_col, coeff_wgt);
+
+  // Now a miracle occurs
 
   glp_simplex(lp, NULL);
 
+  // We retrieve the result
+
   scalar_t total_dist = glp_get_obj_val(lp);
 
   for(int k = 1; k <= _nb_clusters; k++) {
@@ -241,9 +313,9 @@ scalar_t Clusterer::uninformative_lp_cluster_association(int nb_points, scalar_t
   }
 
   delete[] nb_samples_per_class;
-  delete[] ia;
-  delete[] ja;
-  delete[] ar;
+  delete[] coeff_row;
+  delete[] coeff_col;
+  delete[] coeff_wgt;
   glp_delete_prob(lp);
 
   return total_dist;
@@ -270,29 +342,16 @@ void Clusterer::update_clusters(int nb_points, scalar_t **points, scalar_t **gam
     for(int d = 0; d < _dim; d++) {
       if(sum_gamma >= 2) {
-        _cluster_var[k][d] = (_cluster_var[k][d] - sq(_cluster_means[k][d]) / sum_gamma) / (sum_gamma - 1);
+        _cluster_var[k][d] =
+          (_cluster_var[k][d] - sq(_cluster_means[k][d]) / sum_gamma) / (sum_gamma - 1);
+        _cluster_var[k][d] = max(scalar_t(min_cluster_variance), _cluster_var[k][d]);
       } else {
         _cluster_var[k][d] = 1;
       }
 
-      _cluster_var[k][d] = max(0.01, _cluster_var[k][d]);
-
-      _cluster_means[k][d] /= sum_gamma;
-    }
-  }
-}
-
-void Clusterer::initialize_clusters(int nb_points, scalar_t **points) {
-  int *used = new int[nb_points];
-  for(int k = 0; k < nb_points; k++) { used[k] = 0; }
-  for(int k = 0; k < _nb_clusters; k++) {
-    int l;
-    do { l = int(drand48() * nb_points); } while(used[l]);
-    for(int d = 0; d < _dim; d++) {
-      _cluster_means[k][d] = points[l][d];
-      _cluster_var[k][d] = 1.0;
+      _cluster_means[k][d] /= sum_gamma;
     }
-    used[l] = 1;
   }
-  delete[] used;
 }
 
 void Clusterer::train(int mode,
@@ -322,18 +381,23 @@ void Clusterer::train(int mode,
     switch(mode) {
 
     case STANDARD_ASSOCIATION:
-      total_distance =
-        baseline_cluster_association(nb_points, points, nb_classes, labels, gammas);
+      total_distance =
+        baseline_cluster_association(nb_points, points, nb_classes, labels, gammas);
      break;
 
     case STANDARD_LP_ASSOCIATION:
-      total_distance =
-        baseline_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
+      total_distance =
+        baseline_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
      break;
 
     case UNINFORMATIVE_LP_ASSOCIATION:
-      total_distance =
-        uninformative_lp_cluster_association(nb_points, points, nb_classes, labels, gammas);
+      total_distance =
+        uninformative_lp_cluster_association(nb_points, points, nb_classes, labels, gammas, 0);
+      break;
+
+    case UNINFORMATIVE_LP_ASSOCIATION_ABSOLUTE:
+      total_distance =
+        uninformative_lp_cluster_association(nb_points, points, nb_classes, labels, gammas, 1);
       break;
 
     default:
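The balanced-assignment LP that uninformative_lp_cluster_association builds above can be tried in isolation. The sketch below is not part of clusterer.cc: it is a standalone toy program with a single class, a hypothetical file name (toy_balanced_lp.cc) and made-up distances, but it uses the same column layout (col = n + N * (k - 1)) and the (A)/(B)/(C) constraints described in the comments, assuming GLPK is installed (built with e.g. g++ toy_balanced_lp.cc -lglpk).

// toy_balanced_lp.cc -- standalone sketch, not part of the original source:
// the balanced-assignment LP of uninformative_lp_cluster_association,
// restricted to a single class and hard-coded distances, so the
// row/column layout is explicit.

#include <cstdio>
#include <glpk.h>

int main() {
  const int N = 4, K = 2;   // points, clusters

  // dist[n][k]: distance of point n to the centroid of cluster k (made up)
  double dist[N][K] = { { 0.1, 2.0 }, { 0.2, 1.5 }, { 0.3, 1.8 }, { 1.2, 0.4 } };

  glp_prob *lp = glp_create_prob();
  glp_set_obj_dir(lp, GLP_MIN);

  // One column per gamma(n, k), laid out as col = n + N * (k - 1), 1-indexed
  glp_add_cols(lp, N * K);

  // Rows 1..N carry the (B) constraints, rows N+1..N+K the (C) constraints
  glp_add_rows(lp, N + K);

  for(int k = 1; k <= K; k++) {
    for(int n = 1; n <= N; n++) {
      int col = n + N * (k - 1);
      glp_set_obj_coef(lp, col, dist[n - 1][k - 1]);  // objective weight
      glp_set_col_bnds(lp, col, GLP_DB, 0.0, 1.0);    // (A): 0 <= gamma <= 1
    }
  }

  // Sparse constraint matrix in triplet form, 1-indexed as GLPK requires
  int coeff_row[2 * N * K + 1], coeff_col[2 * N * K + 1];
  double coeff_wgt[2 * N * K + 1];
  int l = 1;

  for(int n = 1; n <= N; n++) {             // (B): sum_k gamma(n, k) = 1
    glp_set_row_bnds(lp, n, GLP_FX, 1.0, 1.0);
    for(int k = 1; k <= K; k++) {
      coeff_row[l] = n; coeff_col[l] = n + N * (k - 1); coeff_wgt[l] = 1.0; l++;
    }
  }

  for(int k = 1; k <= K; k++) {             // (C): sum_n gamma(n, k) = N / K
    glp_set_row_bnds(lp, N + k, GLP_FX, double(N) / K, double(N) / K);
    for(int n = 1; n <= N; n++) {
      coeff_row[l] = N + k; coeff_col[l] = n + N * (k - 1); coeff_wgt[l] = 1.0; l++;
    }
  }

  glp_load_matrix(lp, 2 * N * K, coeff_row, coeff_col, coeff_wgt);
  glp_simplex(lp, NULL);

  printf("total distance %f\n", glp_get_obj_val(lp));
  for(int n = 1; n <= N; n++) {
    for(int k = 1; k <= K; k++) {
      printf("gamma(%d, %d) = %f\n", n, k, glp_get_col_prim(lp, n + N * (k - 1)));
    }
  }

  glp_delete_prob(lp);

  return 0;
}

With these numbers the unconstrained assignment would put points 1-3 in cluster 1; the (C) rows force a 2/2 split, and the optimum reassigns point 2, the cheapest one to move, for a total distance of 2.3.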