Extract decision boundary lines (x, y coordinate format) from an sklearn decision tree

I am trying to create decision-surface graphics on an external visualization platform. I am working with the iris dataset, as shown on the sklearn decision tree documentation page, and I use the same approach to build my own piece of the decision surface. My end goal, however, is not a matplotlib figure, so from there I feed the data into my visualization software. To do this, I simply called flatten() and tolist() on xx, yy, and Z, and wrote a JSON file containing those lists.
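For reference, a minimal version of that export step (a sketch that mirrors the mesh-grid construction from the sklearn example; the surface.json file name is arbitrary):

import json
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

data = load_iris()
X = data.data[:, :2]  # two features, as in the pairwise sklearn example
clf = DecisionTreeClassifier().fit(X, data.target)

plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

# every grid cell goes into the file, which is why it gets so large
with open('surface.json', 'w') as f:
    json.dump({'xx': xx.flatten().tolist(),
               'yy': yy.flatten().tolist(),
               'Z': Z.flatten().tolist()}, f)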

The problem is that when I try to render it, my visualization program crashes: the data is too large. After flattening, each list is more than 86,000 elements long. This is because the plot step size is very small (0.02), so the prediction loop effectively takes baby steps across the data range between min and max and fills in a class at every point, like a pixel grid. I reduced the arrays to only 2,000 elements and saw that the coordinates are just lines sweeping back and forth across the plane (eventually covering all of it).

Question: can I get the x, y coordinates of the decision boundaries themselves (as opposed to iterating over the entire plane)? Ideally, a list containing only the vertex points of each boundary line. Or, alternatively, is there some entirely different, more computationally efficient way to recreate this plot?

The boundaries can be visualized to some extent by replacing the call to contourf() with contour():

[figure: contour() rendering showing only the decision boundary lines]

I just don't know how to get at the data that defines those lines (via xx, yy, and Z, or perhaps by some other means?).


Good question; this can be read directly off the fitted tree.

I once implemented Node Harvest on top of scikit-learn, which required extracting exactly this kind of information from a fitted tree. A decision tree partitions the feature space into axis-aligned rectangles, one per leaf, so collecting those rectangles gives you the decision regions exactly. Here is a demo with matplotlib:

n = 100
np.random.seed(42)
x = np.concatenate([np.random.randn(n, 2) + 1, np.random.randn(n, 2) - 1])
y = ['b'] * n + ['r'] * n
plt.scatter(x[:, 0], x[:, 1], c=y)

dtc = DecisionTreeClassifier().fit(x, y)
rectangles = decision_areas(dtc, [-3, 3, -3, 3])
plot_areas(rectangles)
plt.xlim(-3, 3)
plt.ylim(-3, 3)

[figure: scatter of the two Gaussian blobs with the extracted decision rectangles overlaid]

Note that this extracts the filled regions rather than the boundary lines themselves; if you need the lines, they are simply the edges of these rectangles.

rectangles is a numpy array. Each row describes one decision region in the format [left, right, bottom, top, class].
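Since every region is an axis-aligned rectangle, a compact way to hand the boundaries to an external tool is one closed polyline of corner points per row. A minimal sketch (assuming the rectangles array from above; the boundaries.json name is arbitrary):

import json

def rectangles_to_polylines(rectangles):
    # one closed polyline (list of [x, y] corners) per decision rectangle
    polylines = []
    for left, right, bottom, top, cls in rectangles:
        l, r, b, t = map(float, (left, right, bottom, top))
        polylines.append({'class': int(cls),
                          'points': [[l, b], [r, b], [r, t], [l, t], [l, b]]})
    return polylines

with open('boundaries.json', 'w') as f:
    json.dump(rectangles_to_polylines(rectangles), f)

This is typically a few dozen vertex points instead of 86,000 grid cells.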


Update: applying this to the Iris dataset

The Iris dataset contains three classes rather than two, so a color has to be added in plot_areas: color = ['b', 'r', 'g'][int(rect[4])]. Furthermore, the data is 4-dimensional (four features) while we can only plot two, so we have to choose which features to show. This is the purpose of the x and y arguments of decision_areas: x is the index of the feature plotted on the x axis and y the index of the feature on the y axis. The defaults are x=0, y=1, but below I use features 2 and 3, which separate the Iris classes best.

decision_areas also takes a maxrange argument that bounds the plotted plane. Leaf rectangles can be open (some of their limits are +/-inf), so maxrange supplies the values used to close them. In the first example this was -3..3 on both axes (slightly wider than the data, which lies within roughly 3 of the origin).

For the Iris features below, I use a range of 0..7 on the x axis and 0..5 on the y axis:

from sklearn.datasets import load_iris
data = load_iris()
x = data.data
y = data.target
dtc = DecisionTreeClassifier().fit(x, y)
rectangles = decision_areas(dtc, [0, 7, 0, 5], x=2, y=3)
plt.scatter(x[:, 2], x[:, 3], c=y)
plot_areas(rectangles)

[figure: Iris scatter (features 2 and 3) with overlapping decision rectangles]

Note how the areas of different classes overlap. This is because the tree also splits on the two features that are not plotted; rectangles that are disjoint in the full 4-dimensional space can land on top of each other when projected onto two dimensions.

There is no fully faithful way to draw a 4-dimensional decision surface in 2D, so some ambiguity in such a plot is unavoidable.

If you want clean, non-overlapping regions instead, train the classifier on only the two features you plot. The tree then defines a genuine 2D partition:

from sklearn.datasets import load_iris
data = load_iris()
x = data.data[:, [2, 3]]
y = data.target
dtc = DecisionTreeClassifier().fit(x, y)
rectangles = decision_areas(dtc, [0, 7, 0, 3], x=0, y=1)
plt.scatter(x[:, 0], x[:, 1], c=y)
plot_areas(rectangles)

[figure: Iris scatter with non-overlapping decision rectangles from the two-feature tree]


Finally, here is the full code:

import numpy as np
from collections import deque
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import _tree as ctree
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle


class AABB:
    """Axis-aligned bounding box"""
    def __init__(self, n_features):
        self.limits = np.array([[-np.inf, np.inf]] * n_features)

    def split(self, f, v):
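        # copy the parent box, then tighten one side of it at threshold v on feature f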
        left = AABB(self.limits.shape[0])
        right = AABB(self.limits.shape[0])
        left.limits = self.limits.copy()
        right.limits = self.limits.copy()

        left.limits[f, 1] = v
        right.limits[f, 0] = v

        return left, right


def tree_bounds(tree, n_features=None):
    """Compute final decision rule for each node in tree"""
    if n_features is None:
        n_features = np.max(tree.feature) + 1
    aabbs = [AABB(n_features) for _ in range(tree.node_count)]
    queue = deque([0])
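    # walk the tree from the root, handing each node's box down to its two children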
    while queue:
        i = queue.pop()
        l = tree.children_left[i]
        r = tree.children_right[i]
        if l != ctree.TREE_LEAF:
            aabbs[l], aabbs[r] = aabbs[i].split(tree.feature[i], tree.threshold[i])
            queue.extend([l, r])
    return aabbs


def decision_areas(tree_classifier, maxrange, x=0, y=1, n_features=None):
    """ Extract decision areas.

    tree_classifier: Instance of a sklearn.tree.DecisionTreeClassifier
    maxrange: values to insert for [left, right, bottom, top] if the interval is open (+/-inf)
    x: index of the feature that goes on the x axis
    y: index of the feature that goes on the y axis
    n_features: override autodetection of number of features
    """
    tree = tree_classifier.tree_
    aabbs = tree_bounds(tree, n_features)

    rectangles = []
    for i in range(len(aabbs)):
        if tree.children_left[i] != ctree.TREE_LEAF:
            continue
        l = aabbs[i].limits
        r = [l[x, 0], l[x, 1], l[y, 0], l[y, 1], np.argmax(tree.value[i])]
        rectangles.append(r)
    rectangles = np.array(rectangles)
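    # close rectangles that extend to +/-inf using the user-supplied maxrange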
    rectangles[:, [0, 2]] = np.maximum(rectangles[:, [0, 2]], maxrange[0::2])
    rectangles[:, [1, 3]] = np.minimum(rectangles[:, [1, 3]], maxrange[1::2])
    return rectangles

def plot_areas(rectangles):
    for rect in rectangles:
        color = ['b', 'r'][int(rect[4])]
        rp = Rectangle([rect[0], rect[2]], 
                       rect[1] - rect[0], 
                       rect[3] - rect[2], color=color, alpha=0.3)
        plt.gca().add_artist(rp)

@kazemakase's answer extracts the exact regions from the tree. If you just want the "boundary pixels" instead, you can get them from the rasterized Z by checking where the predicted class changes between neighboring grid cells:

# use absolute differences so that opposite class changes cannot cancel out
steps = np.abs(np.diff(Z, axis=0))[:, 1:] + np.abs(np.diff(Z, axis=1))[1:, :]
is_boundary = steps != 0
rows, cols = np.where(is_boundary)
# rescale grid indices into original data units; np.where returns (row, column)
# pairs, and rows index the y axis of the meshgrid (x_min/y_min and plot_step
# come from the mesh-grid construction in the sklearn example)
bx = x_min + cols.astype(float) * plot_step
by = y_min + rows.astype(float) * plot_step
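The boundary points are already in data units and number only in the thousands, so they are small enough to inspect or export (a sketch; bx and by come from the snippet above, and the file name is arbitrary):

import json
import matplotlib.pyplot as plt

plt.scatter(bx, by, s=1)  # quick visual check of the extracted boundary
plt.show()

with open('boundary_points.json', 'w') as f:
    json.dump({'x': bx.tolist(), 'y': by.tolist()}, f)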

Here is what is_boundary looks like (a thin trace along the class boundaries):

[figure: is_boundary mask tracing the decision boundary as thin lines]


Another option, if you want the raw bounds of each leaf region rather than a plot, is to walk from every leaf back up to the root and record the split threshold crossed at each step:

number_of_leaves = (tree.tree_.children_left == -1).sum()
features = x.shape[1]
boundaries = np.zeros([number_of_leaves, features, 2])
boundaries[:, :, 0] = -np.inf
boundaries[:, :, 1] = np.inf

locs = np.where(tree.tree_.children_left == -1)[0]  # node ids of the leaves

for k in range(locs.shape[0]):
    idx_new = locs[k]

    while idx_new != 0:
        # find the parent of the current node
        i_check = np.where(tree.tree_.children_left == idx_new)[0]
        j_check = np.where(tree.tree_.children_right == idx_new)[0]

        if i_check.shape[0] == 1:
            # arrived via the left branch: feature <= threshold, an upper bound
            idx_new = i_check[0]
            feat_ = tree.tree_.feature[idx_new]
            thresh_ = tree.tree_.threshold[idx_new]
            boundaries[k, feat_, 1] = min(boundaries[k, feat_, 1], thresh_)
        elif j_check.shape[0] == 1:
            # arrived via the right branch: feature > threshold, a lower bound
            idx_new = j_check[0]
            feat_ = tree.tree_.feature[idx_new]
            thresh_ = tree.tree_.threshold[idx_new]
            boundaries[k, feat_, 0] = max(boundaries[k, feat_, 0], thresh_)
        else:
            print('Fail Case')  # every non-root node has a parent; should not happen

The result is an array of size n * d * 2, where n is the number of leaves and d the number of features; it holds the lower and upper bound of each leaf's rectangle in every dimension (with +/-inf where a side is unbounded). The leaves are found by checking where tree.tree_.children_left / tree.tree_.children_right equal -1, since leaf nodes have no children.
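As a usage sketch (reusing the two-blob data from the first answer; tree is the fitted classifier the snippet above expects, and the printed bounds depend on the tree that was actually learned):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

n = 100
np.random.seed(42)
x = np.concatenate([np.random.randn(n, 2) + 1, np.random.randn(n, 2) - 1])
y = ['b'] * n + ['r'] * n
tree = DecisionTreeClassifier(max_depth=3).fit(x, y)

# ... run the leaf-walk above, then inspect one box per leaf:
for k in range(number_of_leaves):
    print('leaf %d:' % k, boundaries[k])  # rows: [x_low, x_high], [y_low, y_high]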


Source: https://habr.com/ru/post/1676974/

