Implementing the ID3 Decision Tree Algorithm in Python

ID3 is a greedy decision-tree algorithm that uses information gain as its splitting criterion.
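
For reference, the quantities the code below computes are the Shannon entropy of a data set D and the information gain of a candidate feature A:

  H(D) = -\sum_{k} p_k \log_2 p_k
  \mathrm{Gain}(D, A) = H(D) - \sum_{v} \frac{|D_v|}{|D|} H(D_v)

where p_k is the fraction of records in D belonging to class k and D_v is the subset of D whose feature A takes the value v. At each node the algorithm splits on the feature with the largest gain.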

# -*- coding: utf-8 -*-

import math
import copy
import pickle


class ID3DTree(object):
  def __init__(self):  # constructor
    self.tree = {}     # the generated decision tree
    self.dataSet = []  # training data set
    self.labels = []   # feature label names


  # Load the data set from a tab-separated text file
  def loadDataSet(self, path, labels):
    with open(path, "r", encoding="utf-8") as fp:  # read the file contents
      content = fp.read()
    rowList = content.splitlines()  # split the text into rows
    # split each non-empty row on tabs; strip() filters out blank lines
    self.dataSet = [row.split("\t") for row in rowList if row.strip()]
    self.labels = labels


  # Train the decision tree on the loaded data set
  def train(self):
    labels = copy.deepcopy(self.labels)  # work on a copy so self.labels is preserved
    self.tree = self.buildTree(self.dataSet, labels)


  # Build the decision tree: main recursive construction routine
  def buildTree(self, dataSet, labels):
    cateList = [data[-1] for data in dataSet]  # extract the class-label column
    # Stopping condition 1: all records share the same class label -- return that label
    if cateList.count(cateList[0]) == len(cateList):
      return cateList[0]
    # Stopping condition 2: no features left (only the label column remains) --
    # return the majority class label
    if len(dataSet[0]) == 1:
      return self.maxCate(cateList)
    # Core step: pick the feature with the highest information gain
    bestFeat = self.getBestFeat(dataSet)  # index of the best feature axis
    bestFeatLabel = labels[bestFeat]
    tree = {bestFeatLabel: {}}
    del labels[bestFeat]
    # Collect the distinct values of the best feature
    uniqueVals = set([data[bestFeat] for data in dataSet])
    for value in uniqueVals:  # grow the tree recursively
      subLabels = labels[:]  # copy of the remaining feature labels for the subtree
      # split the data set on the best feature and the current value
      splitDataset = self.splitDataSet(dataSet, bestFeat, value)
      subTree = self.buildTree(splitDataset, subLabels)  # build the subtree
      tree[bestFeatLabel][value] = subTree
    return tree
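
  # For illustration (hypothetical feature names): buildTree returns a nested dict such as
  #   {'outlook': {'sunny': 'no', 'overcast': 'yes', 'rain': {'wind': {'weak': 'yes', 'strong': 'no'}}}}
  # where each inner dict is a decision node keyed by a feature label and each leaf is a class label.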


  # Return the class label that occurs most often in cateList
  def maxCate(self, cateList):
    items = dict([(cateList.count(i), i) for i in cateList])  # map occurrence count -> label
    return items[max(items.keys())]  # label with the highest count


  # Find the feature with the highest information gain
  def getBestFeat(self, dataSet):
    # Number of features; the last column holds the class label
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = self.computeEntropy(dataSet)  # entropy of the full data set
    bestInfoGain = 0.0  # best information gain found so far
    bestFeature = -1    # index of the best feature axis
    # Outer loop: iterate over the feature columns 0 .. numFeatures-1
    for i in range(numFeatures):
      uniqueVals = set([data[i] for data in dataSet])  # distinct values of feature i
      newEntropy = 0.0
      for value in uniqueVals:
        subDataSet = self.splitDataSet(dataSet, i, value)
        prob = len(subDataSet) / float(len(dataSet))
        newEntropy += prob * self.computeEntropy(subDataSet)  # weighted child entropy
      infoGain = baseEntropy - newEntropy
      if infoGain > bestInfoGain:  # keep the feature with the largest gain so far
        bestInfoGain = infoGain
        bestFeature = i
    return bestFeature



  # Compute the Shannon entropy of the class labels in dataSet
  def computeEntropy(self, dataSet):
    dataLen = float(len(dataSet))
    cateList = [data[-1] for data in dataSet]  # class-label column
    # dict mapping each class label to its number of occurrences
    items = dict([(i, cateList.count(i)) for i in cateList])
    infoEntropy = 0.0
    for key in items:  # Shannon entropy: -sum(p * log2(p))
      prob = float(items[key]) / dataLen
      infoEntropy -= prob * math.log(prob, 2)
    return infoEntropy
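
  # Worked example (illustrative numbers only): for 9 records of one class and 5 of another,
  # entropy = -(9/14)*log2(9/14) - (5/14)*log2(5/14) ≈ 0.940 bits.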


  # Split the data set: keep the rows where feature `axis` equals `value`,
  # and drop that feature column from the returned rows
  # dataSet: data set; axis: feature index; value: feature value to match
  def splitDataSet(self, dataSet, axis, value):
    rtnList = []
    for featVec in dataSet:
      if featVec[axis] == value:
        rFeatVec = featVec[:axis]  # elements 0 .. axis-1
        rFeatVec.extend(featVec[axis + 1:])  # elements axis+1 .. end
        rtnList.append(rFeatVec)
    return rtnList
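
  # For illustration: splitDataSet([['sunny', 'hot', 'no'], ['rain', 'mild', 'yes']], 0, 'sunny')
  # returns [['hot', 'no']] -- the matching row with the axis-0 column removed.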

  # Persist a tree to a file with pickle
  def storetree(self, inputTree, filename):
    with open(filename, 'wb') as fw:  # pickle requires binary mode
      pickle.dump(inputTree, fw)

  # Load a tree back from a file
  def grabTree(self, filename):
    with open(filename, 'rb') as fr:
      return pickle.load(fr)
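
A minimal usage sketch follows; the data file name and the label list are hypothetical placeholders, not part of the original code:

if __name__ == "__main__":
  dtree = ID3DTree()
  # "dataset.dat" stands for a tab-separated file whose last column is the class label
  dtree.loadDataSet("dataset.dat", ["age", "revenue", "student", "credit"])
  dtree.train()
  print(dtree.tree)                         # inspect the learned nested-dict tree
  dtree.storetree(dtree.tree, "data.tree")  # persist the tree with pickle
  print(dtree.grabTree("data.tree"))        # reload it from disk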
