Continuing the statistics series. Nothing especially deep this time, still fairly basic material:
1. Variance - sample
2. Standard deviation (called "covariance" in this series) - sample
3. Coefficient of variation
4. Correlation coefficient

The functions for this post are listed below.

Original article link:

The BP (backpropagation) neural network is an important foundation of deep learning and one of its key algorithms, so understanding how BP networks work, and how to implement them, is essential. Below we go through both the theory and an implementation.

1. Theory

I will flesh this section out when I have time. For now, start with a good English article: A Step by Step Backpropagation Example

Activation function reference: Common Activation Functions in Deep Learning - Sigmoid & ReLU & Softmax

An accessible take on initialization: CS231n course notes translation: Neural Network Notes

Useful tricks: Tricks in Neural Network Training - Efficient BP (the backpropagation algorithm)

A simple walkthrough of a BPNN's computation: Understanding Backpropagation in Neural Networks in One Article - BackPropagation
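As a taste of what those references walk through, here is a minimal numeric sketch (toy values, not this article's code): one sigmoid unit with squared-error loss, with the analytic backprop gradient checked against a numerical difference.

import numpy as np

x = np.array([0.5, -1.0])  # toy input
w = np.array([0.2, 0.8])   # toy weights
t = 1.0                    # target

def loss_of(w):
    a = w.dot(x)                   # pre-activation
    z = 1.0 / (1.0 + np.exp(-a))   # sigmoid
    return 0.5 * (t - z) ** 2, z

# forward pass
_, z = loss_of(w)
# backprop via the chain rule: dL/dz = z - t, dz/da = z(1 - z), da/dw = x
grad = (z - t) * z * (1 - z) * x

# numerical gradient check
eps = 1e-6
num_grad = np.array([
    (loss_of(w + eps * np.eye(2)[i])[0] - loss_of(w - eps * np.eye(2)[i])[0]) / (2 * eps)
    for i in range(2)
])
print(grad)      # analytic gradient
print(num_grad)  # should match the analytic gradient closely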

2. Implementation - batch stochastic gradient descent

This implements a BP neural network with a configurable number of layers. The network structure is defined through the net_struct parameter. For example, a network with only an output layer (no hidden layers), sigmoid activation, and a learning rate of 0.01 is defined as follows:

net_struct = [[10, "sigmoid", 0.01]]  # network structure

To define a network with a 100-neuron hidden layer, followed by a 50-neuron hidden layer and a 10-neuron output layer:

net_struct = [[100, "sigmoid", 0.01], [50, "sigmoid", 0.01], [10, "sigmoid", 0.01]]  # network structure

The implementation, every coder's favorite part, follows:

# encoding=utf8
'''
Created on 2017-7-3

@author: Administrator
'''
import random

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split as ttsplit


class LossFun:
    def __init__(self, lf_type="least_square"):
        self.name = "loss function"
        self.type = lf_type

    def cal(self, t, z):
        loss = 0
        if self.type == "least_square":
            loss = self.least_square(t, z)
        return loss

    def cal_deriv(self, t, z):
        delta = 0
        if self.type == "least_square":
            delta = self.least_square_deriv(t, z)
        return delta

    def least_square(self, t, z):
        zsize = z.shape
        sample_num = zsize[1]
        # mean squared error over the batch (the stray "* t" factor in the
        # original is dropped so the loss matches its derivative below)
        return np.sum(0.5 * (t - z) * (t - z)) / sample_num

    def least_square_deriv(self, t, z):
        return z - t


class ActivationFun:
    '''
    Activation functions
    '''

    def __init__(self, atype="sigmoid"):
        self.name = "activation function library"
        self.type = atype

    def cal(self, a):
        z = 0
        if self.type == "sigmoid":
            z = self.sigmoid(a)
        elif self.type == "relu":
            z = self.relu(a)
        return z

    def cal_deriv(self, a):
        z = 0
        if self.type == "sigmoid":
            z = self.sigmoid_deriv(a)
        elif self.type == "relu":
            z = self.relu_deriv(a)
        return z

    def sigmoid(self, a):
        return 1 / (1 + np.exp(-a))

    def sigmoid_deriv(self, a):
        fa = self.sigmoid(a)
        return fa * (1 - fa)

    def relu(self, a):
        # actually a leaky ReLU: slope 0.1 for non-positive inputs
        idx = a <= 0
        a[idx] = 0.1 * a[idx]
        return a  # np.maximum(a, 0.0)

    def relu_deriv(self, a):
        a[a > 0] = 1.0
        a[a <= 0] = 0.1
        return a


class Layer:
    '''
    A single network layer
    '''

    def __init__(self, num_neural, af_type="sigmoid", learn_rate=0.5):
        self.af_type = af_type  # activation function type
        self.learn_rate = learn_rate
        self.num_neural = num_neural
        self.dim = None
        self.W = None
        self.a = None
        self.X = None
        self.z = None
        self.delta = None
        self.theta = None
        self.act_fun = ActivationFun(self.af_type)

    def fp(self, X):
        '''
        Forward propagation
        '''
        self.X = X
        xsize = X.shape
        self.dim = xsize[0]
        self.num = xsize[1]

        if self.W is None:
            # self.W = np.random.random((self.dim, self.num_neural)) - 0.5
            # self.W = np.random.uniform(-1, 1, size=(self.dim, self.num_neural))
            if self.af_type == "sigmoid":
                self.W = np.random.normal(0, 1, size=(self.dim, self.num_neural)) / np.sqrt(self.num)
            elif self.af_type == "relu":
                self.W = np.random.normal(0, 1, size=(self.dim, self.num_neural)) * np.sqrt(2.0 / self.num)
        if self.theta is None:
            # self.theta = np.random.random((self.num_neural, 1)) - 0.5
            # self.theta = np.random.uniform(-1, 1, size=(self.num_neural, 1))
            if self.af_type == "sigmoid":
                self.theta = np.random.normal(0, 1, size=(self.num_neural, 1)) / np.sqrt(self.num)
            elif self.af_type == "relu":
                self.theta = np.random.normal(0, 1, size=(self.num_neural, 1)) * np.sqrt(2.0 / self.num)

        # calculate the forward pre-activation a
        self.a = (self.W.T).dot(self.X)
        # calculate the forward output z
        self.z = self.act_fun.cal(self.a)
        return self.z

    def bp(self, delta):
        '''
        Back propagation
        '''
        self.delta = delta * self.act_fun.cal_deriv(self.a)
        # average the theta update over all samples in the batch (note:
        # theta is updated here but never added to the pre-activation in fp)
        self.theta = np.array([np.mean(self.theta - self.learn_rate * self.delta, 1)]).T
        dW = self.X.dot(self.delta.T) / self.num
        self.W = self.W - self.learn_rate * dW
        delta_out = self.W.dot(self.delta)
        return delta_out


class BpNet:
    '''
    BP neural network
    '''

    def __init__(self, net_struct, stop_crit, max_iter, batch_size=10):
        self.name = "net work"
        self.net_struct = net_struct
        if len(self.net_struct) == 0:
            print("no layer is specified!")
            return

        self.stop_crit = stop_crit
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.layers = []
        self.num_layers = 0
        # build the network
        self.create_net(net_struct)
        self.loss_fun = LossFun("least_square")

    def create_net(self, net_struct):
        '''
        Build the network
        '''
        self.num_layers = len(net_struct)
        for i in range(self.num_layers):
            self.layers.append(Layer(net_struct[i][0], net_struct[i][1], net_struct[i][2]))

    def train(self, X, t, Xtest=None, ttest=None):
        '''
        Train the network
        '''
        eva_acc_list = []
        eva_loss_list = []

        xshape = X.shape
        num = xshape[0]
        dim = xshape[1]

        for k in range(self.max_iter):
            # draw a random mini-batch
            idxs = random.sample(range(num), self.batch_size)
            xi = np.array([X[idxs, :]]).T[:, :, 0]  # shape (dim, batch)
            ti = np.array([t[idxs, :]]).T[:, :, 0]  # shape (10, batch)
            # forward pass
            zi = self.fp(xi)
            # loss derivative
            delta_i = self.loss_fun.cal_deriv(ti, zi)
            # backward pass
            self.bp(delta_i)
            # evaluate accuracy
            if Xtest is not None:
                if k % 100 == 0:
                    [eva_acc, eva_loss] = self.test(Xtest, ttest)
                    eva_acc_list.append(eva_acc)
                    eva_loss_list.append(eva_loss)
                    print("%4d,%4f,%4f" % (k, eva_acc, eva_loss))
            else:
                print("%4d" % (k))
        return [eva_acc_list, eva_loss_list]

    def test(self, X, t):
        '''
        Evaluate model accuracy
        '''
        xshape = X.shape
        num = xshape[0]
        z = self.fp_eval(X.T)
        t = t.T
        est_pos = np.argmax(z, 0)
        real_pos = np.argmax(t, 0)
        corrct_count = np.sum(est_pos == real_pos)
        acc = 1.0 * corrct_count / num
        loss = self.loss_fun.cal(t, z)
        return [acc, loss]

    def fp(self, X):
        '''
        Forward pass through all layers
        '''
        z = X
        for i in range(self.num_layers):
            z = self.layers[i].fp(z)
        return z

    def bp(self, delta):
        '''
        Backward pass through all layers
        '''
        z = delta
        for i in range(self.num_layers - 1, -1, -1):
            z = self.layers[i].bp(z)
        return z

    def fp_eval(self, X):
        '''
        Forward pass (evaluation)
        '''
        layers = self.layers
        z = X
        for i in range(self.num_layers):
            z = layers[i].fp(z)
        return z


def z_score_normalization(x):
    mu = np.mean(x)
    sigma = np.std(x)
    x = (x - mu) / sigma
    return x


def sigmoid(X, useStatus):
    if useStatus:
        return 1.0 / (1 + np.exp(-float(X)))
    else:
        return float(X)


def plot_curve(data, title, lege, xlabel, ylabel):
    num = len(data)
    idx = range(num)
    plt.plot(idx, data, color="r", linewidth=1)
    plt.xlabel(xlabel, fontsize="xx-large")
    plt.ylabel(ylabel, fontsize="xx-large")
    plt.title(title, fontsize="xx-large")
    plt.legend([lege], fontsize="xx-large", loc='upper left')
    plt.show()


if __name__ == "__main__":
    print('This is main of module "bp_nn.py"')
    print("Import data")
    raw_data = pd.read_csv('./train.csv', header=0)
    data = raw_data.values
    imgs = data[0::, 1::]
    labels = data[::, 0]
    train_features, test_features, train_labels, test_labels = ttsplit(
        imgs, labels, test_size=0.33, random_state=23323)
    train_features = z_score_normalization(train_features)
    test_features = z_score_normalization(test_features)

    # one-hot encode the labels
    sample_num = train_labels.shape[0]
    tr_labels = np.zeros([sample_num, 10])
    for i in range(sample_num):
        tr_labels[i][train_labels[i]] = 1
    sample_num = test_labels.shape[0]
    te_labels = np.zeros([sample_num, 10])
    for i in range(sample_num):
        te_labels[i][test_labels[i]] = 1

    print(train_features.shape)
    print(tr_labels.shape)
    print(test_features.shape)
    print(te_labels.shape)

    stop_crit = 100  # stopping criterion (not used by train)
    max_iter = 10000  # maximum number of iterations
    batch_size = 100  # number of samples per training step
    # network structure: [[num_neurons, activation function, learning rate], ...]
    net_struct = [[100, "relu", 0.01], [10, "sigmoid", 0.1]]
    # net_struct = [[200, "sigmoid", 0.5], [100, "sigmoid", 0.5], [10, "sigmoid", 0.5]]

    bpNNCls = BpNet(net_struct, stop_crit, max_iter, batch_size)
    # train model
    [acc, loss] = bpNNCls.train(train_features, tr_labels, test_features, te_labels)
    # [acc, loss] = bpNNCls.train(train_features, tr_labels)
    print("training model finished")
    # plot training curves
    plot_curve(acc, "Bp Network Accuracy", "accuracy", "iter", "Accuracy")
    plot_curve(loss, "Bp Network Loss", "loss", "iter", "Loss")
    # test model
    [acc, loss] = bpNNCls.test(test_features, te_labels)
    print("test accuracy:%f" % (acc))

The experiments use the MNIST dataset, which can be downloaded from: https://github.com/WenDesi/lihang_book_algorithm/blob/master/data/train.csv

a. With sigmoid activation and a net_struct = [10, "sigmoid"] network structure (which can be viewed as softmax regression), the validation accuracy and loss change as shown below:

[Figure: validation accuracy curve]

[Figure: validation loss curve]

The test accuracy reaches 0.916017, which is quite decent. However, stochastic gradient descent depends on parameter initialization; with poor initialization it converges slowly, or even yields poor results.

b. With sigmoid activation and a net_struct = [200, "sigmoid", 100, "sigmoid", 10, "sigmoid"] network structure (a 200-neuron hidden layer, a 100-neuron hidden layer, and a 10-neuron output layer), the validation accuracy and loss change as shown below:

[Figure: validation accuracy curve]

[Figure: validation loss curve]

The validation accuracy reaches 0.963636, considerably better than softmax regression. The loss curve also shows that with hidden layers the algorithm converges more stably than without.

Make Change – Focus on Computer Vision and Pattern Recognition


As before, start by building a list. This time that job is wrapped in a function for easy reuse later; functions written in the previous post will also be reused here.
def create_rand_list(min_num,max_num,count_list):
  case_list = []
  while len(case_list) < count_list:
    rand_float = random.uniform(min_num,max_num)
    if rand_float in case_list:
      continue
    case_list.append(rand_float)
  case_list = [round(case,2) for case in case_list]
  return case_list
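For example, a usage sketch:

# ten distinct floats drawn from [5, 26), rounded to 2 decimal places
rand_list = create_rand_list(5, 26, 10)
print(rand_list)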

Below are the history functions:

sum_fun()  # cumulative sum
len_fun()  # count of elements
multiply_fun()  # cumulative product
sum_mean_fun()  # arithmetic mean
sum_mean_rate()  # return computed with the arithmetic mean
median_fun()  # median
modes_fun()  # mode
ext_minus_fun()  # range (max minus min)
geom_mean_fun()  # geometric mean
geom_mean_rate()  # return computed with the geometric mean

The new function code:

import random

# first generate a random list; the function already exists above, so no more on that
rand_list = [15.79, 6.83, 12.83, 22.32, 17.92, 6.29, 10.19, 10.13, 24.23, 25.56]

# 1. Sample variance S^2: sum the squared deviations of each element from the
#    list mean, then divide by count - 1 (the population variance divides by
#    the full count, without the -1)
def var_fun(rand_list):
  mean_num = sum_mean_fun(rand_list)  # arithmetic mean
  len_num = len_fun(rand_list)  # element count
  var_list = [(x - mean_num) ** 2 for x in rand_list]
  var_sum = sum_fun(var_list)
  var_num = var_sum / (len_num - 1)
  return var_num

# 2. Sample standard deviation S (called "covariance" in this series):
#    this one is easy, just the square root of the variance
def covar_fun(rand_list):
  var_num = var_fun(rand_list)
  covar_num = var_num ** 0.5
  return covar_num

# 3. Coefficient of variation CV, a measure of dispersion:
#    standard deviation / arithmetic mean * 100%
#    Note (from Baidu Baike): in statistical analysis, a CV above 15% suggests
#    the data may be abnormal and should be considered for removal
def trans_coef_fun(rand_list):
  covar_num = covar_fun(rand_list)
  mean_num = sum_mean_fun(rand_list)
  trans_coef_num = covar_num / mean_num
  return trans_coef_num

# 4. Sample correlation coefficient r: the linear relationship between two
#    dimensions, -1 < r < 1; the closer to 1, the stronger the relationship.
#    Two dimensions means two input lists, so the algorithm is a bit fiddly.
'''
r = sum((xi - mean(x)) * (yi - mean(y)))
    / (sum((xi - mean(x))^2)^0.5 * sum((yi - mean(y))^2)^0.5)
'''
x_list = rand_list
y_list = [4.39, 13.84, 9.21, 9.91, 15.69, 14.92, 25.77, 23.99, 8.15, 25.07]
def pearson_fun(x_list, y_list):
  x_mean = sum_mean_fun(x_list)
  y_mean = sum_mean_fun(y_list)
  len_num = len_fun(x_list)
  if len_num == len_fun(y_list):
    xy_multiply_list = [(x_list[i] - x_mean) * (y_list[i] - y_mean) for i in range(len_num)]
    xy_multiply_num = sum_fun(xy_multiply_list)
  else:
    print('input list wrong, another input try')
    return None
  x_covar_son_list = [(x - x_mean) ** 2 for x in x_list]
  y_covar_son_list = [(y - y_mean) ** 2 for y in y_list]
  x_covar_son_num = sum_fun(x_covar_son_list)
  y_covar_son_num = sum_fun(y_covar_son_list)
  xy_covar_son_multiply_num = (x_covar_son_num ** 0.5) * (y_covar_son_num ** 0.5)
  pearson_num = xy_multiply_num / xy_covar_son_multiply_num
  return pearson_num
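A usage sketch over the two sample lists above (it assumes the history functions sum_fun(), len_fun(), and sum_mean_fun() from the earlier post are in scope):

print(var_fun(rand_list))           # sample variance
print(covar_fun(rand_list))         # sample standard deviation
print(trans_coef_fun(rand_list))    # coefficient of variation
print(pearson_fun(x_list, y_list))  # correlation between the two lists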

Below are the history functions:

create_rand_list()  # create a list with a specified number of elements
sum_fun()  # cumulative sum
len_fun()  # count of elements
multiply_fun()  # cumulative product
sum_mean_fun()  # arithmetic mean
sum_mean_rate()  # return computed with the arithmetic mean
median_fun()  # median
modes_fun()  # mode
ext_minus_fun()  # range (max minus min)
geom_mean_fun()  # geometric mean
geom_mean_rate()  # return computed with the geometric mean
var_fun()  # sample variance S^2
covar_fun()  # sample standard deviation S
trans_coef_fun()  # coefficient of variation CV
pearson_fun()  # sample correlation coefficient r
unite_rate_fun()  # joint probability
condition_rate_fun()  # conditional probability
e_x()  # expected value of a random variable
var_rand_fun()  # variance of a random variable
covar_rand_fun()  # standard deviation of a random variable
covar_rand_xy_fun()  # joint covariance
e_p()  # portfolio expected return
var_p_fun()  # portfolio risk
bayes()  # Bayes

—————Above are the old ones—————
—————Below are the new ones—————

Continuing with probability. This time it is the binomial distribution and the Poisson distribution. These two are actually quite fun, since they can be used as prediction functions. Because there are only a few functions, there is no worked example this post, but each function is explained in turn.

1. Factorial n!
Multiply down by one each time until reaching 1, e.g. 5! = 5 * 4 * 3 * 2 * 1 = 120. That is the usual direction, but implemented that way the function is slightly less efficient, so flip it around and compute 1*2*3*...*n instead. The function:

def fact_fun(n):
  if n == 0:
    return 1
  fact_list = [i for i in range(1, n + 1)]
  fact_num = multiply_fun(fact_list)
  return fact_num

2. Counting combinations C
C = n! / (x! * (n - x)!)
This is the number of possible outcome combinations when drawing x sample units from n samples. For example, drawing 3 items from 5 items gives 10 combinations.

def c_n_x(case_count, real_count):
  fact_n = fact_fun(case_count)
  fact_x = fact_fun(real_count)
  fact_n_x = fact_fun(case_count - real_count)
  c_n_x_num = fact_n // (fact_x * fact_n_x)
  return c_n_x_num

3. Binomial probability distribution
Perform n Bernoulli trials; a Bernoulli trial is a single run of an event with exactly two mutually exclusive outcomes, such as a coin flip. The probability of k successes in n trials is
P = C(n, k) * p^k * (1 - p)^(n - k)
where p is the success probability of a single event, and the failure probability is 1 - p.

def binomial_fun(case_count, real_count, p):
  c_n_k_num = c_n_x(case_count, real_count)
  pi = (p ** real_count) * ((1 - p) ** (case_count - real_count))
  binomial_num = c_n_k_num * pi
  return binomial_num
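A quick sanity check (a sketch; it assumes the history function multiply_fun() from the earlier posts is in scope, since fact_fun() calls it):

# 3 heads in 5 fair coin flips: C(5,3) * 0.5^3 * 0.5^2 = 10 * 0.125 * 0.25
print(c_n_x(5, 3))              # 10
print(binomial_fun(5, 3, 0.5))  # 0.3125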

4. Poisson distribution
Given an opportunity domain (which can be a region of space or a span of time), this is the probability that some counting event occurs within it. For example: a shop averages 10 customers per hour; the probability that 13 customers arrive in one hour follows the Poisson distribution, with "13 customers arrive" as the counting event.
P = (e^(-λ) * λ^X) / X! = (2.7182818^(-10) * 10^13) / 13! = 0.0729
Here λ is the mean, which can be obtained with the arithmetic mean, and e is the natural constant ≈ 2.7182818. The function:

def poisson_fun(chance_x, case_list=[0], mean_num=0):
  chance_x_fact = fact_fun(chance_x)
  e = 2.7182818
  if len_fun(case_list) == 1 and case_list[0] == 0:
    poisson_num = ((e ** (0 - mean_num)) * mean_num ** chance_x) / chance_x_fact
  else:
    mean_num = sum_mean_fun(case_list)
    poisson_num = ((e ** (0 - mean_num)) * mean_num ** chance_x) / chance_x_fact
  return poisson_num

This function needs a note: it really takes two inputs, a mean and the count X whose probability we want. Two parameters have defaults because the caller may pass either a number (the mean directly) or a list (from which the mean is computed); the two paths are visible in the if branch. So there are two ways to call it, for example:

if __name__ == '__main__':
  # first way: pass the mean directly
  poisson_rate = poisson_fun(mean_num=10, chance_x=13)
  print(poisson_rate)
  # second way: pass a list and let the function compute the mean
  case_list = [8, 9, 10, 11, 12]
  poisson_rate = poisson_fun(case_list=case_list, chance_x=13)
  print(poisson_rate)

Before talking about probability, a review of the history functions:

create_rand_list()  # create a list with a specified number of elements
sum_fun()  # cumulative sum
len_fun()  # count of elements
multiply_fun()  # cumulative product
sum_mean_fun()  # arithmetic mean
sum_mean_rate()  # return computed with the arithmetic mean
median_fun()  # median
modes_fun()  # mode
ext_minus_fun()  # range (max minus min)
geom_mean_fun()  # geometric mean
geom_mean_rate()  # return computed with the geometric mean
var_fun()  # sample variance S^2
covar_fun()  # sample standard deviation S
trans_coef_fun()  # coefficient of variation CV
pearson_fun()  # sample correlation coefficient r

—————Above are the old ones—————
—————Below are the new ones—————

Probability left me thoroughly confused; the code below follows my own understanding, so if there are mistakes, corrections are welcome.
One more note: probability is delicate work, so there will be lots of floating point numbers with many significant digits; except in special cases I round to 2 decimal places.
A simple event is an event with only one feature, and the set of all possible events is the sample space. An example:
there are two bags of peanuts. Bag A holds 32 peanuts, 3 of them bad; bag B holds 17 peanuts, 5 of them bad. The sample space of this example is the table below. (I will say: if I picked bag B, I would definitely curse the peanut seller to eat instant noodles with no seasoning.)

Bag | Bad? | Count
A   | 0    | 3
A   | 1    | 29
B   | 0    | 5
B   | 1    | 12

For convenience, True (bad) is written as 0 and False as 1.

1. Simple marginal probability, written P(A)
This one is easy to understand, e.g. the rate at which bad peanuts occur. It is simple enough that it gets no separate code:
P(A) = bad peanuts / total = 8/49 = 0.1633

2. Joint probability
Since it is "joint", two events are needed; it is written P(A∩B), where "∩" means "and".
It is the probability of events A and B occurring as one combined event. Getting a bad peanut out of bag A is a joint probability: event A is "bad peanut" and event B is "bag A".
There is some disagreement here. The more widely used convention is
P(A∩B) = 3/49 = 0.0612
The other is
P(A∩B) = 3/32 * 0.5 = 0.0469
I personally lean toward the second, but it is strongly affected by the other events; consider how different the result would be if bag B held 10000 peanuts with the number of bad ones unchanged.
So the function is:

def unite_rate_fun(condition_count, all_count):
  p_a_with_b = float(condition_count) / all_count
  return p_a_with_b

3. Conditional probability
The probability that one event occurs given that another has already occurred; the more formal phrasing is: given event B, the probability that event A occurs. It of course also works the other way around.
P(A|B) = P(A∩B) / P(B)
and conversely
P(B|A) = P(A∩B) / P(A)
Using the same example: we now know that event B is drawing from bag A, so P(B) = 32/49, and
P(A|B) = (3/49) / (32/49) = 3/32 = 0.0937
The function is:
def condition_rate_fun(p_a_with_b,p_b):
  p_a_from_b = p_a_with_b / p_b
  return p_a_from_b
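A quick numeric check with the peanut numbers above (a sketch using only the two functions just defined):

# joint: bad peanut AND bag A, counted against all 49 peanuts
p_a_with_b = unite_rate_fun(3, 49)          # 3/49 ≈ 0.0612
# conditional: P(bad | bag A) = P(A∩B) / P(B), with P(B) = 32/49
p_b = unite_rate_fun(32, 49)
print(condition_rate_fun(p_a_with_b, p_b))  # 3/32 = 0.09375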

 

The peanut example doesn't suit the content below anymore, so switch to a school scenario: the score distribution of a class on an English exam:

Score | Proportion
20    | 0.1
40    | 0.1
60    | 0.3
80    | 0.4
100   | 0.1

4. Expected value of a random variable
Much like the arithmetic mean; actual outcomes should not deviate from this number too much.
μ = E(X) = Σ Xi * P(Xi), summed over all N outcomes
E(X) = 20 * 0.1 + 40 * 0.1 + 60 * 0.3 + 80 * 0.4 + 100 * 0.1 = 66

def e_x(count_list,rate_list):
  e_len = len_fun(count_list)
  if e_len == len_fun(rate_list):
    e_list = [count_list[i] * rate_list[i] for i in range(e_len)]
    e_num = sum_fun(e_list)
  else: return None
  return e_num
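Checking against the hand calculation above (assumes the history functions len_fun() and sum_fun() are in scope):

score_list = [20, 40, 60, 80, 100]
rate_list = [0.1, 0.1, 0.3, 0.4, 0.1]
print(e_x(score_list, rate_list))  # 66.0, matching the worked number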

5. Variance of a random variable
Plays the same role as the sample variance; not much to add.
σ^2 = Σ (Xi - E(X))^2 * P(Xi), summed over all N outcomes

def var_rand_fun(count_list,rate_list):
  e_num = e_x(count_list,rate_list)
  var_len = len_fun(count_list)
  if var_len == len_fun(rate_list):
    var_list = [((count_list[i] - e_num) ** 2) * rate_list[i] for i in range(var_len)]
    var_num = sum_fun(var_list)
  else: return None
  return var_num

6. Standard deviation of a random variable (again called "covariance" in this series)
The function is simple: just take the square root of the random-variable variance.

def covar_rand_fun(count_list,rate_list):
  var_rand_num = var_rand_fun(count_list,rate_list)
  covar_num = var_rand_num ** 0.5
  return covar_num
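Continuing the same score example (a sketch, same history-function assumption):

# sigma^2 = sum((Xi - 66)^2 * P(Xi)) = 484.0, so sigma = 22.0
print(var_rand_fun(score_list, rate_list))    # 484.0
print(covar_rand_fun(score_list, rate_list))  # 22.0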

7. Joint covariance
σxy = Σ (Xi - E(X)) * (Yi - E(Y)) * P(Xi, Yi), summed over all N outcomes

def covar_rand_xy_fun(x_count_list,y_count_list,xy_rate_list):
  e_x_num = e_x(x_count_list,xy_rate_list)
  e_y_num = e_x(y_count_list,xy_rate_list)
  covar_len = len_fun(x_count_list)
  if covar_len == len_fun(y_count_list) and covar_len == len_fun(xy_rate_list):
    covar_rand_xy_list = [(x_count_list[i] - e_x_num) * (y_count_list[i] - e_y_num) * xy_rate_list[i] for i in range(covar_len)]
    covar_rand_xy_num = sum_fun(covar_rand_xy_list)
  else: return None
  return covar_rand_xy_num

8. Portfolio expected return
The largest return obtainable at relatively small risk:
E(P) = w * E(X) + (1 - w) * E(Y)
where w is the proportion of the portfolio invested in asset X.

def e_p(x_count_list,y_count_list,xy_rate_list):
  e_x_num = e_x(x_count_list,xy_rate_list)
  e_y_num = e_x(y_count_list,xy_rate_list)
  w = sum_fun(x_count_list) / (sum_fun(x_count_list) + sum_fun(y_count_list))
  e_p_num = w * e_x_num + (1 - w) * e_y_num
  return e_p_num

9. Portfolio risk
I haven't fully figured out what this one is for; it should be the deviation of the expected return:
σ(p) = [w^2 * σ(x)^2 + (1 - w)^2 * σ(y)^2 + 2w(1 - w) * σ(xy)]^0.5

def var_p_fun(x_count_list,y_count_list,xy_rate_list):
  w = sum_fun(x_count_list) / (sum_fun(x_count_list) + sum_fun(y_count_list))
  var_rand_x_num = var_rand_fun(x_count_list,xy_rate_list)
  var_rand_y_num = var_rand_fun(y_count_list,xy_rate_list)
  covar_rand_xy_num = covar_rand_xy_fun(x_count_list,y_count_list,xy_rate_list)
  # the first term uses sigma(x)^2 (the original mistakenly reused var_rand_y_num)
  var_p_num = (w * w * var_rand_x_num + (1 - w) * (1 - w) * var_rand_y_num + 2 * w * (1 - w) * covar_rand_xy_num) ** 0.5
  return var_p_num
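A small illustration with made-up asset returns (the lists and probabilities below are invented for the example; w is derived inside the functions as X's share of the combined totals):

x_returns = [50.0, 30.0]     # hypothetical returns of asset X per scenario
y_returns = [20.0, 40.0]     # hypothetical returns of asset Y per scenario
scenario_rates = [0.5, 0.5]  # probability of each scenario
# E(X) = 40, E(Y) = 30, w = 80/140 = 4/7, so E(P) = (4/7)*40 + (3/7)*30 ≈ 35.71
print(e_p(x_returns, y_returns, scenario_rates))
print(var_p_fun(x_returns, y_returns, scenario_rates))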

other: Bayes
This is the one that confused me the most. I suspect my version is not accurate, so treat it only as a reference.

def bayes(true_coef,event_rate,event_bool,manage_num):
  'True = 0, False = 1'
  manage_num = manage_num - 1
  false_coef = 1 - true_coef
  event_count = len_fun(event_rate)
  if event_bool[manage_num] == 0:
    main_rate = event_rate[manage_num] * true_coef
  else:
    main_rate = event_rate[manage_num] * false_coef
  event_true_list = [event_rate[n] * true_coef for n in range(event_count) if event_bool[n] == 0]
  # the false branch weights by false_coef (the original used true_coef here by mistake)
  event_false_list = [event_rate[n] * false_coef for n in range(event_count) if event_bool[n] == 1]
  # normalizing constant: total probability over all events
  event_sum = sum_fun(event_true_list) + sum_fun(event_false_list)
  event_succe_rate = main_rate / event_sum
  return event_succe_rate
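Since the function above is flagged as unreliable, here is the textbook form of Bayes' theorem as an independent check, P(A|B) = P(B|A) * P(A) / P(B), worked on a classic screening example (the numbers are illustrative, not from this post):

# 1% prevalence, 99% sensitivity, 5% false-positive rate
p_d = 0.01       # P(disease)
p_pos_d = 0.99   # P(positive | disease)
p_pos_nd = 0.05  # P(positive | no disease)
# total probability of a positive test
p_pos = p_pos_d * p_d + p_pos_nd * (1 - p_d)
print(p_pos_d * p_d / p_pos)  # P(disease | positive) ≈ 0.1667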

 
