How to Implement a Neural Network Algorithm in 70 Lines of Java Code
import java.util.Random;

public class BpDeep{
    public double[][] layer;                // nodes of each layer of the network
    public double[][] layerErr;             // error term of each node
    public double[][][] layer_weight;       // weights between layers (the extra row holds the bias)
    public double[][][] layer_weight_delta; // momentum term (previous delta) of each weight
    public double mobp;                     // momentum coefficient
    public double rate;                     // learning rate

    public BpDeep(int[] layernum, double rate, double mobp){
        this.mobp = mobp;
        this.rate = rate;
        layer = new double[layernum.length][];
        layerErr = new double[layernum.length][];
        layer_weight = new double[layernum.length][][];
        layer_weight_delta = new double[layernum.length][][];
        Random random = new Random();
        for(int l=0; l<layernum.length; l++){
            layer[l] = new double[layernum[l]];
            layerErr[l] = new double[layernum[l]];
            if(l+1 < layernum.length){
                layer_weight[l] = new double[layernum[l]+1][layernum[l+1]];
                layer_weight_delta[l] = new double[layernum[l]+1][layernum[l+1]];
                for(int j=0; j<layernum[l]+1; j++)
                    for(int i=0; i<layernum[l+1]; i++)
                        layer_weight[l][j][i] = random.nextDouble(); // randomly initialize the weights
            }
        }
    }

    // forward pass: compute the output layer by layer
    public double[] computeOut(double[] in){
        for(int l=1; l<layer.length; l++){
            for(int j=0; j<layer[l].length; j++){
                double z = layer_weight[l-1][layer[l-1].length][j]; // start from the bias weight
                for(int i=0; i<layer[l-1].length; i++){
                    layer[l-1][i] = l==1 ? in[i] : layer[l-1][i];
                    z += layer_weight[l-1][i][j] * layer[l-1][i];
                }
                layer[l][j] = 1/(1+Math.exp(-z)); // sigmoid activation
            }
        }
        return layer[layer.length-1];
    }

    // backward pass: compute the errors layer by layer and update the weights
    public void updateWeight(double[] tar){
        int l = layer.length-1;
        for(int j=0; j<layerErr[l].length; j++)
            layerErr[l][j] = layer[l][j]*(1-layer[l][j])*(tar[j]-layer[l][j]);
        while(l-- > 0){
            for(int j=0; j<layerErr[l].length; j++){
                double z = 0.0;
                for(int i=0; i<layerErr[l+1].length; i++){
                    z = z + (l>0 ? layerErr[l+1][i]*layer_weight[l][j][i] : 0);
                    layer_weight_delta[l][j][i] = mobp*layer_weight_delta[l][j][i] + rate*layerErr[l+1][i]*layer[l][j]; // momentum update for a hidden-layer weight
                    layer_weight[l][j][i] += layer_weight_delta[l][j][i];  // apply the hidden-layer weight update
                    if(j == layerErr[l].length-1){
                        layer_weight_delta[l][j+1][i] = mobp*layer_weight_delta[l][j+1][i] + rate*layerErr[l+1][i]; // momentum update for the bias (intercept) weight
                        layer_weight[l][j+1][i] += layer_weight_delta[l][j+1][i]; // apply the bias weight update
                    }
                }
                layerErr[l][j] = z*layer[l][j]*(1-layer[l][j]); // record the error of this node
            }
        }
    }

    public void train(double[] in, double[] tar){
        double[] out = computeOut(in);
        updateWeight(tar);
    }
}
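For reference, here is a minimal training sketch. It is not part of the original 70-line listing; the network shape (2-10-1), the learning rate, the momentum and the XOR-style training data are illustrative choices only:

import java.util.Arrays;

public class BpDeepTest {
    public static void main(String[] args) {
        // 2 inputs, one hidden layer of 10 nodes, 1 output; rate and momentum are illustrative
        BpDeep bp = new BpDeep(new int[]{2, 10, 1}, 0.15, 0.8);
        double[][] data   = {{1, 2}, {2, 2}, {1, 1}, {2, 1}};
        double[][] target = {{1},    {0},    {0},    {1}};
        // repeat the four samples for a few thousand epochs
        for (int n = 0; n < 5000; n++)
            for (int i = 0; i < data.length; i++)
                bp.train(data[i], target[i]);
        // print what the network now predicts for each sample
        for (int j = 0; j < data.length; j++) {
            double[] result = bp.computeOut(data[j]);
            System.out.println(Arrays.toString(data[j]) + " -> " + Arrays.toString(result));
        }
    }
}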
1. Using the binary-state method to enumerate permutations and combinations. This approach is easy to understand but not efficient, so it is only suitable for small inputs.
The code is as follows:

import java.util.Arrays;

// full permutations generated with the binary/base-state algorithm
public class test {
    public static void main(String[] args) {
        long start = System.currentTimeMillis();
        count1();
        long end = System.currentTimeMillis();
        System.out.println(end - start);
    }

    // enumerate a counter, read it as a string of digits, and keep only the values whose digits form a permutation
    private static void count1() {
        int[] num = new int[]{ };
        for (int i = 0; i < Math.pow( ); i++) {
            String str = Integer.toString(i );
            int sz = str.length();
            for (int j = 0; j < sz; j++) {
                str = " " + str;              // pad with leading digits up to a fixed width
            }
            char[] temp = str.toCharArray();
            Arrays.sort(temp);
            String gl = new String(temp);
            if (!gl.equals(" ")) {            // the sorted digits must be exactly 0..n-1
                continue;
            }
            String result = "";
            for (int m = 0; m < str.length(); m++) {
                result += num[Integer.parseInt(str.charAt(m) + "")];
            }
            System.out.println(result);
        }
    }

    // odometer version: increment a base-n counter array and keep the states whose digits form a permutation
    public static void count2() {
        int[] num = new int[]{ };
        int[] ss = new int[]{ };
        int[] temp = new int[ ];
        while (temp[ ] ) {
            temp[temp.length - 1]++;
            for (int i = temp.length - 1; i > 0; i--) {
                if (temp[i] == ) {
                    temp[i] = 0;
                    temp[i - 1]++;
                }
            }
            int[] tt = temp.clone();
            Arrays.sort(tt);
            if (!Arrays.equals(tt, ss)) {
                continue;
            }
            String result = "";
            for (int i = 0; i < num.length; i++) {
                result += num[temp[i]];
            }
            System.out.println(result);
        }
    }
}
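To make the state-counting idea concrete, here is a minimal, self-contained sketch for the combination side: the bits of an integer counter act as selection flags, so bit i set means num[i] is chosen. The sample array is a placeholder:

public class BitmaskCombinations {
    public static void main(String[] args) {
        int[] num = {1, 2, 3, 4};   // placeholder data
        // every integer from 1 to 2^n - 1 is one selection state
        for (int mask = 1; mask < (1 << num.length); mask++) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < num.length; i++) {
                if ((mask & (1 << i)) != 0) {   // bit i set: take num[i]
                    sb.append(num[i]).append(' ');
                }
            }
            System.out.println(sb.toString().trim());
        }
    }
}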
2. Using recursion to compute permutations and combinations; this takes noticeably more code.
The code is as follows:

package practice;

import java.util.ArrayList;
import java.util.List;
public class Test {
    /**
     * @param args
     */
    public static void main(String[] args) {
        // TODO Auto-generated method stub
        Object[] tmp = { };
        // ArrayList<Object[]> rs = RandomC(tmp);
        ArrayList<Object[]> rs = cmn(tmp, );
        for (int i = 0; i < rs.size(); i++) {
            // System.out.print(i + "=");
            for (int j = 0; j < rs.get(i).length; j++) {
                System.out.print(rs.get(i)[j] + " ");
            }
            System.out.println();
        }
    }
    // enumerate every (non-empty) combination of an array, recursively
    static ArrayList<Object[]> RandomC(Object[] source) {
        ArrayList<Object[]> result = new ArrayList<Object[]>();
        if (source.length == 1) {
            result.add(source);
        } else {
            Object[] psource = new Object[source.length - 1];
            for (int i = 0; i < psource.length; i++) {
                psource[i] = source[i];
            }
            result = RandomC(psource);                 // combinations that do not use the last element
            int len = result.size();                   // number of combinations found so far
            result.add(new Object[]{source[source.length - 1]});
            for (int i = 0; i < len; i++) {
                Object[] tmp = new Object[result.get(i).length + 1];
                for (int j = 0; j < tmp.length - 1; j++) {
                    tmp[j] = result.get(i)[j];
                }
                tmp[tmp.length - 1] = source[source.length - 1];   // append the last element
                result.add(tmp);
            }
        }
        return result;
    }

    // enumerate every combination of exactly n elements, recursively
    static ArrayList<Object[]> cmn(Object[] source, int n) {
        ArrayList<Object[]> result = new ArrayList<Object[]>();
        if (n == 1) {
            for (int i = 0; i < source.length; i++) {
                result.add(new Object[]{source[i]});
            }
        } else if (source.length == n) {
            result.add(source);
        } else {
            Object[] psource = new Object[source.length - 1];
            for (int i = 0; i < psource.length; i++) {
                psource[i] = source[i];
            }
            result = cmn(psource, n);                      // combinations that skip the last element
            ArrayList<Object[]> tmp = cmn(psource, n - 1); // combinations that will take the last element
            for (int i = 0; i < tmp.size(); i++) {
                Object[] rs = new Object[n];
                for (int j = 0; j < n - 1; j++) {
                    rs[j] = tmp.get(i)[j];
                }
                rs[n - 1] = source[source.length - 1];
                result.add(rs);
            }
        }
        return result;
    }
}
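A quick way to exercise these helpers from Test's main (the array contents and the size argument are made-up examples):

Object[] src = {"a", "b", "c", "d"};
for (Object[] combo : cmn(src, 2)) {            // the 6 two-element combinations
    System.out.println(java.util.Arrays.toString(combo));
}
System.out.println(RandomC(src).size());        // 15 non-empty subsets of a 4-element array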
3. Using the idea of dynamic programming to compute permutations and combinations.
The code is as follows:

package Acm;

// a powerful combination enumerator
public class MainApp {
    public static void main(String[] args) {
        int[] num = new int[]{ };
        String str = "";
        // combinations of a fixed number of elements:
        // count(0, str, num, n);
        // combinations of every size:
        count2(0, str, num);
    }
    // print every subset: at each index, either skip num[i] or append it
    private static void count2(int i, String str, int[] num) {
        if (i == num.length) {
            System.out.println(str);
            return;
        }
        count2(i + 1, str, num);                  // skip num[i]
        count2(i + 1, str + num[i] + " ", num);   // take num[i]
    }
    // print every combination of exactly n elements
    private static void count(int i, String str, int[] num, int n) {
        if (n == 0) {
            System.out.println(str);
            return;
        }
        if (i == num.length) {
            return;
        }
        count(i + 1, str + num[i] + " ", num, n - 1); // take num[i]
        count(i + 1, str, num, n);                    // skip num[i]
    }
}
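For instance (made-up values), filling main with

    int[] num = new int[]{1, 2, 3};
    count(0, "", num, 2);   // prints the size-2 combinations: 1 2, 1 3, 2 3
    count2(0, "", num);     // prints all 8 subsets, including one empty line for the empty set

shows the two entry points side by side.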
Below is the permutation case.
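A minimal recursive sketch of one common approach (not the article's original listing): fix positions from left to right and swap each remaining element into the current position. The sample array is a placeholder:

public class Permutations {
    public static void main(String[] args) {
        perm(new int[]{1, 2, 3}, 0);   // placeholder data
    }

    // fix position k: swap each remaining element into it, recurse, then swap back
    private static void perm(int[] num, int k) {
        if (k == num.length) {
            StringBuilder sb = new StringBuilder();
            for (int v : num) sb.append(v).append(' ');
            System.out.println(sb.toString().trim());
            return;
        }
        for (int i = k; i < num.length; i++) {
            int t = num[k]; num[k] = num[i]; num[i] = t;   // choose num[i] for position k
            perm(num, k + 1);
            t = num[k]; num[k] = num[i]; num[i] = t;       // undo the swap
        }
    }
}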
The algorithm is not mine; if you are interested, look up the paper "Distance Transforms of Sampled Functions". There are many implementations online, but they tend to be poorly structured and scattered rather than forming one complete algorithm, so I tidied it up into a single class: a simple call is enough to produce the result image. The underlying principle is hard to explain briefly; roughly, it is based on energy minimization and applies a sampled 1D distance transform along the rows and the columns separately.
package com.gloomyfish.image.transform;
import java.awt.image.BufferedImage;
import com.gloomyfish.filter.study.GrayFilter;
/**
*
* @author gloomyfish
*
*/
public class FastDistanceTransformAlg extends GrayFilter {
public final static double INF = 1E20;
private int backgroundColor = 0; // default black
public int getBackgroundColor() {
return backgroundColor;
}
public void setBackgroundColor(int backgroundColor) {
this.backgroundColor = backgroundColor;
}
@Override
public BufferedImage filter(BufferedImage src, BufferedImage dest) {
int width = src.getWidth();
int height = src.getHeight();
dest = super.filter(src, null);
//
int[] inPixels = new int[width*height];
float[] outPixels = new float[width*height];
getRGB( dest, 0, 0, width, height, inPixels );
int index = 0;
for(int row=0; row<height; row++) {
int tr = 0;
for(int col=0; col<width; col++) {
index = row * width + col;
tr = (inPixels[index] >> 16) & 0xff; // red channel of the grayscale image
if(tr == backgroundColor)
outPixels[index] = (float)INF;
else
outPixels[index] = 0;
}
}
// transform along columns
float[] f = new float[Math.max(width, height)];
for(int col=0; col<width; col++) {
for(int row=0; row<height; row++) {
index = row * width + col;
f[row] = outPixels[index];
}
float[] disColumns = distance1DTransform(f, height);
for(int row=0; row<height; row++) {
index = row * width + col;
outPixels[index] = disColumns[row];
}
}
// transform along rows
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
index = row * width + col;
f[col] = outPixels[index];
}
float[] disColumns = distance1DTransform(f, width);
for (int col = 0; col < width; col++) {
index = row * width + col;
outPixels[index] = disColumns[col];
}
}
// post sqrt calculation
int[] result = new int[width*height];
for(int row=0; row<height; row++) {
for(int col=0; col<width; col++) {
index = row * width + col;
int pc = clamp(Math.sqrt(outPixels[index]));
result[index] = (255 << 24) | (pc << 16) | (pc << 8) | pc; // pack the distance as an opaque gray ARGB pixel
}
}
setRGB( dest, 0, 0, width, height, result );
return dest;
}
public static int clamp(double c)
{
return c > 255 ? 255 : (c < 0 ? 0 : (int) c);
}
/**
 * 1D distance transform using squared distance
 *
 * @param f the sampled values of one row or column
 * @param n the number of samples to transform
 * @return the 1D squared-distance transform of f
 */
private float[] distance1DTransform(float[] f, int n)
{
float[] d = new float[n];
int[] v = new int[n];
double[] z = new double[n+1];
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= n-1; q++) {
double s = ((f[q]+square(q))-(f[v[k]]+square(v[k])))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((f[q]+square(q))-(f[v[k]]+square(v[k])))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= n-1; q++) {
while (z[k+1] < q)
k++;
d[q] = (float)square(q-v[k]) + f[v[k]];
}
return d;
}
private double square(double v)
{
return v*v;
}
}
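A minimal usage sketch, assuming the GrayFilter base class is available on the classpath; the file names are placeholders:

import java.awt.image.BufferedImage;
import java.io.File;
import javax.imageio.ImageIO;
import com.gloomyfish.image.transform.FastDistanceTransformAlg;

public class FastDistanceTransformDemo {
    public static void main(String[] args) throws Exception {
        BufferedImage src = ImageIO.read(new File("binary.png"));  // any black/white input image
        FastDistanceTransformAlg dt = new FastDistanceTransformAlg();
        dt.setBackgroundColor(0);                                   // treat black pixels as background
        BufferedImage dest = dt.filter(src, null);                  // run the transform
        ImageIO.write(dest, "png", new File("distance-map.png"));  // write out the gray distance map
    }
}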
The neural network structure is as follows: the leftmost layer is the input layer, the rightmost is the output layer, and in between sit one or more hidden layers. Each node in the hidden and output layers is obtained by multiplying the previous layer's nodes by their weights and summing them up; the circle labelled "+1" is the intercept (bias) term b. For every node outside the input layer: Y = w0*x0 + w1*x1 + ... + wn*xn + b. In other words, a neural network behaves like a stack of logistic regressions, which is exactly what the BpDeep class above implements.
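As a concrete, made-up illustration of that formula together with the sigmoid used in computeOut:

public class NodeDemo {
    public static void main(String[] args) {
        // one node with two inputs: z = w0*x0 + w1*x1 + b, then the sigmoid squashes z into (0, 1)
        double x0 = 1.0, x1 = 0.0;             // inputs (illustrative)
        double w0 = 0.4, w1 = -0.7, b = 0.1;   // weights and bias (illustrative)
        double z = w0 * x0 + w1 * x1 + b;      // 0.5
        double y = 1 / (1 + Math.exp(-z));     // about 0.62, the node's output
        System.out.println(y);
    }
}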