Solving the Poisson equation with TensorFlow
2022-06-27 15:42:00 【Galerkin码农选手】
I wrote this code two years ago, when my coding skills were still weak, so it contains some bugs and the accuracy of the resulting Poisson solver is not great. Interested readers can modify the code themselves to improve the numerical accuracy.
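Concretely, both scripts below solve the homogeneous Dirichlet problem

$$-\Delta u = f \quad \text{in } \Omega = (0,1)^2, \qquad u = 0 \quad \text{on } \partial\Omega,$$

with exact solution $u(x,y) = e^{\pi(x+y)}\sin(\pi x)\sin(\pi y)$ and right-hand side $f(x,y) = -2\pi^2 e^{\pi(x+y)}\sin(\pi(x+y))$, by training a small network $u_\theta$ to minimize the Deep Ritz energy plus a boundary penalty:

$$J(u_\theta) = \frac{1}{2}\int_\Omega |\nabla u_\theta|^2\,\mathrm{d}x - \int_\Omega f\,u_\theta\,\mathrm{d}x + \beta \int_{\partial\Omega} u_\theta^2\,\mathrm{d}s,$$

where the integrals are approximated by averages over grid points (the domain has unit area, so means and integrals coincide up to quadrature error) and $\beta$ is the boundary penalty weight (`belta = 5e3` in the code).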
Code 1: plain-script version
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time

tf.compat.v1.disable_eager_execution()  # the script uses TF1-style placeholders and sessions
tic = time.time()
h1 = 0.1   # grid spacing in x
h2 = 0.05  # grid spacing in y
M = int(1/h1) + 1
N = int(1/h2) + 1
x_train = np.linspace(0,1,M)
y_train = np.linspace(0,1,N)
matr = np.zeros([M,N,2])  # store all grid points; each entry holds the coordinates of one grid point
for i in range(M):
    for j in range(N):
        matr[i,j,0] = x_train[i]
        matr[i,j,1] = y_train[j]
ma = matr.reshape(-1,2)
print(ma.shape)
ma_in = matr[1:-1,1:-1].reshape(-1,2)  # interior grid points, one coordinate pair per row
print(ma_in.shape)
ma_b = np.concatenate([matr[0],matr[-1],matr[:,0][1:-1],matr[:,-1][1:-1]],0)  # boundary points
print(ma_b.shape)
def f(x,y):  # right-hand side f of the PDE -Δu = f
    return - (2*np.pi**2)*np.exp(np.pi*(x + y))*np.sin(np.pi*(x + y))
def u_accuracy(x,y):  # exact solution
    return np.exp(np.pi*(x + y))*np.sin(np.pi*x)*np.sin(np.pi*y)
def u_boundary(x,y):  # Dirichlet boundary values (homogeneous)
    return 0*x*y
right_in = f(ma_in[:,0],ma_in[:,1]).reshape(-1,1)
right_b = u_boundary(ma_b[:,0],ma_b[:,1]).reshape(-1,1)
print(right_in.shape,right_b.shape)
ma_min = np.array([[0,0]])
ma_max = np.array([[1,1]])
np.random.seed(1234)
tf.compat.v1.set_random_seed(1234)
# build the TensorFlow graph (TF1-style placeholders)
prob_tf = tf.compat.v1.placeholder(tf.float32)
x_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])
x_in_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])
x_b_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])
right_in_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])
right_b_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])
x_min_tf = tf.compat.v1.placeholder(tf.float32,shape = [1,2])
x_max_tf = tf.compat.v1.placeholder(tf.float32,shape = [1,2])
layers = [2,10,10,1]
H = 2*(x_tf - x_min_tf)/(x_max_tf - x_min_tf + 1e-7) - 1
H_in = 2*(x_in_tf - x_min_tf)/(x_max_tf - x_min_tf + 1e-7) - 1
H_b = 2*(x_b_tf - x_min_tf)/(x_max_tf - x_min_tf + 1e-7) - 1
# define weights and biases
def w_init(in_dim,out_dim):  # Glorot-style initialization
    w_std = np.sqrt(2/(in_dim + out_dim))
    return tf.Variable(tf.random.truncated_normal([in_dim, out_dim], stddev = w_std), dtype=tf.float32)
weights = []
biases = []
num_layers = len(layers)
for i in range(0,num_layers - 1):
    w = w_init(in_dim = layers[i],out_dim = layers[i + 1])
    b = tf.Variable(tf.random.truncated_normal([1,layers[i + 1]], dtype = tf.float32), dtype=tf.float32)
    weights.append(w)
    biases.append(b)
# forward pass: build the network output (the approximate solution)
num_layers = len(weights)  # one less than len(layers)
for i in range(0,num_layers - 1):
    w = weights[i]
    b = biases[i]
    E = tf.eye(tf.shape(w)[0],tf.shape(w)[1])  # rectangular "identity" for a linear skip connection
    w = tf.nn.dropout(w, rate = prob_tf)  # fixed: the original discarded the dropout result, making it a no-op
    H_in = tf.tanh(tf.add(tf.matmul(H_in,w), b)) + tf.matmul(H_in,E)
    H_b = tf.tanh(tf.add(tf.matmul(H_b,w), b)) + tf.matmul(H_b,E)
    H = tf.tanh(tf.add(tf.matmul(H,w), b)) + tf.matmul(H,E)
W = weights[-1]
b = biases[-1]
u_in = tf.add(tf.matmul(H_in,W),b)
u_b = tf.add(tf.matmul(H_b,W),b)
u = tf.add(tf.matmul(H,W),b)
# loss: Deep Ritz energy for the interior plus a boundary penalty
u_x = tf.gradients(u_in,x_in_tf)[0][:,0:1]  # slice with 0:1 to keep shape [None,1] and avoid broadcasting against right_in_tf
u_y = tf.gradients(u_in,x_in_tf)[0][:,1:2]
u_xx = tf.gradients(u_x,x_in_tf)[0][:,0:1]
u_yy = tf.gradients(u_y,x_in_tf)[0][:,1:2]
loss_in = 0.5*tf.reduce_mean(tf.square(u_x) + tf.square(u_y) - 2*u_in*right_in_tf)
#loss_in = 0.5*tf.reduce_mean((-u_xx - u_yy - 2*right_in_tf)*u_in)  # alternative: strong-form residual pairing
loss_b = tf.reduce_mean(tf.square(u_b - right_b_tf))
belta = 5e3  # penalty weight on the boundary term
loss = tf.add(loss_in,belta*loss_b)
# loss-history visualization
def plot_curve(data):  # visualize the training-loss history
    fig = plt.figure(num = 1,figsize = (4,3),dpi = None,facecolor = None,edgecolor = None,frameon = True)  # figure number, size, dpi, background/edge color, frame
    plt.plot(range(len(data)),data,color = 'blue')
    plt.legend(['loss'],loc = 'upper right')
    plt.xlabel('record index')
    plt.ylabel('loss')
    plt.show()
lr = 5*1e-3
optim = tf.compat.v1.train.AdamOptimizer(learning_rate = lr)
trainer = optim.minimize(loss)
sess = tf.compat.v1.Session()
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
train_loss = []
step = []
error = []
train_step = 6000
for i in range(train_step):
    batch = 19
    st = i*batch % len(ma_in)  # cycle through the interior points in mini-batches
    end = st + batch
    feed = {prob_tf:0.5, x_tf:ma, x_in_tf:ma_in[st:end], x_b_tf:ma_b,
            right_in_tf:right_in[st:end], right_b_tf:right_b,
            x_min_tf:ma_min, x_max_tf:ma_max}
    sess.run(trainer, feed_dict = feed)
    if i%600 == 0:
        print('train_step = {}, loss = {}'.format(i, sess.run(loss, feed_dict = feed)))
        train_loss.append(sess.run(loss, feed_dict = feed))
        # evaluate on the full grid; feed prob_tf = 0.0 so dropout is off at evaluation
        # (the original fed no prob_tf here, which raises a placeholder error)
        feed_val = {prob_tf:0.0, x_tf:ma, x_in_tf:ma_in, x_b_tf:ma_b,
                    right_in_tf:right_in, right_b_tf:right_b,
                    x_min_tf:ma_min, x_max_tf:ma_max}
        u_pred = sess.run(u, feed_dict = feed_val).reshape(M,N)
        x,y = np.meshgrid(x_train,y_train)
        error_square = ((u_pred - u_accuracy(x,y).T)**2).sum()/(u_accuracy(x,y)**2 + 1e-7).sum()
        error_step = np.sqrt(error_square)  # relative L2 error on the full grid
        error.append(error_step)
        step.append(i)
        print(error_step)
toc = time.time()
plot_curve(train_loss)
print(toc - tic)  # total training time in seconds
# final evaluation on the full grid (prob_tf = 0.0: no dropout at inference)
feed = {prob_tf:0.0, x_tf:ma, x_in_tf:ma_in, x_b_tf:ma_b,
        right_in_tf:right_in, right_b_tf:right_b,
        x_min_tf:ma_min, x_max_tf:ma_max}
u_pred = sess.run(u, feed_dict = feed).reshape(M,N)
print(type(u_pred), u_pred.shape)
x,y = np.meshgrid(x_train,y_train)
print(type(u_accuracy(x,y)), u_accuracy(x,y).shape)
error_square = ((u_pred - u_accuracy(x,y).T)**2).sum()/(u_accuracy(x,y)**2 + 1e-7).sum()
error = np.sqrt(error_square)  # relative L2 error of the final prediction
print(error)
plt.subplot(2,1,1)
x,y = np.meshgrid(x_train,y_train)
plt.contourf(x,y,u_accuracy(x,y),40,cmap = 'Blues')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('exact solution')
plt.subplot(2,1,2)
plt.contourf(x,y,u_pred.T,40,cmap = 'Blues')  # u_pred is (M,N) = (x,y)-ordered, so transpose for contourf
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('numerical solution')
plt.show()
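A quick way to convince yourself that the right-hand side really matches the exact solution is a finite-difference check of $-\Delta u = f$ at a sample point. A minimal sketch, not part of the original script (the point (0.3, 0.6) is an arbitrary interior point):

import numpy as np

def u_exact(x, y):
    return np.exp(np.pi*(x + y))*np.sin(np.pi*x)*np.sin(np.pi*y)

def f_rhs(x, y):
    return -(2*np.pi**2)*np.exp(np.pi*(x + y))*np.sin(np.pi*(x + y))

h = 1e-3
x0, y0 = 0.3, 0.6  # arbitrary interior point
# 5-point finite-difference approximation of the Laplacian
lap = (u_exact(x0+h, y0) + u_exact(x0-h, y0) + u_exact(x0, y0+h) + u_exact(x0, y0-h)
       - 4*u_exact(x0, y0)) / h**2
print(-lap, f_rhs(x0, y0))  # the two values should agree to a few decimal places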
Code 2: class-based version
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time

tf.compat.v1.disable_eager_execution()  # the class below uses TF1-style placeholders and sessions
np.random.seed(1234)
tf.compat.v1.set_random_seed(1234)
def f(x,y):  # right-hand side f of the PDE -Δu = f
    return - (2*np.pi**2)*np.exp(np.pi*(x + y))*np.sin(np.pi*(x + y))
def u_accuracy(x,y):  # exact solution
    return np.exp(np.pi*(x + y))*np.sin(np.pi*x)*np.sin(np.pi*y)
def u_boundary(x,y):  # Dirichlet boundary values (homogeneous)
    return 0*x*y
# interior training points
class INSET():  # training set: points inside the domain (cell centers)
    def __init__(self):
        self.dim = 2
        self.xa,self.xb,self.ya,self.yb = 0,1,0,1
        self.area = (self.xb - self.xa)*(self.yb - self.ya)
        self.nx,self.ny = 20,30
        self.hx = (self.xb - self.xa)/self.nx
        self.hy = (self.yb - self.ya)/self.ny
        self.size = self.nx*self.ny
        self.X = np.zeros([self.size,self.dim])
        for i in range(self.nx):
            for j in range(self.ny):
                self.X[i*self.ny + j,0] = self.xa + (i + 0.5)*self.hx
                self.X[i*self.ny + j,1] = self.ya + (j + 0.5)*self.hy
        self.u_acc = u_accuracy(self.X[:,0],self.X[:,1]).reshape(-1,1)  # exact solution at interior points
        self.right = f(self.X[:,0],self.X[:,1]).reshape(-1,1)  # right-hand side at interior points
# boundary training points
class BDSET():  # training set: points on the domain boundary (edge midpoints)
    def __init__(self):
        self.dim = 2
        self.xa,self.xb,self.ya,self.yb = 0,1,0,1
        self.area = (self.xb - self.xa)*(self.yb - self.ya)
        self.length = 2*((self.xb - self.xa) + (self.yb - self.ya))
        self.nx,self.ny = 20,30
        self.hx = (self.xb - self.xa)/self.nx
        self.hy = (self.yb - self.ya)/self.ny
        self.size = 2*(self.nx + self.ny)
        self.X = np.zeros([self.size,self.dim])
        for i in range(self.nx):
            for j in range(self.ny):
                self.X[i,0] = self.xa + (i + 0.5)*self.hx  # bottom edge
                self.X[i,1] = self.ya
                self.X[self.nx + j,0] = self.xb  # right edge
                self.X[self.nx + j,1] = self.ya + (j + 0.5)*self.hy
                self.X[self.nx + self.ny + i,0] = self.xb - (i + 0.5)*self.hx  # top edge (fixed: was xb - xa - ..., which only works when xa = 0)
                self.X[self.nx + self.ny + i,1] = self.yb
                self.X[2*self.nx + self.ny + j,0] = self.xa  # left edge
                self.X[2*self.nx + self.ny + j,1] = self.yb - (j + 0.5)*self.hy  # (same fix as the top edge)
        self.u_acc = u_boundary(self.X[:,0],self.X[:,1]).reshape(-1,1)  # exact values at boundary points
# test set
class TESET():  # test set: a uniform grid that includes the boundary
    def __init__(self):
        self.dim = 2
        self.xa,self.xb,self.ya,self.yb = 0,1,0,1
        self.hx = 0.1
        self.hy = 0.05
        self.nx = int((self.xb - self.xa)/self.hx) + 1
        self.ny = int((self.yb - self.ya)/self.hy) + 1  # fixed: the original divided by hx here
        self.size = self.nx*self.ny
        self.X = np.zeros([self.size,self.dim])
        for j in range(self.ny):
            for i in range(self.nx):
                self.X[j*self.nx + i,0] = self.xa + i*self.hx
                self.X[j*self.nx + i,1] = self.ya + j*self.hy
        self.u_acc = u_accuracy(self.X[:,0],self.X[:,1]).reshape(-1,1)  # exact solution on the test grid
class NN:
    def __init__(self,inset,bdset,layers,belta):
        self.inset = inset  # interior training points
        self.bdset = bdset  # boundary training points
        self.datamin = np.array([[inset.xa,inset.ya]])
        self.datamax = np.array([[inset.xb,inset.yb]])
        self.layers = layers  # network architecture
        self.inset_x_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])  # interior points
        self.bdset_x_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,2])  # boundary points
        self.right_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])  # right-hand side at interior points
        self.ub_tf = tf.compat.v1.placeholder(tf.float32,shape = [None,1])  # boundary values
        self.min = tf.compat.v1.placeholder(tf.float32,shape = [None,2])  # lower bounds of the two axes
        self.max = tf.compat.v1.placeholder(tf.float32,shape = [None,2])  # upper bounds of the two axes
        self.feed = {
            self.inset_x_tf:inset.X,self.bdset_x_tf:bdset.X,
            self.right_tf:inset.right,self.ub_tf:bdset.u_acc,
            self.min:self.datamin,self.max:self.datamax}  # fixed: the original referenced the undefined globals datamin/datamax
        self.Hin = 2*(self.inset_x_tf - self.min)/(self.max - self.min) - 1  # normalize to [-1,1]
        self.Hbd = 2*(self.bdset_x_tf - self.min)/(self.max - self.min) - 1  # normalize to [-1,1]
        self.weights,self.biases = self.NNinit()  # initialize weights and biases once
        self.u_in = self.NNU(self.Hin)  # network approximation at interior points
        self.u_b = self.NNU(self.Hbd)  # network approximation at boundary points
        self.ux,self.uy = self.u_grad(self.inset_x_tf)  # first-order partials at interior points
        self.loss_in = tf.reduce_mean(tf.square(self.ux) + tf.square(self.uy)) -\
            tf.reduce_mean(2*self.u_in*self.right_tf)  # interior loss from the minimum-energy (Ritz) principle
        self.loss_b = tf.reduce_mean(tf.square(self.u_b - self.ub_tf))  # boundary penalty
        self.loss = self.loss_in + belta*self.loss_b  # total loss
        self.optim = tf.compat.v1.train.AdamOptimizer(learning_rate = 5e-3)  # optimizer
        self.minimizer = self.optim.minimize(self.loss)
        #config = tf.compat.v1.ConfigProto(allow_soft_placement=True, log_device_placement=True)
        #self.sess = tf.compat.v1.Session(config=config)
        self.sess = tf.compat.v1.Session()  # create the session
        init = tf.compat.v1.global_variables_initializer()  # initialize variables
        self.sess.run(init)
    def w_init(self,in_dim,out_dim):  # Glorot-style weight initialization
        w_std = np.sqrt(2/(in_dim + out_dim))
        return tf.Variable(tf.random.truncated_normal([in_dim, out_dim], stddev = w_std), dtype=tf.float32)
    def NNinit(self):  # initialize weights and biases
        weights = []
        biases = []
        num = len(self.layers)
        for i in range(0,num - 1):
            w = self.w_init(in_dim = self.layers[i],out_dim = self.layers[i + 1])
            b = tf.Variable(tf.random.truncated_normal([1,self.layers[i + 1]], dtype = tf.float32), dtype=tf.float32)
            weights.append(w)
            biases.append(b)
        return weights,biases
    def NNU(self,Input):  # network forward pass
        Input = tf.cast(Input,tf.float32)
        weights,biases = self.weights,self.biases  # fixed: the original called NNinit() here, creating fresh random weights on every call
        num = len(weights)
        for i in range(0,num - 1):
            w,b = weights[i],biases[i]
            Input = tf.tanh(tf.add(tf.matmul(Input,w),b))
        W,B = weights[-1],biases[-1]
        return tf.add(tf.matmul(Input,W),B)
    def u_grad(self,Input):  # gradient of the network output w.r.t. the raw coordinates
        Input = tf.cast(Input,tf.float32)
        H = 2*(Input - self.min)/(self.max - self.min) - 1  # fixed: apply the same normalization used for u_in
        u = self.NNU(H)
        ux = tf.gradients(u,Input)[0][:,0:1]
        uy = tf.gradients(u,Input)[0][:,1:2]
        return ux,uy
    def ERROR(self):  # relative error between exact and approximate solutions on the training set
        fenzi_in = tf.reduce_mean(tf.square(self.u_in - self.inset.u_acc))
        fenzi_b = tf.reduce_mean(tf.square(self.u_b - self.bdset.u_acc))
        fenmu_in = tf.reduce_mean(tf.square(self.u_in))
        fenmu_b = tf.reduce_mean(tf.square(self.u_b))
        fenzi = fenzi_in + fenzi_b
        fenmu = fenmu_in + fenmu_b
        return tf.sqrt(fenzi/fenmu)
    def train(self,step):  # full-batch training loop
        print('train the neural network')
        st_time = time.time()
        err_op = self.ERROR()  # build the error op once, not inside the loop, to avoid graph bloat
        LOSS = self.sess.run(self.loss,feed_dict = self.feed)
        loss_best = LOSS
        weights_best = self.sess.run(self.weights)
        biases_best = self.sess.run(self.biases)
        record = 400
        for j in range(step):
            self.sess.run(self.minimizer,feed_dict = self.feed)  # one optimization step
            if j%record == 0:
                error = self.sess.run(err_op,feed_dict = self.feed)
                LOSS = self.sess.run(self.loss,feed_dict = self.feed)
                print('train step:%d,loss:%.2f,error:%.2f'
                      %(j,LOSS,error))  # report loss and training-set error
                if LOSS < loss_best:
                    loss_best = LOSS
                    weights_best = self.sess.run(self.weights)  # snapshot the best weights
                    biases_best = self.sess.run(self.biases)  # snapshot the best biases
        epo_time = time.time() - st_time
        print('one epoch used:%.2f'%(epo_time))
        print('------------------------------------------------')
    def trainbatch(self,step):  # mini-batch training, meant to improve accuracy and reduce time
        print('train the neural network')
        st_time = time.time()
        batch = self.inset.nx
        err_op = self.ERROR()
        LOSS = self.sess.run(self.loss,feed_dict = self.feed)
        loss_best = LOSS
        weights_best = self.sess.run(self.weights)
        biases_best = self.sess.run(self.biases)
        record = 100
        for j in range(step):
            x = j*batch % len(self.inset.X)  # fixed: the original used the undefined loop variable i
            y = x + batch
            feed = {
                self.inset_x_tf:self.inset.X[x:y],self.bdset_x_tf:self.bdset.X,
                self.right_tf:self.inset.right[x:y],self.ub_tf:self.bdset.u_acc,
                self.min:self.datamin,self.max:self.datamax}  # fixed: use instance attributes rather than globals
            self.sess.run(self.minimizer,feed_dict = feed)
            if j%record == 0:
                error = self.sess.run(err_op,feed_dict = self.feed)
                LOSS = self.sess.run(self.loss,feed_dict = self.feed)
                print('train step:%d,loss:%.2f,error:%.2f'
                      %(j,LOSS,error))
                #if LOSS < loss_best:
                #    loss_best = LOSS
                #    weights_best = self.sess.run(self.weights)
                #    biases_best = self.sess.run(self.biases)
        epo_time = time.time() - st_time
        print('one epoch used:%.2f'%(epo_time))
        print('------------------------------------------------')
inset = INSET()
bdset = BDSET()
teset = TESET()
layers = [2,20,10,1]
belta = 5e3
ne = NN(inset,bdset,layers,belta)  # instantiate the solver
epochs = 3
st = time.time()
for i in range(epochs):  # training
    print('epochs:%d'%(i))
    #ne.trainbatch(500)
    ne.train(2000)
ed = time.time()
print('all used time:%.2f'%(ed - st))
# evaluate the approximation and its error on the test set
feed = {ne.inset_x_tf:teset.X, ne.min:ne.datamin, ne.max:ne.datamax}
u_pred = ne.sess.run(ne.u_in,feed_dict = feed)  # reuse the interior branch u_in on the test grid
u_acc = np.array(teset.u_acc,dtype = np.float32)  # exact solution on the test set
def LERROR(u_pred,u_acc):  # relative L2 error
    fenzi = np.square(u_pred - u_acc).sum()
    fenmu = (np.square(u_acc) + 1e-7).sum()
    return np.sqrt(fenzi/fenmu)
print('the test error:%.3f'%(LERROR(u_pred,u_acc)))  # print the test error
M = teset.nx
N = teset.ny
x_train = np.linspace(teset.xa,teset.xb,M)
y_train = np.linspace(teset.ya,teset.yb,N)
x,y = np.meshgrid(x_train,y_train)
plt.contourf(x,y,u_pred.reshape(N,M),40,cmap = 'Blues')  # fixed: teset.X is ordered y-row by y-row, so reshape to (ny,nx)
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('numerical solution')
plt.show()
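Beyond the single relative-error number, it helps to see where the approximation is worst. A small optional extension (my addition, not in the original post) that plots the pointwise absolute error on the same test grid, reusing u_pred, u_acc, x, y, M, N from the script above:

# plot |u_pred - u_acc| over the test grid (errors typically concentrate near the corner (1,1), where u is largest)
err_abs = np.abs(u_pred.reshape(N,M) - u_acc.reshape(N,M))
plt.contourf(x, y, err_abs, 40, cmap = 'Reds')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('pointwise absolute error')
plt.show()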