The LSTM is one of the most interesting structures in the RNN family. It not only lets a model learn from long sequences, but also introduces a long short-term memory cell whose stored value can be updated whenever needed.
Forget gate
The forget gate combines the current input with the previous hidden state and decides how the cell state is updated. The input x_t and the previous hidden state h_{t-1} are multiplied by their weight matrices, the bias terms are added, and the sum is passed through a sigmoid activation, so the gate value lies in [0, 1]. The cell state from the previous time step, c_{t-1}, is then multiplied element-wise by this gate value, as shown in the equation below.
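In the standard formulation (the same one implemented by the code later in this post), the forget gate is

f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf})

and c_{t-1} is scaled element-wise by f_t when the new cell state is formed.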
輸入門
當(dāng)有輸入進(jìn)入時(shí),輸入門會(huì)結(jié)合輸入信息與隱藏信息進(jìn)行整合,并對(duì)信息進(jìn)行更替
過(guò)程與 過(guò)程類似,中間公式使用了tanh函數(shù),可以將輸出縮放到[-1,1]之間,再更新
輸出門
輸出門也會(huì)對(duì)輸出過(guò)程進(jìn)行控制,與輸入門不同的是,輸出門使用tannh激活函數(shù)
Putting the gates together, the recurrence PyTorch uses for nn.LSTM is the following.
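These are the equations documented for torch.nn.LSTM (the second subscript names the gate each weight belongs to):

i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi})
f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf})
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg})
o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho})
c_t = f_t \odot c_{t-1} + i_t \odot g_t
h_t = o_t \odot \tanh(c_t)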
In PyTorch the four input-side weight matrices W_ii, W_if, W_ig and W_io are stacked into a single matrix weight_ih (and likewise the four hidden-side matrices into weight_hh), so all four gates can be computed with one matrix multiplication.
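As a quick illustration, a minimal sketch (sizes here are arbitrary) of how the packed weight_ih_l0 can be split back into the four per-gate matrices; PyTorch stacks them in the order input, forget, cell (g), output:

import torch
from torch import nn

H = 6
lstm = nn.LSTM(input_size=4, hidden_size=H, num_layers=1, batch_first=True)
print(lstm.weight_ih_l0.shape)   # (4*H, input_size): four gate matrices stacked row-wise
print(lstm.weight_hh_l0.shape)   # (4*H, H)
w_ii, w_if, w_ig, w_io = lstm.weight_ih_l0.chunk(4, dim=0)  # gate order: i, f, g, o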
From these formulas it is straightforward to write a manual implementation.
The version below is a two-layer LSTM; the weights and biases are passed in explicitly and hard-coded for exactly two layers, and the hidden size is fixed at 1024.
import torch

def test_lstm(input, wih0, bih0, whh0, bhh0, wih1, bih1, whh1, bhh1):
    # Manually simulate a 2-layer LSTM (hidden_size = 1024), given the packed
    # weight_ih / weight_hh / bias_ih / bias_hh of each layer.
    B, T, F = input.shape
    hidden_size = 1024
    inp_pointer = input
    for layer in range(2):
        h_t, c_t = (torch.zeros(B, hidden_size).cuda(), torch.zeros(B, hidden_size).cuda())
        output = torch.zeros(B, T, hidden_size).cuda()
        batch, time, freq = output.shape
        if layer == 0:
            cur_w_ih = wih0
            cur_w_hh = whh0
            cur_b_ih = bih0
            cur_b_hh = bhh0
        else:
            cur_w_ih = wih1
            cur_w_hh = whh1
            cur_b_ih = bih1
            cur_b_hh = bhh1
        for t in range(time):
            x_t = inp_pointer[:, t, :]
            # One matmul computes all four gates at once (packed layout).
            gates = x_t @ cur_w_ih.T + cur_b_ih + h_t @ cur_w_hh.T + cur_b_hh
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :hidden_size]),                   # input gate
                torch.sigmoid(gates[:, hidden_size:hidden_size * 2]),    # forget gate
                torch.tanh(gates[:, hidden_size * 2:hidden_size * 3]),   # candidate
                torch.sigmoid(gates[:, hidden_size * 3:]),               # output gate
            )
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)
            output[:, t, :] = h_t
        inp_pointer = output
    return inp_pointer
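To sanity-check this function against PyTorch, something like the following could be used (a sketch; the batch, length, and feature sizes are arbitrary, and a GPU is assumed because the function above allocates CUDA tensors):

from torch import nn

lstm = nn.LSTM(input_size=256, hidden_size=1024, num_layers=2, batch_first=True).cuda()
x = torch.randn(8, 50, 256).cuda()
with torch.no_grad():
    ref, _ = lstm(x)
    manual = test_lstm(x,
                       lstm.weight_ih_l0, lstm.bias_ih_l0, lstm.weight_hh_l0, lstm.bias_hh_l0,
                       lstm.weight_ih_l1, lstm.bias_ih_l1, lstm.weight_hh_l1, lstm.bias_hh_l1)
print(torch.allclose(ref, manual, atol=1e-5))  # expect True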
In addition, I implemented a bidirectional version and tested it on a small example; the parameters are again hard-coded.
from collections import OrderedDict

import torch
from torch import nn

def test_lstm():
    input_size = 4
    hidden_size = 6
    num_layer = 2
    bidirectional = True
    direction = 2 if bidirectional else 1
    input = torch.Tensor([[[[0.896227, 0.713551],
                            [0.605188, 0.0700275],
                            [0.827175, 0.186436]],
                           [[0.872269, 0.032015],
                            [0.259925, 0.517878],
                            [0.224867, 0.943635]]],
                          [[[0.290171, 0.0767354],
                            [0.251816, 0.31538],
                            [0.828251, 0.730255]],
                           [[0.24641, 0.757985],
                            [0.354927, 0.694123],
                            [0.990138, 0.946459]]]]).float().transpose(1, 2).reshape(2, 3, 4)
    B, T, F = input.shape
    lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=num_layer, batch_first=True,
                   bidirectional=bidirectional)
    # Hand-crafted, easily reproducible parameters.
    state = OrderedDict()
    state['weight_ih_l0'] = torch.ones([4 * hidden_size, input_size])
    state['weight_hh_l0'] = torch.ones([4 * hidden_size, hidden_size]) * 2
    state['bias_ih_l0'] = torch.zeros(4 * hidden_size) + 0.5
    state['bias_hh_l0'] = torch.zeros(4 * hidden_size) + 1.0
    state['weight_ih_l1'] = torch.ones([4 * hidden_size, hidden_size * direction]) * 2
    state['weight_hh_l1'] = torch.ones([4 * hidden_size, hidden_size]) * 3
    state['bias_ih_l1'] = torch.zeros(4 * hidden_size) + 0.5
    state['bias_hh_l1'] = torch.zeros(4 * hidden_size) + 1.0
    state['weight_ih_l0_reverse'] = torch.ones([4 * hidden_size, input_size])
    state['weight_hh_l0_reverse'] = torch.ones([4 * hidden_size, hidden_size]) * 2
    state['bias_ih_l0_reverse'] = torch.zeros(4 * hidden_size) + 0.5
    state['bias_hh_l0_reverse'] = torch.zeros(4 * hidden_size) + 1.0
    state['weight_ih_l1_reverse'] = torch.ones([4 * hidden_size, hidden_size * direction]) * 2
    state['weight_hh_l1_reverse'] = torch.ones([4 * hidden_size, hidden_size]) * 3
    state['bias_ih_l1_reverse'] = torch.zeros(4 * hidden_size) + 0.5
    state['bias_hh_l1_reverse'] = torch.zeros(4 * hidden_size) + 1.0
    lstm.load_state_dict(state, strict=False)
    # Manual simulation
    inp_pointer = input
    for layer in range(num_layer):
        h_t, c_t = (torch.zeros(B, hidden_size), torch.zeros(B, hidden_size))
        h_t_reverse, c_t_reverse = (torch.zeros(B, hidden_size), torch.zeros(B, hidden_size))
        output = torch.zeros(B, T, hidden_size)
        output_reverse = torch.zeros(B, T, hidden_size)
        batch, time, freq = output.shape
        cur_w_ih = state['weight_ih_l{}'.format(layer)]
        cur_w_ih_reverse = state['weight_ih_l{}_reverse'.format(layer)]
        cur_w_hh = state['weight_hh_l{}'.format(layer)]
        cur_w_hh_reverse = state['weight_hh_l{}_reverse'.format(layer)]
        cur_b_ih = state['bias_ih_l{}'.format(layer)]
        cur_b_ih_reverse = state['bias_ih_l{}_reverse'.format(layer)]
        cur_b_hh = state['bias_hh_l{}'.format(layer)]
        cur_b_hh_reverse = state['bias_hh_l{}_reverse'.format(layer)]
        for t in range(time):
            x_t = inp_pointer[:, t, :]
            r_t = inp_pointer[:, time - t - 1, :]   # the reverse direction reads the sequence backwards
            gates = x_t @ cur_w_ih.T + cur_b_ih + h_t @ cur_w_hh.T + cur_b_hh
            gates_r = r_t @ cur_w_ih_reverse.T + cur_b_ih_reverse + h_t_reverse @ cur_w_hh_reverse.T + cur_b_hh_reverse
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :hidden_size]),                   # input
                torch.sigmoid(gates[:, hidden_size:hidden_size * 2]),    # forget
                torch.tanh(gates[:, hidden_size * 2:hidden_size * 3]),   # candidate
                torch.sigmoid(gates[:, hidden_size * 3:]),               # output
            )
            i_r, f_r, g_r, o_r = (
                torch.sigmoid(gates_r[:, :hidden_size]),                  # input
                torch.sigmoid(gates_r[:, hidden_size:hidden_size * 2]),   # forget
                torch.tanh(gates_r[:, hidden_size * 2:hidden_size * 3]),  # candidate
                torch.sigmoid(gates_r[:, hidden_size * 3:]),              # output
            )
            c_t = f_t * c_t + i_t * g_t
            c_t_reverse = f_r * c_t_reverse + i_r * g_r
            h_t = o_t * torch.tanh(c_t)
            h_t_reverse = o_r * torch.tanh(c_t_reverse)
            output[:, t, :] = h_t
            output_reverse[:, time - t - 1, :] = h_t_reverse
        # Concatenate forward and backward outputs along the feature dimension
        # and feed them to the next layer, as PyTorch does.
        inp_pointer = torch.cat([output, output_reverse], dim=2)
    print(inp_pointer.view(2, 3, 2, -1).transpose(1, 2))
    th_out, (h, c) = lstm(input)
    print(th_out)
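The two printed tensors should match. Instead of comparing them by eye, the final prints inside test_lstm could be replaced by an assertion (a sketch; the tolerance is chosen arbitrarily):

    th_out, (h, c) = lstm(input)
    assert torch.allclose(inp_pointer, th_out, atol=1e-5)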
2. LSTM inference in C++ with Eigen
2.1 Layer_LSTM.h
//
// Created by 65181 on 2022/10/31.
//

#ifndef CRN_LAYER_LSTM_H
#define CRN_LAYER_LSTM_H

#include "Eigen"
#include "mat.h"
#include "Eigen/CXX11/Tensor"

class Layer_LSTM {
public:
    Layer_LSTM();

    Layer_LSTM(int64_t inp_size, int64_t hid_size, int64_t num_layer = 2, bool bidirectional = false);

    void LoadState(MATFile *pmFile, const std::string &state_preffix);

    void LoadTestState();

    Eigen::Tensor<float_t, 3> forward(Eigen::Tensor<float_t, 3> &input, std::vector<Eigen::Tensor<float_t, 2>> &h_t,
                                      std::vector<Eigen::Tensor<float_t, 2>> &c_t);

private:
    int64_t input_size;
    int64_t hidden_size;
    int64_t num_layers;
    int64_t direction;
    bool bidirectional;

    std::vector<Eigen::Tensor<float_t, 2>> weight_ih, weight_hh;
    std::vector<Eigen::Tensor<float_t, 2>> weight_ih_reverse, weight_hh_reverse;
    std::vector<Eigen::Tensor<float_t, 2>> bias_ih, bias_hh;
    std::vector<Eigen::Tensor<float_t, 2>> bias_ih_reverse, bias_hh_reverse;

    Eigen::Tensor<float_t, 2> _load_mat(MATFile *pmFile, const std::string &state_name);

    Eigen::Tensor<float_t, 3> _uni_lstm(Eigen::Tensor<float_t, 3> &input, std::vector<Eigen::Tensor<float_t, 2>> &h_t,
                                        std::vector<Eigen::Tensor<float_t, 2>> &c_t);

    Eigen::Tensor<float_t, 3> _bi_lstm(Eigen::Tensor<float_t, 3> &input, std::vector<Eigen::Tensor<float_t, 2>> &h_t,
                                       std::vector<Eigen::Tensor<float_t, 2>> &c_t);

    void print2(Eigen::Tensor<float_t, 2> input);

    void print3(Eigen::Tensor<float_t, 3> input);
};

#endif //CRN_LAYER_LSTM_H
2.2 Layer_LSTM.cpp
//
// Created by 65181 on 2022/10/31.
//

#include "iostream"
#include "../include/Layer_LSTM.h"

using namespace std;

Layer_LSTM::Layer_LSTM() {
    this->input_size = 64;
    this->hidden_size = 64;
    this->num_layers = 2;
    this->direction = 1;
    this->bidirectional = false;   // keep the flag initialised for the default configuration
}

Layer_LSTM::Layer_LSTM(int64_t inp_size, int64_t hid_size, int64_t num_layer, bool bidirectional) {
    this->input_size = inp_size;
    this->hidden_size = hid_size;
    this->num_layers = num_layer;
    this->bidirectional = bidirectional;
    this->direction = bidirectional ? 2 : 1;
}
void Layer_LSTM::LoadState(MATFile *pmFile, const std::string &state_preffix) {
    for (int layer_idx = 0; layer_idx < this->num_layers; layer_idx++) {
        std::string weight_ih_name = state_preffix + "_weight_ih_l" + std::to_string(layer_idx);
        std::string bias_ih_name = state_preffix + "_bias_ih_l" + std::to_string(layer_idx);
        std::string weight_hh_name = state_preffix + "_weight_hh_l" + std::to_string(layer_idx);
        std::string bias_hh_name = state_preffix + "_bias_hh_l" + std::to_string(layer_idx);
        this->weight_ih.push_back(_load_mat(pmFile, weight_ih_name));
        this->bias_ih.push_back(_load_mat(pmFile, bias_ih_name));
        this->weight_hh.push_back(_load_mat(pmFile, weight_hh_name));
        this->bias_hh.push_back(_load_mat(pmFile, bias_hh_name));
        if (this->bidirectional) {
            std::string w_ih_reverse = state_preffix + "_weight_ih_l" + std::to_string(layer_idx) + "_reverse";
            std::string b_ih_reverse = state_preffix + "_bias_ih_l" + std::to_string(layer_idx) + "_reverse";
            std::string w_hh_reverse = state_preffix + "_weight_hh_l" + std::to_string(layer_idx) + "_reverse";
            std::string b_hh_reverse = state_preffix + "_bias_hh_l" + std::to_string(layer_idx) + "_reverse";
            this->weight_ih_reverse.push_back(_load_mat(pmFile, w_ih_reverse));
            this->bias_ih_reverse.push_back(_load_mat(pmFile, b_ih_reverse));
            this->weight_hh_reverse.push_back(_load_mat(pmFile, w_hh_reverse));
            this->bias_hh_reverse.push_back(_load_mat(pmFile, b_hh_reverse));
        }
    }
}
Eigen::Tensor<float_t, 2> Layer_LSTM::_load_mat(MATFile *pmFile, const std::string &state_name) {
    mxArray *pa = matGetVariable(pmFile, state_name.c_str());
    auto *values = (float_t *) mxGetData(pa);
    long long dim1 = mxGetM(pa);
    long long dim2 = mxGetN(pa);
    Eigen::Tensor<float_t, 2> matrix(dim1, dim2);
    int idx = 0;
    // MATLAB stores data column-major, so fill the tensor column by column.
    for (int i = 0; i < dim2; i++) {
        for (int j = 0; j < dim1; j++) {
            matrix(j, i) = values[idx++];
        }
    }
    return matrix;
}
void Layer_LSTM::LoadTestState() {
    for (int layer = 0; layer < this->num_layers; layer++) {
        // The first layer sees the raw input; later layers see hidden_size * direction features.
        int64_t _ih_DIM = layer == 0 ? this->input_size : this->hidden_size * this->direction;
        Eigen::Tensor<float_t, 2> state_w_ih(this->hidden_size * 4, _ih_DIM);
        Eigen::Tensor<float_t, 2> state_w_hh(this->hidden_size * 4, this->hidden_size);
        Eigen::Tensor<float_t, 2> state_b_ih(1, this->hidden_size * 4);
        Eigen::Tensor<float_t, 2> state_b_hh(1, this->hidden_size * 4);
        state_w_ih.setConstant(2);
        state_w_hh.setConstant(2);
        state_b_ih.setConstant(1.0);
        state_b_hh.setConstant(1.0);
        this->weight_ih.push_back(state_w_ih);
        this->weight_hh.push_back(state_w_hh);
        this->bias_ih.push_back(state_b_ih);
        this->bias_hh.push_back(state_b_hh);
//        Eigen::Tensor<float_t, 2> state_w_ih_reverse(this->hidden_size * 4, _ih_DIM);
//        Eigen::Tensor<float_t, 2> state_w_hh_reverse(this->hidden_size * 4, this->hidden_size);
//        Eigen::Tensor<float_t, 2> state_b_ih_reverse(1, this->hidden_size * 4);
//        Eigen::Tensor<float_t, 2> state_b_hh_reverse(1, this->hidden_size * 4);
//        state_w_ih_reverse.setConstant(layer + 1);
//        state_w_hh_reverse.setConstant(layer + 2);
//        state_b_ih_reverse.setConstant(0.5);
//        state_b_hh_reverse.setConstant(1.0);
//        this->weight_ih_reverse.push_back(state_w_ih_reverse);
//        this->weight_hh_reverse.push_back(state_w_hh_reverse);
//        this->bias_ih_reverse.push_back(state_b_ih_reverse);
//        this->bias_hh_reverse.push_back(state_b_hh_reverse);
    }
}
Eigen::Tensor<float_t, 3> Layer_LSTM::forward(Eigen::Tensor<float_t, 3> &input,
                                              std::vector<Eigen::Tensor<float_t, 2>> &h_t,
                                              std::vector<Eigen::Tensor<float_t, 2>> &c_t) {
    Eigen::Tensor<float_t, 3> output;
    if (this->bidirectional) {
        output = this->_bi_lstm(input, h_t, c_t);
    } else {
        output = this->_uni_lstm(input, h_t, c_t);
    }
    return output;
}
Eigen::Tensor<float_t, 3> Layer_LSTM::_uni_lstm(Eigen::Tensor<float_t, 3> &input,
                                                vector<Eigen::Tensor<float_t, 2>> &h_t,
                                                vector<Eigen::Tensor<float_t, 2>> &c_t) {
    const Eigen::Tensor<float_t, 3>::Dimensions &dim_inp = input.dimensions();
    Eigen::Tensor<float_t, 3> out_pointer = input;
    // Initialise per-layer hidden and cell states with zeros if none were passed in.
    if (h_t.empty() || c_t.empty()) {
        for (int idx_layer = 0; idx_layer < this->num_layers; idx_layer++) {
            Eigen::Tensor<float_t, 2> ht_zeros(dim_inp[0], this->hidden_size);
            Eigen::Tensor<float_t, 2> ct_zeros(dim_inp[0], this->hidden_size);
            ht_zeros.setZero();
            ct_zeros.setZero();
            h_t.push_back(ht_zeros);
            c_t.push_back(ct_zeros);
        }
    }
    for (int idx_layer = 0; idx_layer < this->num_layers; idx_layer++) {
        const Eigen::Tensor<float_t, 3>::Dimensions &dim_cur = out_pointer.dimensions();
        int64_t N_BATCH = dim_cur[0], N_TIME = dim_cur[1], N_FREQ = dim_cur[2], N_HIDDEN = this->hidden_size;
        Eigen::Tensor<float_t, 2> cur_w_ih = this->weight_ih[idx_layer];
        Eigen::Tensor<float_t, 2> cur_w_hh = this->weight_hh[idx_layer];
        // Biases are stored as (1, 4*hidden) and broadcast over the batch dimension.
        Eigen::Tensor<float_t, 2> cur_b_ih = this->bias_ih[idx_layer].broadcast(Eigen::array<int64_t, 2>{N_BATCH, 1});
        Eigen::Tensor<float_t, 2> cur_b_hh = this->bias_hh[idx_layer].broadcast(Eigen::array<int64_t, 2>{N_BATCH, 1});
        Eigen::Tensor<float_t, 2> &cur_ht = h_t[idx_layer];
        Eigen::Tensor<float_t, 2> &cur_ct = c_t[idx_layer];
        Eigen::Tensor<float_t, 3> output(N_BATCH, N_TIME, N_HIDDEN);
        Eigen::Tensor<float_t, 2> X_t;
        Eigen::Tensor<float_t, 2> gates;
        Eigen::Tensor<float_t, 2> i_t, f_t, g_t, o_t;
        // Contracting dim 1 of X_t with dim 1 of the weight matrix is the Eigen
        // equivalent of x_t @ W.T in the PyTorch version above.
        Eigen::array<Eigen::IndexPair<int>, 1> product_dims = {Eigen::IndexPair<int>(1, 1)};
        Eigen::array<int64_t, 2> gate_patch = Eigen::array<int64_t, 2>{N_BATCH, N_HIDDEN};
        for (int t = 0; t < N_TIME; t++) {
            X_t = out_pointer.chip(t, 1);
            gates = X_t.contract(cur_w_ih, product_dims) + cur_b_ih + cur_ht.contract(cur_w_hh, product_dims) +
                    cur_b_hh;
            // Slice the packed gate activations in PyTorch order: input, forget, candidate, output.
            i_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 0}, gate_patch).sigmoid();
            f_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 1}, gate_patch).sigmoid();
            g_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 2}, gate_patch).tanh();
            o_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 3}, gate_patch).sigmoid();
            cur_ct = f_t * cur_ct + i_t * g_t;
            cur_ht = o_t * cur_ct.tanh();
            output.chip(t, 1) = cur_ht;
        }
        out_pointer = output;
    }
    return out_pointer;
}
Eigen::Tensor<float_t, 3> Layer_LSTM::_bi_lstm(Eigen::Tensor<float_t, 3> &input,
                                               vector<Eigen::Tensor<float_t, 2>> &h_t,
                                               vector<Eigen::Tensor<float_t, 2>> &c_t) {
    const Eigen::Tensor<float_t, 3>::Dimensions &dim_inp = input.dimensions();
    Eigen::Tensor<float_t, 3> out_pointer = input;
    // One forward and one backward state per layer, interleaved as [fwd0, bwd0, fwd1, bwd1, ...].
    if (h_t.empty() || c_t.empty()) {
        for (int idx_layer = 0; idx_layer < this->num_layers * this->direction; idx_layer++) {
            Eigen::Tensor<float_t, 2> ht_zeros(dim_inp[0], this->hidden_size);
            Eigen::Tensor<float_t, 2> ct_zeros(dim_inp[0], this->hidden_size);
            ht_zeros.setZero();
            ct_zeros.setZero();
            h_t.push_back(ht_zeros);
            c_t.push_back(ct_zeros);
        }
    }
    for (int idx_layer = 0; idx_layer < this->num_layers; idx_layer++) {
        const Eigen::Tensor<float_t, 3>::Dimensions &dim_cur = out_pointer.dimensions();
        int64_t N_BATCH = dim_cur[0], N_TIME = dim_cur[1], N_FREQ = dim_cur[2], N_HIDDEN = this->hidden_size;
        Eigen::Tensor<float_t, 2> cur_w_ih = this->weight_ih[idx_layer];
        Eigen::Tensor<float_t, 2> cur_w_ih_reverse = this->weight_ih_reverse[idx_layer];
        Eigen::Tensor<float_t, 2> cur_w_hh = this->weight_hh[idx_layer];
        Eigen::Tensor<float_t, 2> cur_w_hh_reverse = this->weight_hh_reverse[idx_layer];
        Eigen::Tensor<float_t, 2> cur_b_ih = this->bias_ih[idx_layer].broadcast(Eigen::array<int64_t, 2>{N_BATCH, 1});
        Eigen::Tensor<float_t, 2> cur_b_ih_reverse = this->bias_ih_reverse[idx_layer].broadcast(
                Eigen::array<int64_t, 2>{N_BATCH, 1});
        Eigen::Tensor<float_t, 2> cur_b_hh = this->bias_hh[idx_layer].broadcast(Eigen::array<int64_t, 2>{N_BATCH, 1});
        Eigen::Tensor<float_t, 2> cur_b_hh_reverse = this->bias_hh_reverse[idx_layer].broadcast(
                Eigen::array<int64_t, 2>{N_BATCH, 1});
        Eigen::Tensor<float_t, 2> &cur_ht = h_t[idx_layer * 2];
        Eigen::Tensor<float_t, 2> &cur_ht_reverse = h_t[idx_layer * 2 + 1];
        Eigen::Tensor<float_t, 2> &cur_ct = c_t[idx_layer * 2];
        Eigen::Tensor<float_t, 2> &cur_ct_reverse = c_t[idx_layer * 2 + 1];
        Eigen::Tensor<float_t, 3> output(N_BATCH, N_TIME, N_HIDDEN);
        Eigen::Tensor<float_t, 3> output_reverse(N_BATCH, N_TIME, N_HIDDEN);
        Eigen::Tensor<float_t, 2> X_t;
        Eigen::Tensor<float_t, 2> X_t_reverse;
        Eigen::Tensor<float_t, 2> gates;
        Eigen::Tensor<float_t, 2> gates_reverse;
        Eigen::Tensor<float_t, 2> i_t, f_t, g_t, o_t;
        Eigen::Tensor<float_t, 2> i_t_reverse, f_t_reverse, g_t_reverse, o_t_reverse;
        Eigen::array<Eigen::IndexPair<int>, 1> product_dims = {Eigen::IndexPair<int>(1, 1)};
        Eigen::array<int64_t, 2> gate_patch = Eigen::array<int64_t, 2>{N_BATCH, N_HIDDEN};
        for (int t = 0; t < N_TIME; t++) {
            // The forward direction reads frame t, the backward direction frame N_TIME - t - 1.
            X_t = out_pointer.chip(t, 1);
            X_t_reverse = out_pointer.chip(N_TIME - t - 1, 1);
            gates = X_t.contract(cur_w_ih, product_dims) + cur_b_ih + cur_ht.contract(cur_w_hh, product_dims) +
                    cur_b_hh;
            gates_reverse = X_t_reverse.contract(cur_w_ih_reverse, product_dims) + cur_b_ih_reverse +
                            cur_ht_reverse.contract(cur_w_hh_reverse, product_dims) +
                            cur_b_hh_reverse;
            i_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 0}, gate_patch).sigmoid();
            f_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 1}, gate_patch).sigmoid();
            g_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 2}, gate_patch).tanh();
            o_t = gates.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 3}, gate_patch).sigmoid();
            i_t_reverse = gates_reverse.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 0}, gate_patch).sigmoid();
            f_t_reverse = gates_reverse.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 1}, gate_patch).sigmoid();
            g_t_reverse = gates_reverse.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 2}, gate_patch).tanh();
            o_t_reverse = gates_reverse.slice(Eigen::array<int64_t, 2>{0, N_HIDDEN * 3}, gate_patch).sigmoid();
            cur_ct = f_t * cur_ct + i_t * g_t;
            cur_ht = o_t * cur_ct.tanh();
            cur_ct_reverse = f_t_reverse * cur_ct_reverse + i_t_reverse * g_t_reverse;
            cur_ht_reverse = o_t_reverse * cur_ct_reverse.tanh();
            output.chip(t, 1) = cur_ht;
            output_reverse.chip(N_TIME - t - 1, 1) = cur_ht_reverse;
        }
        // Concatenate forward and backward outputs along the feature dimension, as PyTorch does.
        out_pointer = output.concatenate(output_reverse, 2);
    }
    return out_pointer;
}
void Layer_LSTM::print2(Eigen::Tensor<float_t, 2> input) {
    const Eigen::Tensor<float_t, 2>::Dimensions &dim_inp = input.dimensions();
    std::cout << "Variable:" << std::endl;
    // first row: first three and last three values
    std::cout << input(0, 0) << " " << input(0, 1) << " " << input(0, 2) << " ";
    std::cout << input(0, dim_inp[1] - 3) << " " << input(0, dim_inp[1] - 2) << " "
              << input(0, dim_inp[1] - 1);
    std::cout << std::endl;
    // last row: first three and last three values
    std::cout << input(dim_inp[0] - 1, 0) << " " << input(dim_inp[0] - 1, 1) << " "
              << input(dim_inp[0] - 1, 2) << " ";
    std::cout << input(dim_inp[0] - 1, dim_inp[1] - 3) << " " << input(dim_inp[0] - 1, dim_inp[1] - 2)
              << " "
              << input(dim_inp[0] - 1, dim_inp[1] - 1);
    std::cout << std::endl;
}

void Layer_LSTM::print3(Eigen::Tensor<float_t, 3> input) {
    const Eigen::Tensor<float_t, 3>::Dimensions &dim_inp = input.dimensions();
    std::cout << "Variable:" << std::endl;
    // first frame: first three and last three values
    std::cout << input(0, 0, 0) << " " << input(0, 0, 1) << " " << input(0, 0, 2) << " ";
    std::cout << input(0, 0, dim_inp[2] - 3) << " " << input(0, 0, dim_inp[2] - 2) << " "
              << input(0, 0, dim_inp[2] - 1);
    std::cout << std::endl;
    // last frame: first three and last three values
    std::cout << input(0, dim_inp[1] - 1, 0) << " " << input(0, dim_inp[1] - 1, 1) << " "
              << input(0, dim_inp[1] - 1, 2) << " ";
    std::cout << input(0, dim_inp[1] - 1, dim_inp[2] - 3) << " " << input(0, dim_inp[1] - 1, dim_inp[2] - 2)
              << " "
              << input(0, dim_inp[1] - 1, dim_inp[2] - 1);
    std::cout << std::endl;
}
3. References
[1] 實(shí)現(xiàn)LSTM-pytorch版 (implementing an LSTM, PyTorch version)