{"id":1691,"date":"2025-02-10T17:24:47","date_gmt":"2025-02-10T09:24:47","guid":{"rendered":"https:\/\/www.forillusion.com\/?p=1691"},"modified":"2025-02-14T11:39:01","modified_gmt":"2025-02-14T03:39:01","slug":"3-13-dropout","status":"publish","type":"post","link":"https:\/\/www.forillusion.com\/index.php\/3-13-dropout\/","title":{"rendered":"3.13 \u4e22\u5f03\u6cd5"},"content":{"rendered":"\n<p><div class=\"has-toc have-toc\"><\/div><\/p>\n\n\n\n<h2 class=\"wp-block-heading\">\u65b9\u6cd5<\/h2>\n\n\n\n<p>\u9664\u4e86\u6743\u91cd\u8870\u51cf\u4ee5\u5916\uff0c\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u5e38\u5e38\u4f7f\u7528\u4e22\u5f03\u6cd5\uff08dropout\uff09\u6765\u5e94\u5bf9\u8fc7\u62df\u5408\u95ee\u9898\u3002\u4e22\u5f03\u6cd5\u6709\u4e00\u4e9b\u4e0d\u540c\u7684\u53d8\u4f53\u3002\u672c\u8282\u4e2d\u63d0\u5230\u7684\u4e22\u5f03\u6cd5\u7279\u6307\u5012\u7f6e\u4e22\u5f03\u6cd5\uff08inverted dropout\uff09\u3002<\/p>\n\n\n\n<p>\u4e00\u4e2a\u5355\u9690\u85cf\u5c42\u7684\u591a\u5c42\u611f\u77e5\u673a\uff0c\u5176\u4e2d\u8f93\u5165\u4e2a\u6570\u4e3a4\uff0c\u9690\u85cf\u5355\u5143\u4e2a\u6570\u4e3a5\uff0c\u4e14\u9690\u85cf\u5355\u5143$h_i$\uff08$i=1, \\ldots, 5$\uff09\u7684\u8ba1\u7b97\u8868\u8fbe\u5f0f\u4e3a<\/p>\n\n\n\n<p>$$<br>h_i = \\phi\\left(x_1 w_{1i} + x_2 w_{2i} + x_3 w_{3i} + x_4 w_{4i} + b_i\\right)<br>$$<\/p>\n\n\n\n<p>\u8fd9\u91cc$\\phi$\u662f\u6fc0\u6d3b\u51fd\u6570\uff0c$x_1, \\ldots, x_4$\u662f\u8f93\u5165\uff0c\u9690\u85cf\u5355\u5143$i$\u7684\u6743\u91cd\u53c2\u6570\u4e3a$w_{1i}, \\ldots, 
w_{4i}$\uff0c\u504f\u5dee\u53c2\u6570\u4e3a$b_i$\u3002\u5f53\u5bf9\u8be5\u9690\u85cf\u5c42\u4f7f\u7528\u4e22\u5f03\u6cd5\u65f6\uff0c\u8be5\u5c42\u7684\u9690\u85cf\u5355\u5143\u5c06\u6709\u4e00\u5b9a\u6982\u7387\u88ab\u4e22\u5f03\u6389\u3002\u8bbe\u4e22\u5f03\u6982\u7387\u4e3a$p$\uff0c\u90a3\u4e48\u6709$p$\u7684\u6982\u7387$h_i$\u4f1a\u88ab\u6e05\u96f6\uff0c\u6709$1-p$\u7684\u6982\u7387$h_i$\u4f1a\u9664\u4ee5$1-p$\u505a\u62c9\u4f38\u3002\u4e22\u5f03\u6982\u7387\u662f\u4e22\u5f03\u6cd5\u7684\u8d85\u53c2\u6570\u3002\u5177\u4f53\u6765\u8bf4\uff0c\u8bbe\u968f\u673a\u53d8\u91cf$\\xi_i$\u4e3a0\u548c1\u7684\u6982\u7387\u5206\u522b\u4e3a$p$\u548c$1-p$\u3002\u4f7f\u7528\u4e22\u5f03\u6cd5\u65f6\u8ba1\u7b97\u65b0\u7684\u9690\u85cf\u5355\u5143$h_i'$<\/p>\n\n\n\n<p>$$<br>h_i' = \\frac{\\xi_i}{1-p} h_i<br>$$<\/p>\n\n\n\n<p>\u7531\u4e8e$E(\\xi_i) = 1-p$\uff0c\u56e0\u6b64<\/p>\n\n\n\n<p>$$<br>E(h_i') = \\frac{E(\\xi_i)}{1-p}h_i = h_i<br>$$<\/p>\n\n\n\n<p>\u5373<strong>\u4e22\u5f03\u6cd5\u4e0d\u6539\u53d8\u5176\u8f93\u5165\u7684\u671f\u671b\u503c<\/strong>\u3002\u5bf9\u4e0a\u8ff0\u9690\u85cf\u5c42\u4f7f\u7528\u4e22\u5f03\u6cd5\uff0c\u4e00\u79cd\u53ef\u80fd\u7684\u7ed3\u679c\u5982\u4e0b\u56fe\u6240\u793a\uff0c\u5176\u4e2d$h_2$\u548c$h_5$\u88ab\u6e05\u96f6\u3002\u8fd9\u65f6\u8f93\u51fa\u503c\u7684\u8ba1\u7b97\u4e0d\u518d\u4f9d\u8d56$h_2$\u548c$h_5$\uff0c\u5728\u53cd\u5411\u4f20\u64ad\u65f6\uff0c\u4e0e\u8fd9\u4e24\u4e2a\u9690\u85cf\u5355\u5143\u76f8\u5173\u7684\u6743\u91cd\u7684\u68af\u5ea6\u5747\u4e3a0\u3002\u7531\u4e8e\u5728\u8bad\u7ec3\u4e2d\u9690\u85cf\u5c42\u795e\u7ecf\u5143\u7684\u4e22\u5f03\u662f\u968f\u673a\u7684\uff0c\u5373$h_1, \\ldots, h_5$\u90fd\u6709\u53ef\u80fd\u88ab\u6e05\u96f6\uff0c\u8f93\u51fa\u5c42\u7684\u8ba1\u7b97\u65e0\u6cd5\u8fc7\u5ea6\u4f9d\u8d56$h_1, \\ldots, 
h_5$\u4e2d\u7684\u4efb\u4e00\u4e2a\uff0c\u4ece\u800c\u5728\u8bad\u7ec3\u6a21\u578b\u65f6\u8d77\u5230\u6b63\u5219\u5316\u7684\u4f5c\u7528\uff0c\u5e76\u53ef\u4ee5\u7528\u6765\u5e94\u5bf9\u8fc7\u62df\u5408\u3002\u5728\u6d4b\u8bd5\u6a21\u578b\u65f6\uff0c\u4e3a\u4e86\u62ff\u5230\u66f4\u52a0\u786e\u5b9a\u6027\u7684\u7ed3\u679c\uff0c\u4e00\u822c\u4e0d\u4f7f\u7528\u4e22\u5f03\u6cd5\u3002<\/p>\n\n\n\n<figure class=\"wp-block-image\"><img decoding=\"async\"   class=\"lazyload\" data-src=\"https:\/\/cos.forillusion.top\/wp-content\/uploads\/2025\/02\/3.13_dropout.png\" src=\"https:\/\/cdn.forillusion.com\/moezx\/img\/svg\/loader\/trans.ajax-spinner-preloader.svg\" onerror=\"imgError(this)\"  alt=\"\"\/><\/figure >\n<noscript><img decoding=\"async\" src=\"https:\/\/cos.forillusion.top\/wp-content\/uploads\/2025\/02\/3.13_dropout.png\" alt=\"\"\/><\/noscript>\n\n\n\n<h2 class=\"wp-block-heading\">\u4ece\u96f6\u5f00\u59cb\u5b9e\u73b0<\/h2>\n\n\n\n<pre class=\"wp-block-code\"><code>import torch\nimport torch.nn as nn\nimport numpy as np\n\ndef dropout(X, drop_prob):\n    X = X.float()\n    assert 0 &lt;= drop_prob &lt;= 1 # assert\u662f\u65ad\u8a00\u51fd\u6570\uff0c\u5982\u679c\u540e\u9762\u7684\u6761\u4ef6\u4e3a\u5047\uff0c\u7a0b\u5e8f\u4f1a\u62a5\u9519\n    keep_prob = 1 - drop_prob # \u4fdd\u7559\u6982\u7387\n    # \u8fd9\u79cd\u60c5\u51b5\u4e0b\u628a\u5168\u90e8\u5143\u7d20\u90fd\u4e22\u5f03\n    if keep_prob == 0:\n        return torch.zeros_like(X) # zeros_like\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u548c\u8f93\u5165\u5f62\u72b6\u76f8\u540c\u7684\u51680\u5f20\u91cf  \n    mask = (torch.rand(X.shape) &lt; keep_prob).float() # rand\u51fd\u6570\u8fd4\u56de\u4e00\u4e2a\u5f20\u91cf\uff0c\u5305\u542b\u4e86\u4ece\u533a\u95f4&#91;0, 1)\u7684\u5747\u5300\u5206\u5e03\u4e2d\u62bd\u53d6\u7684\u4e00\u7ec4\u968f\u673a\u6570\n\n    return mask * X \/ keep_prob \n\nX = torch.arange(16).view(2, 8)\nprint(dropout(X, 0.5))<\/code><\/pre>\n\n\n\n<h3 
class=\"wp-block-heading\">\u5b9a\u4e49\u6a21\u578b\u53c2\u6570<\/h3>\n\n\n\n<p>\u4f7f\u7528Fashion-MNIST\u6570\u636e\u96c6\u3002\u5b9a\u4e49\u4e00\u4e2a\u5305\u542b\u4e24\u4e2a\u9690\u85cf\u5c42\u7684\u591a\u5c42\u611f\u77e5\u673a\uff0c\u5176\u4e2d\u4e24\u4e2a\u9690\u85cf\u5c42\u7684\u8f93\u51fa\u4e2a\u6570\u90fd\u662f256\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\n\nW1 = torch.tensor(np.random.normal(0, 0.01, size=(num_inputs, num_hiddens1)), dtype=torch.float, requires_grad=True)\nb1 = torch.zeros(num_hiddens1, requires_grad=True)\nW2 = torch.tensor(np.random.normal(0, 0.01, size=(num_hiddens1, num_hiddens2)), dtype=torch.float, requires_grad=True)\nb2 = torch.zeros(num_hiddens2, requires_grad=True)\nW3 = torch.tensor(np.random.normal(0, 0.01, size=(num_hiddens2, num_outputs)), dtype=torch.float, requires_grad=True)\nb3 = torch.zeros(num_outputs, requires_grad=True)\n\nparams = &#91;W1, b1, W2, b2, W3, b3]<\/code><\/pre>\n\n\n\n<h3 class=\"wp-block-heading\">\u5b9a\u4e49\u6a21\u578b<\/h3>\n\n\n\n<p>\u6a21\u578b\u4e2d\u5c06\u5168\u8fde\u63a5\u5c42\u548c\u6fc0\u6d3b\u51fd\u6570ReLU\u4e32\u8d77\u6765\uff0c\u5e76\u5bf9\u6bcf\u4e2a\u6fc0\u6d3b\u51fd\u6570\u7684\u8f93\u51fa\u4f7f\u7528\u4e22\u5f03\u6cd5\u3002\u5206\u522b\u8bbe\u7f6e\u5404\u4e2a\u5c42\u7684\u4e22\u5f03\u6982\u7387\u3002\u901a\u5e38\u7684\u5efa\u8bae\u662f\u628a\u9760\u8fd1\u8f93\u5165\u5c42\u7684\u4e22\u5f03\u6982\u7387\u8bbe\u5f97\u5c0f\u4e00\u70b9\u3002\u5728\u8fd9\u4e2a\u5b9e\u9a8c\u4e2d\uff0c\u628a\u7b2c\u4e00\u4e2a\u9690\u85cf\u5c42\u7684\u4e22\u5f03\u6982\u7387\u8bbe\u4e3a0.2\uff0c\u628a\u7b2c\u4e8c\u4e2a\u9690\u85cf\u5c42\u7684\u4e22\u5f03\u6982\u7387\u8bbe\u4e3a0.5\u3002\u901a\u8fc7\u53c2\u6570<code>is_training<\/code>\u6765\u5224\u65ad\u8fd0\u884c\u6a21\u5f0f\u4e3a\u8bad\u7ec3\u8fd8\u662f\u6d4b\u8bd5\uff0c\u5e76\u53ea\u9700\u5728\u8bad\u7ec3\u6a21\u5f0f\u4e0b\u4f7f\u7528\u4e22\u5f03\u6cd5\u3002<\/p>\n\n\n\n<pre 
class=\"wp-block-code\"><code>drop_prob1, drop_prob2 = 0.2, 0.5 # \u4e22\u5f03\u6982\u7387\n\ndef net(X, is_training=True):\n    X = X.view(-1, num_inputs) # view\u51fd\u6570\u5c06X\u8f6c\u6362\u4e3a2D\u5f20\u91cf\uff0c\u7b2c\u4e00\u7ef4\u5ea6\u4fdd\u6301\u4e0d\u53d8\uff0c\u7b2c\u4e8c\u7ef4\u5ea6\u8bbe\u4e3anum_inputs\n    H1 = (torch.matmul(X, W1) + b1).relu()\n    if is_training:  # \u53ea\u5728\u8bad\u7ec3\u6a21\u578b\u65f6\u4f7f\u7528\u4e22\u5f03\u6cd5\n        H1 = dropout(H1, drop_prob1)  # \u5728\u7b2c\u4e00\u5c42\u5168\u8fde\u63a5\u540e\u6dfb\u52a0\u4e22\u5f03\u5c42\n    H2 = (torch.matmul(H1, W2) + b2).relu()\n    if is_training:\n        H2 = dropout(H2, drop_prob2)  # \u5728\u7b2c\u4e8c\u5c42\u5168\u8fde\u63a5\u540e\u6dfb\u52a0\u4e22\u5f03\u5c42\n    return torch.matmul(H2, W3) + b3<\/code><\/pre>\n\n\n\n<p>\u8bc4\u4f30\u51fd\u6570\uff0c\u6839\u636e\u6a21\u578b\u7684\u5b9a\u4e49\u65b9\u5f0f\uff0c\u8fdb\u5165\u8bc4\u4f30\u6a21\u5f0f\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>def evaluate_accuracy(data_iter, net):\n    acc_sum, n = 0.0, 0 # \u521d\u59cb\u5316\u6b63\u786e\u9884\u6d4b\u7684\u6570\u91cf\uff0c\u603b\u9884\u6d4b\u7684\u6570\u91cf\n    for X, y in data_iter:\n        if isinstance(net, torch.nn.Module): # \u5982\u679cnet\u662ftorch.nn.Module\u7684\u5b9e\u4f8b\n            net.eval() # \u8bc4\u4f30\u6a21\u5f0f, \u8fd9\u4f1a\u5173\u95eddropout\n            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item() # \u8ba1\u7b97\u6b63\u786e\u9884\u6d4b\u7684\u6570\u91cf\n            net.train() # \u6539\u56de\u8bad\u7ec3\u6a21\u5f0f\n        else: # \u81ea\u5b9a\u4e49\u7684\u6a21\u578b\n            if('is_training' in net.__code__.co_varnames): # \u5982\u679c\u6709is_training\u8fd9\u4e2a\u53c2\u6570\n                # \u5c06is_training\u8bbe\u7f6e\u6210False\n                acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item() \n            else:\n                acc_sum += (net(X).argmax(dim=1) == 
y).float().sum().item() \n        n += y.shape&#91;0]\n    return acc_sum \/ n<\/code><\/pre>\n\n\n\n<h3 class=\"wp-block-heading\">\u8bad\u7ec3\u548c\u6d4b\u8bd5\u6a21\u578b<\/h3>\n\n\n\n<p>\u8fd9\u90e8\u5206\u548c\u4e4b\u524d\u7684\u4e00\u6837<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>def get_dataloader_workers():\n    return 0 if sys.platform.startswith('win') else 4\n\ndef load_data_fashion_mnist(batch_size, resize=None): \n    # \u4e0b\u8f7dFashion-MNIST\u6570\u636e\u96c6\uff0c\u7136\u540e\u5c06\u5176\u52a0\u8f7d\u5230\u5185\u5b58\u4e2d\uff0c\u8fd4\u56de\u8bad\u7ec3\u96c6\u548c\u6d4b\u8bd5\u96c6\u7684\u6570\u636e\u8fed\u4ee3\u5668\n    mnist_train = torchvision.datasets.FashionMNIST(root=\"data\", train=True, transform=transforms.ToTensor(), download=True)\n    mnist_test = torchvision.datasets.FashionMNIST(root=\"data\", train=False, transform=transforms.ToTensor(), download=True)\n    return (data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()),\n            data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))\n\ndef sgd(params,lr,batch_size):  #\u5b9a\u4e49\u4f18\u5316\u7b97\u6cd5,params:\u5f85\u4f18\u5316\u53c2\u6570,lr:\u5b66\u4e60\u7387,batch_size:\u6279\u91cf\u5927\u5c0f\n    for param in params:\n        param.data-=lr*param.grad\/batch_size  #\u6ce8\u610f\u8fd9\u91cc\u66f4\u6539param\u65f6\u7528\u7684param.data\n\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None): \n    for epoch in range(num_epochs):  # \u8bad\u7ec3\u6a21\u578b\u4e00\u5171\u9700\u8981num_epochs\u4e2a\u8fed\u4ee3\u5468\u671f\n        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0 # \u8bad\u7ec3\u635f\u5931\u603b\u548c\uff0c\u8bad\u7ec3\u51c6\u786e\u5ea6\u603b\u548c\uff0c\u6837\u672c\u6570\n        for X, y in train_iter: # X\u662f\u56fe\u50cf\uff0cy\u662f\u6807\u7b7e\uff0c\u6570\u91cf\u4e3abatch_size\n            y_hat = net(X) # 
\u9884\u6d4b\u6982\u7387\n            l = loss(y_hat, y).sum() # \u8ba1\u7b97\u635f\u5931\uff0csum()\u5c06\u6240\u6709loss\u503c\u76f8\u52a0\u5f97\u5230\u4e00\u4e2a\u6807\u91cf\n\n            # \u68af\u5ea6\u6e05\u96f6\n            if optimizer is not None: # \u4f7f\u7528PyTorch\u5185\u7f6e\u7684\u4f18\u5316\u5668\u548c\u635f\u5931\u51fd\u6570\n                optimizer.zero_grad() # \u68af\u5ea6\u6e05\u96f6\n            elif params is not None and params&#91;0].grad is not None: # \u4f7f\u7528\u81ea\u5b9a\u4e49\u7684\u4f18\u5316\u5668\u548c\u635f\u5931\u51fd\u6570\n                for param in params: \n                    param.grad.data.zero_()\n\n            l.backward() # \u8ba1\u7b97\u68af\u5ea6\n            if optimizer is None: # \u4f7f\u7528\u81ea\u5b9a\u4e49\u7684\u4f18\u5316\u7b97\u6cd5\n                sgd(params, lr, batch_size) # \u66f4\u65b0\u6a21\u578b\u53c2\u6570\n            else:\n                optimizer.step()  # \u201csoftmax\u56de\u5f52\u7684\u7b80\u6d01\u5b9e\u73b0\u201d\u4e00\u8282\u5c06\u7528\u5230\n\n            train_l_sum += l.item() # \u5c06\u5f53\u524d\u6279\u6b21loss\u503c\u76f8\u52a0\u5f97\u5230\u4e00\u4e2a\u603b\u7684loss\u503c\n            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item() # \u8ba1\u7b97\u603b\u51c6\u786e\u7387\n            n += y.shape&#91;0] # y.shape&#91;0]\u662fy\u7684\u884c\u6570\uff0c\u4e5f\u5c31\u662fbatch_size\uff0c\u8ba1\u7b97\u603b\u6837\u672c\u6570\n\n        test_acc = evaluate_accuracy(test_iter, net) # \u8ba1\u7b97\u6d4b\u8bd5\u96c6\u51c6\u786e\u7387\n        print('\u5468\u671f %d, \u635f\u5931 %.4f, \u6570\u636e\u96c6\u51c6\u786e\u7387 %.3f, \u6d4b\u8bd5\u96c6\u51c6\u786e\u7387 %.3f'\n              % (epoch + 1, train_l_sum \/ n, train_acc_sum \/ n, test_acc))\n\n\nnum_epochs, lr, batch_size = 5, 100.0, 256\nloss = torch.nn.CrossEntropyLoss()\ntrain_iter, test_iter = load_data_fashion_mnist(batch_size)\ntrain_ch3(net, train_iter, test_iter, loss, num_epochs, 
batch_size, params, lr)<\/code><\/pre>\n\n\n\n<h2 class=\"wp-block-heading\">\u7b80\u6d01\u5b9e\u73b0<\/h2>\n\n\n\n<p>\u5728PyTorch\u4e2d\uff0c\u53ea\u9700\u8981\u5728\u5168\u8fde\u63a5\u5c42\u540e\u6dfb\u52a0<code>Dropout<\/code>\u5c42\u5e76\u6307\u5b9a\u4e22\u5f03\u6982\u7387\u3002\u5728\u8bad\u7ec3\u6a21\u578b\u65f6\uff0c<code>Dropout<\/code>\u5c42\u5c06\u4ee5\u6307\u5b9a\u7684\u4e22\u5f03\u6982\u7387\u968f\u673a\u4e22\u5f03\u4e0a\u4e00\u5c42\u7684\u8f93\u51fa\u5143\u7d20\uff1b\u5728\u6d4b\u8bd5\u6a21\u578b\u65f6\uff08\u5373<code>model.eval()<\/code>\u540e\uff09\uff0c<code>Dropout<\/code>\u5c42\u5e76\u4e0d\u53d1\u6325\u4f5c\u7528\u3002<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>from torch import nn\n\nclass FlattenLayer(nn.Module):\n    def __init__(self):\n        super(FlattenLayer, self).__init__()\n    def forward(self, x): \n        return x.view(x.shape&#91;0], -1) # x \u7684\u5f62\u72b6\u8f6c\u6362\u6210(batch, 784)\uff0cx.shape&#91;0]\u8868\u793abatch_size\uff0c-1\u8868\u793a\u81ea\u52a8\u63a8\u6d4b\n\nnet = nn.Sequential(\n        FlattenLayer(), # \u5148\u5c06\u8f93\u5165x\u5c55\u5e73\uff0c\u5373\u5c06\u5f62\u72b6\u4e3a(batch, 1, 28, 28)\u7684\u8f93\u5165\u8f6c\u6362\u6210(batch, 784)\u7684\u8f93\u51fa\n        nn.Linear(num_inputs, num_hiddens1), # \u9690\u85cf\u5c421\n        nn.ReLU(), # \u6fc0\u6d3b\u51fd\u6570\n        nn.Dropout(drop_prob1), # \u4e22\u5f03\u6cd5\n        nn.Linear(num_hiddens1, num_hiddens2),  # \u9690\u85cf\u5c422\n        nn.ReLU(), # \u6fc0\u6d3b\u51fd\u6570\n        nn.Dropout(drop_prob2), # \u4e22\u5f03\u6cd5\n        nn.Linear(num_hiddens2, 10) # \u8f93\u51fa\u5c42\n        )\n\nfor param in net.parameters():\n    nn.init.normal_(param, mean=0, std=0.01)<\/code><\/pre>\n\n\n\n<p>\u8bad\u7ec3\u5e76\u6d4b\u8bd5\u6a21\u578b<\/p>\n\n\n\n<pre class=\"wp-block-code\"><code>optimizer = torch.optim.SGD(net.parameters(), lr=0.5)\ntrain_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, 
optimizer)<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>\u65b9\u6cd5 \u9664\u4e86\u6743\u91cd\u8870\u51cf\u4ee5\u5916\uff0c\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u5e38\u5e38\u4f7f\u7528\u4e22\u5f03\u6cd5\uff08dropout\uff09\u6765\u5e94\u5bf9\u8fc7\u62df\u5408\u95ee\u9898\u3002\u4e22\u5f03\u6cd5\u6709\u4e00\u4e9b\u4e0d\u540c\u7684\u53d8\u4f53\u3002\u672c\u8282\u4e2d\u63d0\u5230\u7684\u4e22\u5f03\u6cd5\u7279 &#8230;<\/p>","protected":false},"author":1,"featured_media":1694,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[46,3],"tags":[45,44,12,22],"class_list":["post-1691","post","type-post","status-publish","format-standard","has-post-thumbnail","hentry","category-46","category-3","tag-45","tag-44","tag-12","tag-22"],"_links":{"self":[{"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/posts\/1691","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/comments?post=1691"}],"version-history":[{"count":1,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/posts\/1691\/revisions"}],"predecessor-version":[{"id":1709,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/posts\/1691\/revisions\/1709"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/media\/1694"}],"wp:attachment":[{"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/media?parent=1691"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/categories?post=1691"},{"taxono
my":"post_tag","embeddable":true,"href":"https:\/\/www.forillusion.com\/index.php\/wp-json\/wp\/v2\/tags?post=1691"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}