-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.html
228 lines (206 loc) · 32.2 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
<!DOCTYPE html><html lang="zh-CN" data-theme="light"><head><meta charset="UTF-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="viewport" content="width=device-width,initial-scale=1"><title>Eric's blog</title><meta name="keywords" content="Computer Science / Deep Learning"><meta name="author" content="Eric Chi"><meta name="copyright" content="Eric Chi"><meta name="format-detection" content="telephone=no"><meta name="theme-color" content="#ffffff"><meta name="description" content="Learn, play, sleep.">
<meta property="og:type" content="website">
<meta property="og:title" content="Eric's blog">
<meta property="og:url" content="https://w704710856.github.io/index.html">
<meta property="og:site_name" content="Eric's blog">
<meta property="og:description" content="Learn, play, sleep.">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/IMG_8918.JPG">
<meta property="article:author" content="Eric Chi">
<meta property="article:tag" content="Computer Science / Deep Learning">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/IMG_8918.JPG"><link rel="shortcut icon" href="/img/favicon.png"><link rel="canonical" href="https://w704710856.github.io/"><link rel="preconnect" href="//cdn.jsdelivr.net"/><link rel="preconnect" href="//busuanzi.ibruce.info"/><link rel="stylesheet" href="/css/index.css"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free/css/all.min.css" media="print" onload="this.media='all'"><script>const GLOBAL_CONFIG = {
// Root path of the site; prefixes generated URLs.
root: '/',
// Optional features disabled for this build.
algolia: undefined,
localSearch: undefined,
translate: undefined,
noticeOutdate: undefined,
// Code-highlighting options consumed by the theme's scripts.
// NOTE(review): "highlighjs" looks like a typo of "highlightjs", but this
// value is emitted by the theme generator and may be matched verbatim by
// its JS -- confirm against the theme before changing.
highlight: {"plugin":"highlighjs","highlightCopy":true,"highlightLang":true,"highlightHeightLimit":false},
// User-facing copy-to-clipboard messages
// ("copy succeeded" / "copy failed" / "browser not supported").
copy: {
success: '复制成功',
error: '复制错误',
noSupport: '浏览器不支持'
},
// Relative-date display toggles for homepage and post pages (both off).
relativeDate: {
homepage: false,
post: false
},
// Unit label ("days") for the site-runtime counter in the footer widget.
runtime: '天',
// Suffixes used when rendering relative dates
// ("just now", "minutes ago", "hours ago", "days ago", "months ago").
date_suffix: {
just: '刚刚',
min: '分钟前',
hour: '小时前',
day: '天前',
month: '个月前'
},
copyright: undefined,
// Image lightbox implementation; its CDN sources are listed under `source`.
lightbox: 'fancybox',
Snackbar: undefined,
// CDN locations for lazily loaded third-party libraries (see win.getScript).
source: {
jQuery: 'https://cdn.jsdelivr.net/npm/jquery@latest/dist/jquery.min.js',
justifiedGallery: {
js: 'https://cdn.jsdelivr.net/npm/justifiedGallery/dist/js/jquery.justifiedGallery.min.js',
css: 'https://cdn.jsdelivr.net/npm/justifiedGallery/dist/css/justifiedGallery.min.css'
},
fancybox: {
js: 'https://cdn.jsdelivr.net/npm/@fancyapps/fancybox@latest/dist/jquery.fancybox.min.js',
css: 'https://cdn.jsdelivr.net/npm/@fancyapps/fancybox@latest/dist/jquery.fancybox.min.css'
}
},
// Misc feature flags: figure captions on, image lazy-load off, heading anchors on.
isPhotoFigcaption: true,
islazyload: false,
isanchor: true
}</script><script id="config-diff">var GLOBAL_CONFIG_SITE = {
// Per-page state for the homepage; read by the inline theme scripts
// (e.g. detectApple checks GLOBAL_CONFIG_SITE.isHome).
title: 'Eric\'s blog',
isPost: false,
isHome: true,
isHighlightShrink: false,
isToc: false,
// Timestamp of the most recent post update (site build metadata).
postUpdate: '2022-05-26 00:46:08'
}</script><noscript><style type="text/css">
/* No-JS fallback (inside <noscript>): force visible the elements that
   are otherwise faded/swapped in by the theme's JavaScript. */
#nav {
opacity: 1
}
.justified-gallery img {
opacity: 1
}
/* <time> elements are hidden until JS formats them; show them as-is. */
#recent-posts time,
#post-meta time {
display: inline !important
}
</style></noscript><script>(win=>{
win.saveToLocal = {
  // Persist `value` under `key` with a time-to-live given in days.
  // A ttl of 0 means "do not persist"; the call is a no-op.
  set: function setWithExpiry(key, value, ttl) {
    if (ttl === 0) return
    const now = new Date()
    const expiryDay = ttl * 86400000 // days -> milliseconds
    const item = {
      value: value,
      expiry: now.getTime() + expiryDay,
    }
    localStorage.setItem(key, JSON.stringify(item))
  },
  // Read a value stored by `set`. Returns undefined when the key is
  // missing, expired (the stale entry is removed), or unreadable.
  get: function getWithExpiry(key) {
    const itemStr = localStorage.getItem(key)
    if (!itemStr) {
      return undefined
    }
    // Guard against corrupted or legacy entries: previously a malformed
    // JSON payload made JSON.parse throw here, aborting the whole inline
    // script so the saved theme/aside state was never applied.
    let item
    try {
      item = JSON.parse(itemStr)
    } catch (e) {
      localStorage.removeItem(key)
      return undefined
    }
    const now = new Date()
    if (now.getTime() > item.expiry) {
      localStorage.removeItem(key)
      return undefined
    }
    return item.value
  }
}
// Inject a <script> tag for `url`; the promise resolves once it loads
// and rejects on a network/parse error.
win.getScript = url => new Promise((resolve, reject) => {
  const tag = document.createElement('script')
  tag.src = url
  tag.async = true
  tag.onerror = reject
  tag.onload = tag.onreadystatechange = function () {
    // Legacy IE fires onreadystatechange; ignore intermediate states.
    const state = this.readyState
    if (state && state !== 'loaded' && state !== 'complete') return
    // Detach both handlers so resolve() cannot fire twice.
    tag.onload = tag.onreadystatechange = null
    resolve()
  }
  document.head.appendChild(tag)
})
// Switch the document to the dark theme and sync the browser UI color.
win.activateDarkMode = function () {
  document.documentElement.setAttribute('data-theme', 'dark')
  const themeColorMeta = document.querySelector('meta[name="theme-color"]')
  if (themeColorMeta !== null) {
    themeColorMeta.setAttribute('content', '#0d0d0d')
  }
}
// Switch the document to the light theme and sync the browser UI color.
win.activateLightMode = function () {
  document.documentElement.setAttribute('data-theme', 'light')
  const themeColorMeta = document.querySelector('meta[name="theme-color"]')
  if (themeColorMeta !== null) {
    themeColorMeta.setAttribute('content', '#ffffff')
  }
}
// Re-apply the visitor's saved preferences before first paint.
const savedTheme = saveToLocal.get('theme')
if (savedTheme === 'dark') {
  activateDarkMode()
} else if (savedTheme === 'light') {
  activateLightMode()
}
// Restore the single/double-column (aside) layout choice, if any.
const savedAside = saveToLocal.get('aside-status')
if (savedAside !== undefined) {
  const rootClasses = document.documentElement.classList
  if (savedAside === 'hide') {
    rootClasses.add('hide-aside')
  } else {
    rootClasses.remove('hide-aside')
  }
}
// Tag Apple devices on the homepage so CSS can special-case them.
const detectApple = () => {
  if (GLOBAL_CONFIG_SITE.isHome && /iPad|iPhone|iPod|Macintosh/.test(navigator.userAgent)) {
    document.documentElement.classList.add('apple')
  }
}
detectApple()
})(window)</script><meta name="generator" content="Hexo 5.4.0"></head><body><div id="sidebar"><div id="menu-mask"></div><div id="sidebar-menus"><div class="avatar-img is-center"><img src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/IMG_8918.JPG" onerror="onerror=null;src='/img/friend_404.gif'" alt="avatar"/></div><div class="site-data"><div class="data-item is-center"><div class="data-item-link"><a href="/archives/"><div class="headline">文章</div><div class="length-num">7</div></a></div></div><div class="data-item is-center"><div class="data-item-link"><a href="/tags/"><div class="headline">标签</div><div class="length-num">9</div></a></div></div><div class="data-item is-center"><div class="data-item-link"><a href="/categories/"><div class="headline">分类</div><div class="length-num">2</div></a></div></div></div><hr/><div class="menus_items"><div class="menus_item"><a class="site-page" href="/"><i class="fa-fw fas fa-home"></i><span> 主页</span></a></div><div class="menus_item"><a class="site-page" href="/archives/"><i class="fa-fw fas fa-archive"></i><span> 时间轴</span></a></div><div class="menus_item"><a class="site-page" href="/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 目录</span></a></div><div class="menus_item"><a class="site-page" href="/about/"><i class="fa-fw fas fa-heart"></i><span> 关于我</span></a></div></div></div></div><div class="page" id="body-wrap"><header class="not-top-img" id="page-header"><nav id="nav"><span id="blog_name"><a id="site-name" href="/">Eric's blog</a></span><div id="menus"><div class="menus_items"><div class="menus_item"><a class="site-page" href="/"><i class="fa-fw fas fa-home"></i><span> 主页</span></a></div><div class="menus_item"><a class="site-page" href="/archives/"><i class="fa-fw fas fa-archive"></i><span> 时间轴</span></a></div><div class="menus_item"><a class="site-page" href="/tags/"><i 
class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 目录</span></a></div><div class="menus_item"><a class="site-page" href="/about/"><i class="fa-fw fas fa-heart"></i><span> 关于我</span></a></div></div><div id="toggle-menu"><a class="site-page"><i class="fas fa-bars fa-fw"></i></a></div></div></nav></header><main class="layout" id="content-inner"><div class="recent-posts" id="recent-posts"><div class="recent-post-item"><div class="post_cover left_radius"><a href="/p/48225/" title="Concept-based 基于概念的解释方法"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/image-20220524065455081.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Concept-based 基于概念的解释方法"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48225/" title="Concept-based 基于概念的解释方法">Concept-based 基于概念的解释方法</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2022-05-24T04:40:04.000Z" title="发表于 2022-05-24 12:40:04">2022-05-24</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">深度学习</a></span></div><div class="content">基于概念的解释方法是18年提出的新方法,其动机在于先前的方法仅使用图像的像素级特征,其通常稀疏且不易被人类理解。这是因为人类观察图像时,通常看到的是某个像素区域或图像中的某一部分,而非逐个像素的观察图像。由于其更优秀的解释表现,该方法也有更多的关注。本文将这一方法从提出到目前的研究现状(to my best knowledge)进行梳理。
CAV
Concept activation vector( CAV,概念激活向量 )[1]是基于概念的解释方法的开山之作,其框架如图1.1所示。每个概念由一个输入图像的集合定义。例如,要定义“卷发”这一概念,用户需要使用若干包含弯卷头发的发型的相关图像。
设 x ∈ ℝⁿ 为输入图像,取神经网络模型中的第 l 层,该层具有 m 个神经元,即该层对应的映射函数为 f_l : ℝⁿ → ℝᵐ。
第一步,选取可以代表一个概念的图像集合,以定义一个概念。作者认为这一策略的优点是,概念的定义不会受限于图像中已经存在的 ...</div></div></div><div class="recent-post-item"><div class="post_cover right_radius"><a href="/p/48223/" title="Transformer"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220405221619.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Transformer"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48223/" title="Transformer">Transformer</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2022-04-05T06:37:04.000Z" title="发表于 2022-04-05 14:37:04">2022-04-05</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%9D%8E%E5%AE%8F%E6%AF%85%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/">李宏毅机器学习笔记</a></span></div><div class="content">引言
近期因导师要求,我学习机器学习的其他领域内容,于B站上观看李宏毅机器学习课程,故以记录课程笔记。
Transformer_P1_Encoder
变形金刚的英文就是Transformer,那Transformer也跟我们之后会,提到的BERT有非常强烈的关係,所以这边有一个BERT探出头来,代表说Transformer跟BERT,是很有关係的
Sequence-to-sequence (Seq2seq)
Transformer就是一个,==Sequence-to-sequence==的model,他的缩写,我们会写做==Seq2seq==,那Sequence-to-sequence的model,又是什麼呢
我们之前在讲input a sequence的,case的时候,我们说input是一个sequence,那output有几种可能
一种是input跟output的长度一样,这个是在作业二的时候做的
有一个case是output指,output一个东西,这个是在作业四的时候做的
那接来作业五的case是,我们不知道应该要output多长,由机器自己决定output的长度,即Se ...</div></div></div><div class="recent-post-item"><div class="post_cover left_radius"><a href="/p/48224/" title="GAN 生成对抗网络"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220405222942.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="GAN 生成对抗网络"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48224/" title="GAN 生成对抗网络">GAN 生成对抗网络</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2022-04-05T06:37:04.000Z" title="发表于 2022-04-05 14:37:04">2022-04-05</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%9D%8E%E5%AE%8F%E6%AF%85%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/">李宏毅机器学习笔记</a></span></div><div class="content">引言
近期因导师要求,我学习机器学习的其他领域内容,于B站上观看李宏毅机器学习课程,故以记录课程笔记。
GAN_P1
Generation
Network as Generator
接下来要进入一个,新的主题 我们要讲==生成==这件事情
到目前為止大家学到的network,都是一个function,你给他一个X就可以输出一个Y
我们已经学到各式各样的,network架构,可以处理不同的X 不同的Y
我们学到输入的X
如果是一张图片的时候怎麼办
如果是一个sequence的时候怎麼办
我们也学到输出的Y
可以是一个数值
可以是一个类别
也可以是一个sequence
接下来我们要进入一个新的主题,这个新的主题是要把network,当做一个==generator==来用,我们要把network拿来做生成使用
那把network拿来,当作generator使用,他特别的地方是现在network的输入,会加上一个random的variable,会加上一个Z
这个Z,是从某一个,distribution sample出来的,所以现在network它不是只看一个固定的X得到输出,它是同 ...</div></div></div><div class="recent-post-item"><div class="post_cover right_radius"><a href="/p/48222/" title="Self-Attention 自注意力机制"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220405214411.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Self-Attention 自注意力机制"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48222/" title="Self-Attention 自注意力机制">Self-Attention 自注意力机制</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2022-04-05T06:37:04.000Z" title="发表于 2022-04-05 14:37:04">2022-04-05</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%9D%8E%E5%AE%8F%E6%AF%85%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/">李宏毅机器学习笔记</a></span></div><div class="content">引言
近期因导师要求,我学习机器学习的其他领域内容,于B站上观看李宏毅机器学习课程,故以记录课程笔记。
Self-attention_P1
CNN以后,我们要讲另外一个常见的Network架构,这个架构叫做Self-Attention,而这个Self-Attention
Sophisticated Input
到目前為止,我们的Network的Input都是一个向量,不管是在预测这个,YouTube观看人数的问题上啊,还是影像处理上啊,我们的输入都可以看作是一个向量,然后我们的输出,可能是一个数值,这个是Regression,可能是一个类别,这是Classification
但假设我们遇到更復杂的问题呢,假设我们说输入是多个向量,而且这个输入的向量的数目是会改变的呢
我们刚才在讲影像辨识的时候,我还特别强调我们假设输入的影像大小都是一样的,那现在假设每次我们Model输入的Sequence的数目,Sequence的长度都不一样呢,那这个时候应该要怎麼处理?
Vector Set as Input
文字处理
假设我们今天要Network的输入是一个句子,每一个句子的长度都不一样,每个句 ...</div></div></div><div class="recent-post-item"><div class="post_cover left_radius"><a href="/p/48221/" title="显著图的相关工作"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220330152025.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="显著图的相关工作"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48221/" title="显著图的相关工作">显著图的相关工作</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2022-03-30T06:43:23.000Z" title="发表于 2022-03-30 14:43:23">2022-03-30</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">深度学习</a></span></div><div class="content">前言
撰写本文的目的是为了总结在日常阅读论文算法的各个比较算法。通常在论文的实验部分,作者会将自己提出的算法与之前的算法进行比较。本文即为这些算法做统一梳理,从过去的论文中学习显著图这一研究领域的进展。
类激活映射方法
GradCAM++
简介
GradCAM++[1]是对经典的GradCAM[2]算法的改进。[1]的motivation是如果输入图像中同一类别的物体多次出现,GradCAM不能正确地定位图像中的各个物体,如图所示。
方法
令 A^k 为最尾卷积层的第 k 个特征图,GradCAM算法计算的类激活映射为
L^c_{ij}=\sum{w_k^c\cdot A^k_{ij}}\tag{1.1}
其中,对每个特定的特征图 A^k 的权重由下式计算
w_k^c=\frac{1}{Z}\sum\sum\frac{\partial Y^c}{\partial A^k_{ij}}\tag{1.2}
...</div></div></div><div class="recent-post-item"><div class="post_cover right_radius"><a href="/p/48220/" title="Sanity Check 完备性检验"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/%E5%BE%AE%E4%BF%A1%E5%9B%BE%E7%89%87_20220116143522.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Sanity Check 完备性检验"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48220/" title="Sanity Check 完备性检验">Sanity Check 完备性检验</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2022-01-16T06:37:04.000Z" title="发表于 2022-01-16 14:37:04">2022-01-16</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">深度学习</a></span></div><div class="content">引言
生成类显著图的可视化方法,简称显著性方法,是解释性方法里的常用方法,其突出输入图像中的相关特征,对于分类网络来说即为图像对于指定类的高度相关区域。但是,对显著性方法的评估系统还有待完善。
完备性检验(sanity check)[1] 是用来检测这些方法充分性的评价手段,该方法简单高效且易于理解,对显著性方法的评价可谓一针见血。同时[1]中还指出,仅仅依靠人类视觉观察可视化结果的评价方式可能会导致错误,需要结合更严谨的评价指标来评估显著性方法的性能,这一点同样重要。
[1]表示,在进行大量实验后,许多显著性方法生成的显著图和图像的边缘检测器的输出非常类似。这一发现将一些算法直接“打入深渊”,至少在大多数情况下,这种算法没有真正理解模型内部的工作,由其给出的显著图对模型的解释几乎没有作用,因为这些算法就像边缘检测器一样——对网络模型没有依赖、没有解释。
两个完备性检验的测试
模型参数随机化测试
我们知道,在训练完成后,网络模型中的参数中编码了从数据集中学到的知识,其抽象能力含与其中。如果一个显著性方法输出的显著图要对解释或调试该模型有帮助,它应该对这些参数敏感。换 ...</div></div></div><div class="recent-post-item"><div class="post_cover left_radius"><a href="/p/48219/" title="Class Activation Map(CAM) 类激活映射方法"> <img class="post_bg" src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20211215125222.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Class Activation Map(CAM) 类激活映射方法"></a></div><div class="recent-post-info"><a class="article-title" href="/p/48219/" title="Class Activation Map(CAM) 类激活映射方法">Class Activation Map(CAM) 类激活映射方法</a><div class="article-meta-wrap"><span class="post-meta-date"><i class="far fa-calendar-alt"></i><span class="article-meta-label">发表于</span><time datetime="2021-12-15T04:40:04.000Z" title="发表于 2021-12-15 12:40:04">2021-12-15</time></span><span class="article-meta"><span class="article-meta__separator">|</span><i class="fas fa-inbox"></i><a class="article-meta__categories" href="/categories/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">深度学习</a></span></div><div class="content">前言
Class activation map(类激活映射,也译作类激活图)是我确定研究方向后学习的第一个可解释性中的领域。至于我为什么要选择可解释性作为自己的研究方向,需要把时间拉回到大学。在我刚接触深度学习的时候,心里就有一个自然而然的疑问:为什么神经网络可以做到以往经典的机器学习算法不能做到的事?它的精度是如此之高,以至人工智能前沿几乎没有了传统算法的痕迹。不把这个疑问弄清楚,我实在没办法说服自己搭建更深层、更精巧的神经网络模型来继续提高已有的表现。所幸这个研究方向也得到了导师的极力支持,可谓天时地利人和。
类激活映射于2016年在CVPR上被首次提出以来已5年有余,在深度学习高速发展的当下,可以称得上是”老算法“了。其建立在这样的事实之上:卷积网络中最尾卷积层的卷积核(convolutional kernel,有些论文也称为:卷积单元,convolutional unit)含有对输入图像的高度抽象。这可以解释为,在神经网络对图像进行一层一层的卷积和池化操作后,图像中的信息被凝练于各卷积层的卷积核中,最尾卷积层则含有最抽象最有用的图像信息。基于此,类激活映射分析并提取这些隐藏的信 ...</div></div></div><nav id="pagination"><div class="pagination"><span class="page-number current">1</span></div></nav></div><div class="aside-content" id="aside-content"><div class="card-widget card-info"><div class="is-center"><div class="avatar-img"><img src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/IMG_8918.JPG" onerror="this.onerror=null;this.src='/img/friend_404.gif'" alt="avatar"/></div><div class="author-info__name">Eric Chi</div><div class="author-info__description">Learn, play, sleep.</div></div><div class="card-info-data"><div class="card-info-data-item is-center"><a href="/archives/"><div class="headline">文章</div><div class="length-num">7</div></a></div><div class="card-info-data-item is-center"><a href="/tags/"><div class="headline">标签</div><div class="length-num">9</div></a></div><div class="card-info-data-item is-center"><a href="/categories/"><div class="headline">分类</div><div class="length-num">2</div></a></div></div><a class="button--animated" id="card-info-btn" target="_blank" rel="noopener" href="https://github.com/w704710856"><i class="fab fa-github"></i><span>Follow Me</span></a><div class="card-info-social-icons is-center"><a class="social-icon" href="https://github.com/w704710856" target="_blank" title="Github"><i class="fab fa-github"></i></a><a class="social-icon" href="mailto:[email protected]" target="_blank" title="Email"><i class="fas fa-envelope"></i></a></div></div><div class="sticky_layout"><div class="card-widget card-recent-post"><div class="item-headline"><i class="fas fa-history"></i><span>最新文章</span></div><div class="aside-list"><div class="aside-list-item"><a class="thumbnail" href="/p/48225/" title="Concept-based 基于概念的解释方法"><img 
src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/image-20220524065455081.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Concept-based 基于概念的解释方法"/></a><div class="content"><a class="title" href="/p/48225/" title="Concept-based 基于概念的解释方法">Concept-based 基于概念的解释方法</a><time datetime="2022-05-24T04:40:04.000Z" title="发表于 2022-05-24 12:40:04">2022-05-24</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/p/48223/" title="Transformer"><img src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220405221619.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Transformer"/></a><div class="content"><a class="title" href="/p/48223/" title="Transformer">Transformer</a><time datetime="2022-04-05T06:37:04.000Z" title="发表于 2022-04-05 14:37:04">2022-04-05</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/p/48224/" title="GAN 生成对抗网络"><img src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220405222942.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="GAN 生成对抗网络"/></a><div class="content"><a class="title" href="/p/48224/" title="GAN 生成对抗网络">GAN 生成对抗网络</a><time datetime="2022-04-05T06:37:04.000Z" title="发表于 2022-04-05 14:37:04">2022-04-05</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/p/48222/" title="Self-Attention 自注意力机制"><img src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220405214411.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="Self-Attention 自注意力机制"/></a><div class="content"><a class="title" href="/p/48222/" title="Self-Attention 自注意力机制">Self-Attention 自注意力机制</a><time datetime="2022-04-05T06:37:04.000Z" title="发表于 2022-04-05 14:37:04">2022-04-05</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/p/48221/" title="显著图的相关工作"><img src="https://pic-1300081557.cos.ap-chongqing.myqcloud.com/20220330152025.png" onerror="this.onerror=null;this.src='/img/404.jpg'" alt="显著图的相关工作"/></a><div 
class="content"><a class="title" href="/p/48221/" title="显著图的相关工作">显著图的相关工作</a><time datetime="2022-03-30T06:43:23.000Z" title="发表于 2022-03-30 14:43:23">2022-03-30</time></div></div></div></div><div class="card-widget card-categories"><div class="item-headline">
<i class="fas fa-folder-open"></i>
<span>分类</span>
</div>
<ul class="card-category-list" id="aside-cat-list">
<li class="card-category-list-item "><a class="card-category-list-link" href="/categories/%E6%9D%8E%E5%AE%8F%E6%AF%85%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0/"><span class="card-category-list-name">李宏毅机器学习笔记</span><span class="card-category-list-count">3</span></a></li><li class="card-category-list-item "><a class="card-category-list-link" href="/categories/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/"><span class="card-category-list-name">深度学习</span><span class="card-category-list-count">4</span></a></li>
</ul></div><div class="card-widget card-tags"><div class="item-headline"><i class="fas fa-tags"></i><span>标签</span></div><div class="card-tag-cloud"><a href="/tags/Transformer/" style="font-size: 1.1em; color: #999">Transformer</a> <a href="/tags/%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C/" style="font-size: 1.3em; color: #99a1ac">卷积神经网络</a> <a href="/tags/%E5%8F%AF%E8%A7%A3%E9%87%8A%E6%80%A7/" style="font-size: 1.5em; color: #99a9bf">可解释性</a> <a href="/tags/%E5%9F%BA%E4%BA%8E%E6%89%B0%E5%8A%A8%E6%96%B9%E6%B3%95/" style="font-size: 1.1em; color: #999">基于扰动方法</a> <a href="/tags/%E5%9F%BA%E4%BA%8E%E6%A6%82%E5%BF%B5/" style="font-size: 1.1em; color: #999">基于概念</a> <a href="/tags/%E5%AE%8C%E5%A4%87%E6%80%A7%E6%A3%80%E9%AA%8C/" style="font-size: 1.1em; color: #999">完备性检验</a> <a href="/tags/%E7%94%9F%E6%88%90%E5%AF%B9%E6%8A%97%E7%BD%91%E7%BB%9C/" style="font-size: 1.1em; color: #999">生成对抗网络</a> <a href="/tags/%E7%B1%BB%E6%BF%80%E6%B4%BB%E6%98%A0%E5%B0%84/" style="font-size: 1.3em; color: #99a1ac">类激活映射</a> <a href="/tags/%E8%87%AA%E6%B3%A8%E6%84%8F%E5%8A%9B%E6%9C%BA%E5%88%B6/" style="font-size: 1.1em; color: #999">自注意力机制</a></div></div><div class="card-widget card-archives"><div class="item-headline"><i class="fas fa-archive"></i><span>归档</span></div><ul class="card-archive-list"><li class="card-archive-list-item"><a class="card-archive-list-link" href="/archives/2022/05/"><span class="card-archive-list-date">五月 2022</span><span class="card-archive-list-count">1</span></a></li><li class="card-archive-list-item"><a class="card-archive-list-link" href="/archives/2022/04/"><span class="card-archive-list-date">四月 2022</span><span class="card-archive-list-count">3</span></a></li><li class="card-archive-list-item"><a class="card-archive-list-link" href="/archives/2022/03/"><span class="card-archive-list-date">三月 2022</span><span class="card-archive-list-count">1</span></a></li><li class="card-archive-list-item"><a class="card-archive-list-link" 
href="/archives/2022/01/"><span class="card-archive-list-date">一月 2022</span><span class="card-archive-list-count">1</span></a></li><li class="card-archive-list-item"><a class="card-archive-list-link" href="/archives/2021/12/"><span class="card-archive-list-date">十二月 2021</span><span class="card-archive-list-count">1</span></a></li></ul></div><div class="card-widget card-webinfo"><div class="item-headline"><i class="fas fa-chart-line"></i><span>网站资讯</span></div><div class="webinfo"><div class="webinfo-item"><div class="item-name">文章数目 :</div><div class="item-count">7</div></div><div class="webinfo-item"><div class="item-name">已运行时间 :</div><div class="item-count" id="runtimeshow" data-publishDate="2021-12-09T16:00:00.000Z"></div></div><div class="webinfo-item"><div class="item-name">本站总字数 :</div><div class="item-count">86.9k</div></div><div class="webinfo-item"><div class="item-name">本站访客数 :</div><div class="item-count" id="busuanzi_value_site_uv"></div></div><div class="webinfo-item"><div class="item-name">本站总访问量 :</div><div class="item-count" id="busuanzi_value_site_pv"></div></div><div class="webinfo-item"><div class="item-name">最后更新时间 :</div><div class="item-count" id="last-push-date" data-lastPushDate="2022-05-25T16:46:08.241Z"></div></div></div></div></div></div></main><footer id="footer"><div id="footer-wrap"><div class="copyright">©2021 - 2022 By Eric Chi</div></div></footer></div><div id="rightside"><div id="rightside-config-hide"><button id="darkmode" type="button" title="浅色和深色模式转换"><i class="fas fa-adjust"></i></button><button id="hide-aside-btn" type="button" title="单栏和双栏切换"><i class="fas fa-arrows-alt-h"></i></button></div><div id="rightside-config-show"><button id="rightside_config" type="button" title="设置"><i class="fas fa-cog fa-spin"></i></button><button id="go-up" type="button" title="回到顶部"><i class="fas fa-arrow-up"></i></button></div></div><div><script src="/js/utils.js"></script><script src="/js/main.js"></script><div 
class="js-pjax"></div><script defer="defer" id="ribbon" src="https://cdn.jsdelivr.net/npm/butterfly-extsrc@1/dist/canvas-ribbon.min.js" size="150" alpha="0.6" zIndex="-1" mobile="true" data-click="true"></script><script async data-pjax src="//busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script></div></body></html>