# A GitHub Action that runs the Retriever on arbitrary GitHub repositories.
name: "Run Retriever"
description: |
  Reverse-engineers a project's source into a Palladio Component Model.
  Uploads the PCM as an artifact called `retriever`.
  Within that artifact, the PCM is located at the input `source_path`,
  e.g. for `source_path=MyProject/Repository` the output is `[retriever]/MyProject/Repository/repository.pcm`.
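# Example workflow usage (a minimal sketch; `<owner>/<action-repo>` is a placeholder,
# substitute the repository and ref under which this action is actually published):
#
#   jobs:
#     retriever:
#       runs-on: ubuntu-latest
#       steps:
#         - uses: actions/checkout@v4
#         - uses: <owner>/<action-repo>@v1
#           with:
#             source_path: "."
#             benchmark: "true"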
inputs:
source_path:
description: "The location of the project to reverse-engineer"
required: true
default: "."
rules:
description: "The rules to reverse-engineer with, as a comma-separated list"
required: true
default: "org.palladiosimulator.retriever.extraction.rules.maven,org.palladiosimulator.retriever.extraction.rules.spring"
rules_path:
description: "The location of additional project specific rules"
required: false
benchmark:
description: "Whether use hyperfine to benchmark the execution of the retriever, otherwise just the one time execution time is reported"
required: false
default: "false"
analyze_vulnerabilities:
description: "Whether use analyze vulnerabilities using snyk"
required: false
default: "false"
snyk_token:
description: "snyk API token"
required: false
default: ""
nist_nvd_token:
description: "NIST NVD API token"
required: false
default: ""
runs:
using: "composite"
steps:
- name: Set Up JDK 17
uses: actions/setup-java@v4
with:
distribution: "temurin"
java-version: "17"
- name: Create Temporary Directory
shell: bash
run: |
TMP_DIR=$(mktemp -d)
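        # Appending key=value pairs to $GITHUB_ENV exposes them as env.<key> to all later steps.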
echo "tmp_dir=$TMP_DIR" >> $GITHUB_ENV
mkdir $TMP_DIR/retriever_out
- name: Get Action Version
shell: bash
      # Assuming a path format like in the GitHub Actions documentation:
      #   /home/runner/work/_actions/repo-owner/name-of-action-repo/v1
      # There, the 7th path segment is the action's tag. `cut` is given field 8
      # because it also counts the empty field before the leading /.
run: |
ACTION_VERSION=$(echo ${{ github.action_path }} | cut -d / -f 8- -)
echo "action_version=$ACTION_VERSION" >> $GITHUB_ENV
- name: Determine Retriever Version
shell: bash
      # Use the latest Retriever release, or the release whose tag matches this action's version.
run: |
if [[ "${{ env.action_version }}" == "main" ]]; then
echo "retriever=${{ github.api_url }}/repos/PalladioSimulator/Palladio-ReverseEngineering-Retriever/releases/latest" >> $GITHUB_ENV
else
echo "retriever=${{ github.api_url }}/repos/PalladioSimulator/Palladio-ReverseEngineering-Retriever/releases/tags/${{ env.action_version }}" >> $GITHUB_ENV
fi
- name: Gather Git Repository Info
shell: bash
run: |
GIT_INFO_FILE="${{ env.tmp_dir }}/retriever_out/git_info.md"
SOURCE_PATH="${{ github.workspace }}/${{ inputs.source_path }}"
echo "GIT_INFO_FILE=$GIT_INFO_FILE" >> $GITHUB_ENV
gather_git_info() {
REPO_URL=$(git config --get remote.origin.url)
BRANCH=$(git branch --show-current)
COMMIT=$(git log -1 --pretty=format:"%H")
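          # Derive "owner/repo" from either an SSH remote (git@github.com:owner/repo.git)
          # or an HTTPS remote (https://github.com/owner/repo.git).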
REPO_PATH=$(echo $REPO_URL | sed -E 's/.*github\.com[:\/](.*)/\1/')
OWNER=$(echo $REPO_PATH | cut -d'/' -f1)
REPO_NAME=$(echo $REPO_PATH | cut -d'/' -f2 | sed 's/\.git$//')
API_URL="https://api.github.com/repos/$OWNER/$REPO_NAME"
NUM_STARS=$(curl -s $API_URL | jq '.stargazers_count // 0')
# Get number of commits, handle the case where pagination info might not be present (less than 30 commits)
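          # With per_page=1, the page number in the rel="last" Link entry equals the total count, e.g.:
          #   link: <.../commits?per_page=1&page=2>; rel="next", <.../commits?per_page=1&page=1234>; rel="last"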
COMMITS_LINK_HEADER=$(curl -s -I "${API_URL}/commits?per_page=1" | grep -i 'link:' || true)
if [ -z "$COMMITS_LINK_HEADER" ]; then
NUM_COMMITS=$(curl -s "${API_URL}/commits" | jq 'length')
else
            # sed -n ... p prints only on a match; default to 1 when no rel="last" entry is present.
            NUM_COMMITS=$(echo "$COMMITS_LINK_HEADER" | sed -nE 's/.*page=([0-9]+)>; rel="last".*/\1/p')
            NUM_COMMITS=${NUM_COMMITS:-1}
fi
# Get number of contributors, handle the case where pagination info might not be present (less than 30 contributors)
CONTRIBUTORS_LINK_HEADER=$(curl -s -I "${API_URL}/contributors?per_page=1" | grep -i 'link:' || true)
if [ -z "$CONTRIBUTORS_LINK_HEADER" ]; then
NUM_CONTRIBUTORS=$(curl -s "${API_URL}/contributors" | jq 'length')
else
            NUM_CONTRIBUTORS=$(echo "$CONTRIBUTORS_LINK_HEADER" | sed -nE 's/.*page=([0-9]+)>; rel="last".*/\1/p')
            NUM_CONTRIBUTORS=${NUM_CONTRIBUTORS:-1}
fi
echo "| Attribute | Value |"
echo "| ----------------- | ----- |"
echo "| Repository URL | $REPO_URL |"
echo "| Branch | $BRANCH |"
echo "| Commit | $COMMIT |"
echo "| Number of Commits | $NUM_COMMITS |"
echo "| Stars | $NUM_STARS |"
echo "| Contributors | $NUM_CONTRIBUTORS |"
echo ""
}
print_header() {
echo "## Git repository information"
}
        # Gather Git repository information from the source directory itself or from the repositories directly inside it.
if [ -d "$SOURCE_PATH/.git" ]; then
cd "$SOURCE_PATH"
print_header >> "$GIT_INFO_FILE"
gather_git_info >> "$GIT_INFO_FILE"
else
FOUND_REPO=false
for d in "$SOURCE_PATH"/*; do
if [ -d "$d/.git" ]; then
cd "$d"
if [ "$FOUND_REPO" = false ]; then
print_header >> "$GIT_INFO_FILE"
fi
gather_git_info >> "$GIT_INFO_FILE"
FOUND_REPO=true
fi
done
if [ "$FOUND_REPO" = false ]; then
echo "no git repository found" > "$GIT_INFO_FILE"
fi
fi
- name: Install Neofetch
shell: bash
run: sudo apt-get install -y neofetch
- name: Gather Specific System Information with Neofetch
shell: bash
run: |
SYSTEM_INFO_FILE=${{ env.tmp_dir }}/retriever_out/system_info.md
echo "SYSTEM_INFO_FILE=$SYSTEM_INFO_FILE" >> $GITHUB_ENV
{
echo "## System information"
echo "| Attribute | Value |"
echo "| --------- | ----- |"
neofetch os distro kernel cpu gpu memory --stdout | \
sed -E 's/^(os): (.*)$/| OS | \2 |/; s/^(distro): (.*)$/| Distro | \2 |/; s/^(kernel): (.*)$/| Kernel | \2 |/; s/^(cpu): (.*)$/| CPU | \2 |/; s/^(gpu): (.*)$/| GPU | \2 |/; s/^(memory): (.*)$/| Memory | \2 |/'
} > $SYSTEM_INFO_FILE
- name: Install cloc
shell: bash
run: |
        sudo apt-get install -y cloc
- name: Run cloc Analysis
shell: bash
run: |
CLOC_INFO_FILE=${{ env.tmp_dir }}/retriever_out/cloc.md
# Define your whitelist of languages here
WHITELIST=("CSV" "Dockerfile" "Gradle" "Java" "JSON" "Maven" "Properties" "XML" "YAML")
cloc ${{ inputs.source_path }} --unicode --autoconf --diff-timeout 300 --docstring-as-code --read-binary-files --md --quiet > $CLOC_INFO_FILE.tmp
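        # cloc --md output is roughly: a "cloc|github.com/AlDanial/cloc vX.YY ..." version row,
        # a "Language|files|blank|comment|code" header, one row per language, and a trailing SUM row.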
CLOC_VERSION_INFO=$(grep -E '^cloc\|' $CLOC_INFO_FILE.tmp | sed 's/cloc|//')
echo "CLOC_INFO_FILE=$CLOC_INFO_FILE" >> $GITHUB_ENV
{
SUM_FILES=0
SUM_BLANK=0
SUM_COMMENT=0
SUM_CODE=0
SUM_TECHNOLOGY=0
echo "## Cloc analysis"
echo ""
echo "<!-- $CLOC_VERSION_INFO -->"
echo ""
echo "|Technology|files|blank|comment|code|"
echo "|:-------|-------:|-------:|-------:|-------:|"
while IFS= read -r line; do
# Skip the header line
if [[ "$line" =~ Language ]]; then
continue
fi
# Split the line into components
IFS='|' read -r language files blank comment code <<< "$line"
language=$(echo "$language" | xargs)
files=$(echo "$files" | xargs)
blank=$(echo "$blank" | xargs)
comment=$(echo "$comment" | xargs)
code=$(echo "$code" | xargs)
# Check if the language is in the whitelist
if [[ " ${WHITELIST[@]} " =~ " ${language} " ]]; then
SUM_FILES=$((SUM_FILES + files))
SUM_BLANK=$((SUM_BLANK + blank))
SUM_COMMENT=$((SUM_COMMENT + comment))
SUM_CODE=$((SUM_CODE + code))
SUM_TECHNOLOGY=$((SUM_TECHNOLOGY + 1))
echo "|$language|$files|$blank|$comment|$code|"
fi
done < <(head -n -2 $CLOC_INFO_FILE.tmp | grep -v '^$')
# Combine JavaScript and TypeScript rows into ECMAScript
JS_ROW=$(grep -E '^JavaScript' $CLOC_INFO_FILE.tmp | cut -d'|' -f2- || echo "")
TS_ROW=$(grep -E '^TypeScript' $CLOC_INFO_FILE.tmp | cut -d'|' -f2- || echo "")
if [[ -n "$JS_ROW" || -n "$TS_ROW" ]]; then
js_files=0; js_blank=0; js_comment=0; js_code=0
ts_files=0; ts_blank=0; ts_comment=0; ts_code=0
if [[ -n "$JS_ROW" ]]; then
IFS='|' read -r js_files js_blank js_comment js_code <<< "$JS_ROW"
fi
if [[ -n "$TS_ROW" ]]; then
IFS='|' read -r ts_files ts_blank ts_comment ts_code <<< "$TS_ROW"
fi
ecma_files=$((js_files + ts_files))
ecma_blank=$((js_blank + ts_blank))
ecma_comment=$((js_comment + ts_comment))
ecma_code=$((js_code + ts_code))
SUM_FILES=$((SUM_FILES + ecma_files))
SUM_BLANK=$((SUM_BLANK + ecma_blank))
SUM_COMMENT=$((SUM_COMMENT + ecma_comment))
SUM_CODE=$((SUM_CODE + ecma_code))
SUM_TECHNOLOGY=$((SUM_TECHNOLOGY + 1))
echo "|ECMAScript|$ecma_files|$ecma_blank|$ecma_comment|$ecma_code|"
fi
# Combine SQL, SQL Data, and SQL Stored Procedure rows into SQL
          # Match "SQL|" exactly so the "SQL Data" and "SQL Stored Procedure" rows are not captured here as well.
          SQL_ROW=$(grep -E '^SQL\|' $CLOC_INFO_FILE.tmp | cut -d'|' -f2- || echo "")
SQL_DATA_ROW=$(grep -E '^SQL Data' $CLOC_INFO_FILE.tmp | cut -d'|' -f2- || echo "")
SQL_SP_ROW=$(grep -E '^SQL Stored Procedure' $CLOC_INFO_FILE.tmp | cut -d'|' -f2- || echo "")
if [[ -n "$SQL_ROW" || -n "$SQL_DATA_ROW" || -n "$SQL_SP_ROW" ]]; then
sql_files=0; sql_blank=0; sql_comment=0; sql_code=0
sql_data_files=0; sql_data_blank=0; sql_data_comment=0; sql_data_code=0
sql_sp_files=0; sql_sp_blank=0; sql_sp_comment=0; sql_sp_code=0
if [[ -n "$SQL_ROW" ]]; then
IFS='|' read -r sql_files sql_blank sql_comment sql_code <<< "$SQL_ROW"
fi
if [[ -n "$SQL_DATA_ROW" ]]; then
IFS='|' read -r sql_data_files sql_data_blank sql_data_comment sql_data_code <<< "$SQL_DATA_ROW"
fi
if [[ -n "$SQL_SP_ROW" ]]; then
IFS='|' read -r sql_sp_files sql_sp_blank sql_sp_comment sql_sp_code <<< "$SQL_SP_ROW"
fi
total_sql_files=$((sql_files + sql_data_files + sql_sp_files))
total_sql_blank=$((sql_blank + sql_data_blank + sql_sp_blank))
total_sql_comment=$((sql_comment + sql_data_comment + sql_sp_comment))
total_sql_code=$((sql_code + sql_data_code + sql_sp_code))
SUM_FILES=$((SUM_FILES + total_sql_files))
SUM_BLANK=$((SUM_BLANK + total_sql_blank))
SUM_COMMENT=$((SUM_COMMENT + total_sql_comment))
SUM_CODE=$((SUM_CODE + total_sql_code))
SUM_TECHNOLOGY=$((SUM_TECHNOLOGY + 1))
echo "|SQL|$total_sql_files|$total_sql_blank|$total_sql_comment|$total_sql_code|"
fi
echo "|**SUM:**|**$SUM_FILES**|**$SUM_BLANK**|**$SUM_COMMENT**|**$SUM_CODE**|"
echo "source_files_count=$SUM_FILES" >> $GITHUB_ENV
echo "lines_of_code=$SUM_CODE" >> $GITHUB_ENV
echo "technology_count=$SUM_TECHNOLOGY" >> $GITHUB_ENV
} > $CLOC_INFO_FILE
- name: Download Retriever
shell: bash
run: |
# Set maximum retries
max_retries=3
attempt=0
success=0
while [ $attempt -lt $max_retries ]; do
((attempt=attempt+1))
echo "Attempt $attempt of $max_retries..."
# Execute the curl command and save its output
output=$(curl --retry 3 -s ${{ env.retriever }})
          # Check for 'browser_download_url' (sometimes missing from the response, which causes an error -> retry).
          # Running grep as the if condition keeps a non-match from tripping bash's -e option.
          if echo "$output" | grep -q 'browser_download_url'; then
# If found, use the URL to download the file with wget
echo "$output" | grep -E 'browser_download_url' \
| grep linux \
| grep x86_64 \
| grep -Eo 'https://[^\"]*' \
| xargs wget -O "${{ env.tmp_dir }}/retriever.zip"
success=1
break # Exit loop on success
else
echo "URL not found, retrying..."
sleep 5
fi
done
# Check if the download was successful
if [ $success -ne 1 ]; then
echo "Failed to find the download URL after $max_retries attempts."
exit 1
fi
- name: Extract Retriever
shell: bash
working-directory: ${{ env.tmp_dir }}
run: unzip retriever.zip -d retriever
- name: Install hyperfine
if: inputs.benchmark == 'true'
shell: bash
run: |
wget https://github.com/sharkdp/hyperfine/releases/download/v1.16.1/hyperfine_1.16.1_amd64.deb
sudo dpkg -i hyperfine_1.16.1_amd64.deb
- name: Install snyk
if: inputs.analyze_vulnerabilities == 'true'
shell: bash
run: |
curl --compressed https://static.snyk.io/cli/latest/snyk-linux -o snyk
chmod +x ./snyk
mv ./snyk /usr/local/bin/
snyk auth ${{ inputs.snyk_token }}
- name: Prepare Retriever Command
shell: bash
env:
RETRIEVER_COMMAND: './eclipse -nosplash -i "${{ github.workspace }}/${{ inputs.source_path }}" -o "${{ env.tmp_dir }}/eclipse_tmp" -r "${{ inputs.rules }}"'
run: |
if [[ "${{ inputs.rules_path }}" != '' ]]; then
RETRIEVER_COMMAND="${RETRIEVER_COMMAND} -x \"${{ github.workspace }}/${{ inputs.rules_path }}\""
fi
if [[ "${{ inputs.analyze_vulnerabilities }}" == 'true' ]]; then
RETRIEVER_COMMAND="${RETRIEVER_COMMAND} -a /usr/local/bin/snyk"
fi
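        # The assembled command has the shape (paths abbreviated here for readability):
        #   ./eclipse -nosplash -i "<workspace>/<source_path>" -o "<tmp_dir>/eclipse_tmp" -r "<rules>" [-x "<rules_path>"] [-a /usr/local/bin/snyk]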
echo "retriever_command=$RETRIEVER_COMMAND" >> $GITHUB_ENV
- name: Execute Retriever
shell: bash
working-directory: ${{ env.tmp_dir }}/retriever
env:
NO_AT_BRIDGE: 1 # avoid eclipse error "AT-SPI: Error retrieving accessibility bus address"
TIMING_INFO_FILE: ${{ env.tmp_dir }}/retriever_out/timing.md
NIST_NVD_API_KEY: ${{ inputs.nist_nvd_token }}
run: |
echo "TIMING_INFO_FILE=$TIMING_INFO_FILE" >> $GITHUB_ENV
mkdir "${{ env.tmp_dir }}/eclipse_tmp"
echo "log4j.rootLogger=debug, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
# Pattern to output the caller's file name and line number.
log4j.appender.stdout.layout.ConversionPattern=%5p [%t] (%F:%L) - %m%n" \
> log4j.properties
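        # Point the Eclipse runtime at the log4j configuration written above.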
echo "log4j.configuration=file://${{ env.tmp_dir }}/retriever/log4j.properties" >> configuration/config.ini
if [ "${{ inputs.benchmark }}" = "true" ]; then
# Execute with Hyperfine
hyperfine \
--warmup 3 \
--runs 10 \
--show-output \
--ignore-failure \
--export-markdown $TIMING_INFO_FILE \
--prepare 'sleep 1; \
rm -rf "${{ env.tmp_dir }}/eclipse_tmp"; sleep 1; \
rm -rf "${{ env.tmp_dir }}/retriever/workspace"; sleep 1; \
echo "cleanup done"' \
"${{ env.retriever_command }}"
# Remove the first column from the table using sed
sed -i 's/^[^|]*|[^|]*|/|/' $TIMING_INFO_FILE
{
echo "## Retriever execution time"
cat $TIMING_INFO_FILE
} > "${TIMING_INFO_FILE}.tmp" && mv "${TIMING_INFO_FILE}.tmp" "$TIMING_INFO_FILE"
          # Extract the mean time, dropping the "± deviation" part
          MEAN_TIME=$(awk -F'|' '/^[ ]*\|/{print $2}' $TIMING_INFO_FILE | sed -n '3p' | sed -E 's/^[ ]*//; s/[[:space:]]*±.*//')
          echo "execution_time=$MEAN_TIME" >> $GITHUB_ENV
else
# Execute with /usr/bin/time
/usr/bin/time -p -o "$TIMING_INFO_FILE" ${{ env.retriever_command }}
# Read and reformat the timing information
{
echo "## Retriever execution time"
echo "| Metric | Time (seconds) |"
echo "| --- | ---: |"
while IFS= read -r line; do
case "$line" in
              real*)
                real_time=${line#* }
                echo "| Real (Wall-Clock) Time | $real_time |"
                ;;
user*) echo "| User CPU Time | ${line#* } |" ;;
sys*) echo "| System CPU Time | ${line#* } |" ;;
esac
done < "$TIMING_INFO_FILE"
echo "<!--"
echo "Explainations:"
echo "- __Real CPU Time__: actual time the command has run (can be less than total time spent in user and system mode for multi-threaded processes)"
echo "- __User CPU Time__: time the command has spent running in user mode"
echo "- __System CPU Time__: time the command has spent running in system or kernel mode"
echo "-->"
} > "${TIMING_INFO_FILE}.tmp" && mv "${TIMING_INFO_FILE}.tmp" "$TIMING_INFO_FILE"
echo "execution_time=$real_time" >> $GITHUB_ENV
fi
mv ${{ env.tmp_dir }}/eclipse_tmp/* ${{ env.tmp_dir }}/retriever_out/
- name: Count Components in Repository
shell: bash
run: |
REPOSITORY_FILE=$(find ${{ env.tmp_dir }}/retriever_out -name '*.repository')
pip3 install yq
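        # xq ships with the Python yq package; it converts XML to JSON so jq-style filters apply.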
COMPONENT_COUNT=$(xq '.["repository:Repository"]["components__Repository"] | length' $REPOSITORY_FILE)
echo "component_count=$COMPONENT_COUNT" >> $GITHUB_ENV
- name: Gather Retriever Info
shell: bash
run: |
RETRIEVER_INFO_FILE=${{ env.tmp_dir }}/retriever_out/retriever_info.md
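        # Abstraction = source files per detected component; bc's scale=2 keeps two decimal places.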
ABSTRACTION=$(echo "scale=2; ${{ env.source_files_count }} / ${{ env.component_count }}" | bc -l)
if [[ "$ABSTRACTION" == .* ]]; then
# add leading zero
ABSTRACTION="0$ABSTRACTION"
fi
echo "RETRIEVER_INFO_FILE=$RETRIEVER_INFO_FILE" >> $GITHUB_ENV
{
echo "# [Retriever](https://github.com/PalladioSimulator/Palladio-ReverseEngineering-Retriever) Report"
echo "| Attribute | Value |"
echo "| ------------------ | ----- |"
echo "| Retriever Version | $(curl -sL "${{ env.retriever }}" | jq -r ".tag_name") |"
echo "| Date | $(date -u) |"
echo "| #Components | ${{ env.component_count }} |"
echo "| #Source Files | ${{ env.source_files_count }} |"
echo "| Lines of Code | ${{ env.lines_of_code }} |"
echo "| #Technologies | ${{ env.technology_count }} |"
echo "| Abstraction | $ABSTRACTION |"
echo "| Execution Time [s] | ${{env.execution_time }} |"
} > $RETRIEVER_INFO_FILE
- name: Combine and Clean Up Files
shell: bash
run: |
# Combine files into one, with empty lines between file contents
{
cat $RETRIEVER_INFO_FILE
echo
cat $GIT_INFO_FILE
echo
cat $SYSTEM_INFO_FILE
echo
cat $TIMING_INFO_FILE
echo
cat $CLOC_INFO_FILE
} > ${{ env.tmp_dir }}/retriever_out/README.md
# Delete the original files
rm -f $RETRIEVER_INFO_FILE $GIT_INFO_FILE $SYSTEM_INFO_FILE $CLOC_INFO_FILE $TIMING_INFO_FILE
- name: Prepare Analysis Results for Upload
shell: bash
working-directory: ${{ env.tmp_dir }}
run: |
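        # Nest the outputs under source_path so the artifact layout matches this action's description.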
mkdir -p tmp/${{ inputs.source_path }}
mv retriever_out/* tmp/${{ inputs.source_path }}
mv tmp/* retriever_out
rmdir tmp
- name: Upload Analysis Results
      uses: actions/upload-artifact@v4
with:
name: retriever
path: ${{ env.tmp_dir }}/retriever_out
- name: Prepare Eclipse Logs for Upload
if: failure()
shell: bash
working-directory: ${{ env.tmp_dir }}
run: |
mkdir -p eclipse_logs/${{ inputs.source_path }}
# Move log files, if they exist.
find retriever/configuration/ -maxdepth 1 -name "*.log" -exec mv -t eclipse_logs/${{ inputs.source_path }} {} +
        find retriever/workspace/.metadata/ -maxdepth 1 -name ".log" -exec mv -t eclipse_logs/${{ inputs.source_path }} {} +
- name: Upload Eclipse Logs
if: failure()
      uses: actions/upload-artifact@v4
with:
name: eclipse_logs
path: ${{ env.tmp_dir }}/eclipse_logs
- name: Delete Temporary Directory
if: always()
shell: bash
run: rm -rf ${{ env.tmp_dir }}