Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package werf for openSUSE:Factory checked in 
at 2025-07-18 16:00:24
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/werf (Old)
 and      /work/SRC/openSUSE:Factory/.werf.new.8875 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "werf"

Fri Jul 18 16:00:24 2025 rev:59 rq:1294347 version:2.42.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/werf/werf.changes        2025-07-11 
21:32:06.970006667 +0200
+++ /work/SRC/openSUSE:Factory/.werf.new.8875/werf.changes      2025-07-18 
16:01:39.674314928 +0200
@@ -1,0 +2,13 @@
+Fri Jul 18 09:40:28 UTC 2025 - Johannes Kastl 
<opensuse_buildserv...@ojkastl.de>
+
+- Update to version 2.42.0:
+  * Features
+    - build: extend build report with stages (#6951) (e82db1a)
+    - deploy: werf.io/sensitive-paths annotation and
+      NELM_FEAT_FIELD_SENSITIVE experimental flag (33b33a9)
+  * Bug Fixes
+    - deploy: goroutines leak during tracking (85772cb)
+    - deploy: logs from libraries still showed by default (83eade4)
+    - includes: add ssh flag to includes commands (#6969) (788f186)
+
+-------------------------------------------------------------------

Old:
----
  werf-2.41.2.obscpio

New:
----
  werf-2.42.0.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ werf.spec ++++++
--- /var/tmp/diff_new_pack.m4kje7/_old  2025-07-18 16:01:41.086373872 +0200
+++ /var/tmp/diff_new_pack.m4kje7/_new  2025-07-18 16:01:41.090374039 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           werf
-Version:        2.41.2
+Version:        2.42.0
 Release:        0
 Summary:        CLI for the Werf CI/CD system
 License:        Apache-2.0

++++++ _service ++++++
--- /var/tmp/diff_new_pack.m4kje7/_old  2025-07-18 16:01:41.122375375 +0200
+++ /var/tmp/diff_new_pack.m4kje7/_new  2025-07-18 16:01:41.122375375 +0200
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/werf/werf</param>
     <param name="scm">git</param>
     <param name="exclude">.git</param>
-    <param name="revision">v2.41.2</param>
+    <param name="revision">v2.42.0</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="changesgenerate">enable</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.m4kje7/_old  2025-07-18 16:01:41.142376210 +0200
+++ /var/tmp/diff_new_pack.m4kje7/_new  2025-07-18 16:01:41.146376377 +0200
@@ -1,6 +1,6 @@
 <servicedata>
 <service name="tar_scm">
                 <param name="url">https://github.com/werf/werf</param>
-              <param 
name="changesrevision">5e2957900782c508c79e120e0126f62337e87fbe</param></service></servicedata>
+              <param 
name="changesrevision">ccdb348347e7c2967e326e55140494a1f0fa8294</param></service></servicedata>
 (No newline at EOF)
 

++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/werf/vendor.tar.gz 
/work/SRC/openSUSE:Factory/.werf.new.8875/vendor.tar.gz differ: char 143, line 1

++++++ werf-2.41.2.obscpio -> werf-2.42.0.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/CHANGELOG.md new/werf-2.42.0/CHANGELOG.md
--- old/werf-2.41.2/CHANGELOG.md        2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/CHANGELOG.md        2025-07-11 16:48:28.000000000 +0200
@@ -1,5 +1,30 @@
 # Changelog
 
+## [2.42.0](https://github.com/werf/werf/compare/v2.41.3...v2.42.0) 
(2025-07-11)
+
+
+### Features
+
+* **build:** extend build report with stages 
([#6951](https://github.com/werf/werf/issues/6951)) 
([e82db1a](https://github.com/werf/werf/commit/e82db1a667b0a822333b0097ff8700c9f6d3fd89))
+* **deploy:** `werf.io/sensitive-paths` annotation and 
`NELM_FEAT_FIELD_SENSITIVE` experimental flag 
([33b33a9](https://github.com/werf/werf/commit/33b33a9b1e60095b0fbbcb8b64760d364192514b))
+
+
+### Bug Fixes
+
+* **deploy:** goroutines leak during tracking 
([ccdd65e](https://github.com/werf/werf/commit/ccdd65ea1e02219c80f7f91f0697a55805254040))
+* **deploy:** goroutines leak during tracking 
([85772cb](https://github.com/werf/werf/commit/85772cbd2dda5422305b9bd9dff3e909a3b6e428))
+* **deploy:** logs from libraries still showed by default 
([a48b1b2](https://github.com/werf/werf/commit/a48b1b23cbd490fa7015820d8aee6dfe572c1baa))
+* **deploy:** logs from libraries still showed by default 
([83eade4](https://github.com/werf/werf/commit/83eade48689e0a83f8bac0e9a925551364d57f54))
+* **includes:** add ssh flag to includes commands 
([#6969](https://github.com/werf/werf/issues/6969)) 
([788f186](https://github.com/werf/werf/commit/788f18623bcd817c990aebfa1286c2575ba0e5fa))
+
+## [2.41.3](https://github.com/werf/werf/compare/v2.41.2...v2.41.3) 
(2025-07-10)
+
+
+### Bug Fixes
+
+* **deploy:** no logs displayed 
([4521772](https://github.com/werf/werf/commit/4521772c33168c4ec9832b17a15afc047fef6997))
+* **deploy:** no logs displayed 
([0ae745d](https://github.com/werf/werf/commit/0ae745daef8743c08521a9cc134c6f1becb467a9))
+
 ## [2.41.2](https://github.com/werf/werf/compare/v2.41.1...v2.41.2) 
(2025-07-10)
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/cmd/werf/common/common.go 
new/werf-2.42.0/cmd/werf/common/common.go
--- old/werf-2.41.2/cmd/werf/common/common.go   2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/cmd/werf/common/common.go   2025-07-11 16:48:28.000000000 
+0200
@@ -1290,31 +1290,46 @@
 }
 
 func GetGiterminismManager(ctx context.Context, cmdData *CmdData) 
(*giterminism_manager.Manager, error) {
-       workingDir := GetWorkingDir(cmdData)
+       manager := new(giterminism_manager.Manager)
+       if err := logboek.Context(ctx).Info().LogProcess("Initialize 
giterminism manager").
+               DoError(func() error {
+                       workingDir := GetWorkingDir(cmdData)
+
+                       gitWorkTree, err := GetGitWorkTree(ctx, cmdData, 
workingDir)
+                       if err != nil {
+                               return fmt.Errorf("unable to get git work tree: 
%w", err)
+                       }
+
+                       localGitRepo, err := OpenGitRepo(ctx, cmdData, 
workingDir, gitWorkTree)
+                       if err != nil {
+                               return err
+                       }
+
+                       headCommit, err := localGitRepo.HeadCommitHash(ctx)
+                       if err != nil {
+                               return err
+                       }
+
+                       configRelPath := 
GetWerfGiterminismConfigRelPath(cmdData)
+
+                       gm, err := giterminism_manager.NewManager(ctx, 
configRelPath, workingDir, localGitRepo, headCommit, 
giterminism_manager.NewManagerOptions{
+                               LooseGiterminism:       
*cmdData.LooseGiterminism,
+                               Dev:                    *cmdData.Dev,
+                               CreateIncludesLockFile: 
cmdData.CreateIncludesLockFile,
+                               AllowIncludesUpdate:    
cmdData.AllowIncludesUpdate,
+                       })
+                       if err != nil {
+                               return err
+                       }
 
-       gitWorkTree, err := GetGitWorkTree(ctx, cmdData, workingDir)
-       if err != nil {
-               return nil, fmt.Errorf("unable to get git work tree: %w", err)
-       }
+                       manager = gm
 
-       localGitRepo, err := OpenGitRepo(ctx, cmdData, workingDir, gitWorkTree)
-       if err != nil {
+                       return nil
+               }); err != nil {
                return nil, err
        }
 
-       headCommit, err := localGitRepo.HeadCommitHash(ctx)
-       if err != nil {
-               return nil, err
-       }
-
-       configRelPath := GetWerfGiterminismConfigRelPath(cmdData)
-
-       return giterminism_manager.NewManager(ctx, configRelPath, workingDir, 
localGitRepo, headCommit, giterminism_manager.NewManagerOptions{
-               LooseGiterminism:       *cmdData.LooseGiterminism,
-               Dev:                    *cmdData.Dev,
-               CreateIncludesLockFile: cmdData.CreateIncludesLockFile,
-               AllowIncludesUpdate:    cmdData.AllowIncludesUpdate,
-       })
+       return manager, nil
 }
 
 func GetGitWorkTree(ctx context.Context, cmdData *CmdData, workingDir string) 
(string, error) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/cmd/werf/includes/get-file/get-file.go 
new/werf-2.42.0/cmd/werf/includes/get-file/get-file.go
--- old/werf-2.41.2/cmd/werf/includes/get-file/get-file.go      2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/cmd/werf/includes/get-file/get-file.go      2025-07-11 
16:48:28.000000000 +0200
@@ -64,6 +64,7 @@
        common.SetupConfigPath(&commonCmdData, cmd)
        common.SetupGiterminismConfigPath(&commonCmdData, cmd)
        common.SetupEnvironment(&commonCmdData, cmd)
+       common.SetupSSHKey(&commonCmdData, cmd)
 
        common.SetupGiterminismOptions(&commonCmdData, cmd)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/cmd/werf/includes/ls-files/ls-files.go 
new/werf-2.42.0/cmd/werf/includes/ls-files/ls-files.go
--- old/werf-2.41.2/cmd/werf/includes/ls-files/ls-files.go      2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/cmd/werf/includes/ls-files/ls-files.go      2025-07-11 
16:48:28.000000000 +0200
@@ -91,6 +91,7 @@
        common.SetupConfigPath(&commonCmdData, cmd)
        common.SetupGiterminismConfigPath(&commonCmdData, cmd)
        common.SetupEnvironment(&commonCmdData, cmd)
+       common.SetupSSHKey(&commonCmdData, cmd)
 
        common.SetupGiterminismOptions(&commonCmdData, cmd)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/cmd/werf/includes/update/update.go 
new/werf-2.42.0/cmd/werf/includes/update/update.go
--- old/werf-2.41.2/cmd/werf/includes/update/update.go  2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/cmd/werf/includes/update/update.go  2025-07-11 
16:48:28.000000000 +0200
@@ -53,6 +53,7 @@
        common.SetupConfigPath(&commonCmdData, cmd)
        common.SetupGiterminismConfigPath(&commonCmdData, cmd)
        common.SetupEnvironment(&commonCmdData, cmd)
+       common.SetupSSHKey(&commonCmdData, cmd)
 
        common.SetupGiterminismOptions(&commonCmdData, cmd)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/werf-2.41.2/docs/_includes/reference/cli/werf_includes_get_file.md 
new/werf-2.42.0/docs/_includes/reference/cli/werf_includes_get_file.md
--- old/werf-2.41.2/docs/_includes/reference/cli/werf_includes_get_file.md      
2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/docs/_includes/reference/cli/werf_includes_get_file.md      
2025-07-11 16:48:28.000000000 +0200
@@ -91,6 +91,11 @@
       --platform=[]
             Enable platform emulation when building images with werf, format: 
OS/ARCH[/VARIANT]     
             ($WERF_PLATFORM or $DOCKER_DEFAULT_PLATFORM by default)
+      --ssh-key=[]
+            Use only specific ssh key(s).
+            Can be specified with $WERF_SSH_KEY_* (e.g. 
$WERF_SSH_KEY_REPO=~/.ssh/repo_rsa,         
+            $WERF_SSH_KEY_NODEJS=~/.ssh/nodejs_rsa).
+            Defaults to $WERF_SSH_KEY_*, system ssh-agent or 
~/.ssh/{id_rsa|id_dsa}
       --tmp-dir=""
             Use specified dir to store tmp files and dirs (default 
$WERF_TMP_DIR or system tmp dir)
 ```
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/werf-2.41.2/docs/_includes/reference/cli/werf_includes_ls_files.md 
new/werf-2.42.0/docs/_includes/reference/cli/werf_includes_ls_files.md
--- old/werf-2.41.2/docs/_includes/reference/cli/werf_includes_ls_files.md      
2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/docs/_includes/reference/cli/werf_includes_ls_files.md      
2025-07-11 16:48:28.000000000 +0200
@@ -103,6 +103,11 @@
       --platform=[]
             Enable platform emulation when building images with werf, format: 
OS/ARCH[/VARIANT]     
             ($WERF_PLATFORM or $DOCKER_DEFAULT_PLATFORM by default)
+      --ssh-key=[]
+            Use only specific ssh key(s).
+            Can be specified with $WERF_SSH_KEY_* (e.g. 
$WERF_SSH_KEY_REPO=~/.ssh/repo_rsa,         
+            $WERF_SSH_KEY_NODEJS=~/.ssh/nodejs_rsa).
+            Defaults to $WERF_SSH_KEY_*, system ssh-agent or 
~/.ssh/{id_rsa|id_dsa}
       --tmp-dir=""
             Use specified dir to store tmp files and dirs (default 
$WERF_TMP_DIR or system tmp dir)
 ```
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/werf-2.41.2/docs/_includes/reference/cli/werf_includes_update.md 
new/werf-2.42.0/docs/_includes/reference/cli/werf_includes_update.md
--- old/werf-2.41.2/docs/_includes/reference/cli/werf_includes_update.md        
2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/docs/_includes/reference/cli/werf_includes_update.md        
2025-07-11 16:48:28.000000000 +0200
@@ -80,6 +80,11 @@
       --platform=[]
             Enable platform emulation when building images with werf, format: 
OS/ARCH[/VARIANT]     
             ($WERF_PLATFORM or $DOCKER_DEFAULT_PLATFORM by default)
+      --ssh-key=[]
+            Use only specific ssh key(s).
+            Can be specified with $WERF_SSH_KEY_* (e.g. 
$WERF_SSH_KEY_REPO=~/.ssh/repo_rsa,         
+            $WERF_SSH_KEY_NODEJS=~/.ssh/nodejs_rsa).
+            Defaults to $WERF_SSH_KEY_*, system ssh-agent or 
~/.ssh/{id_rsa|id_dsa}
       --tmp-dir=""
             Use specified dir to store tmp files and dirs (default 
$WERF_TMP_DIR or system tmp dir)
 ```
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/werf-2.41.2/docs/pages_en/reference/deploy_annotations.md 
new/werf-2.42.0/docs/pages_en/reference/deploy_annotations.md
--- old/werf-2.41.2/docs/pages_en/reference/deploy_annotations.md       
2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/docs/pages_en/reference/deploy_annotations.md       
2025-07-11 16:48:28.000000000 +0200
@@ -24,6 +24,7 @@
  - [`werf.io/show-logs-only-for-containers`](#show-logs-only-for-containers) — 
enable logging only for specified containers of the resource.
  - [`werf.io/show-service-messages`](#show-service-messages) — enable 
additional logging of Kubernetes related service messages for resource.
  - [`werf.io/sensitive`](#mark-resource-as-sensitive) — mark the resource as 
sensitive, so werf will not show diffs for this resource in `werf plan`.
+ - [`werf.io/sensitive-paths`](#mark-fields-of-a-resource-as-sensitive) — mark 
the fields of a resource as sensitive, so werf will not show diffs for this 
resource in `werf plan`.
 
 More info about chart templates and other stuff is available in the [helm 
chapter]({{ "usage/deploy/overview.html" | true_relative_url }}).
 
@@ -193,6 +194,15 @@
 
 <img 
src="https://raw.githubusercontent.com/werf/demos/master/deploy/werf-new-track-modes-1.gif";
 />
 
+## Mark fields of a resource as sensitive
+
+`"werf.io/sensitive-paths": "JSONPath,JSONPath,..."`
+
+Example: \
+`"werf.io/sensitive-paths": 
"$.spec.template.spec.containers[*].env[*].value,$.data.*"`
+
+Don't show diffs for resource fields that match specified JSONPath 
expressions. Overrides the behavior of `werf.io/sensitive`.
+
 ## Mark resource as sensitive
 
 `"werf.io/sensitive": "true"|"false"`
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/werf-2.41.2/docs/pages_ru/reference/deploy_annotations.md 
new/werf-2.42.0/docs/pages_ru/reference/deploy_annotations.md
--- old/werf-2.41.2/docs/pages_ru/reference/deploy_annotations.md       
2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/docs/pages_ru/reference/deploy_annotations.md       
2025-07-11 16:48:28.000000000 +0200
@@ -23,6 +23,7 @@
  - [`werf.io/show-logs-only-for-containers`](#show-logs-only-for-containers) — 
включить логирование вывода только для указанных контейнеров ресурса.
  - [`werf.io/show-service-messages`](#show-service-messages) — включить вывод 
сервисных сообщений и событий Kubernetes для данного ресурса.
  - [`werf.io/sensitive`](#mark-resource-as-sensitive) — пометить ресурс как 
содержащий чувствительные данные, чтобы werf не показывал диффы для этого 
ресурса в `werf plan`.
+ - [`werf.io/sensitive-paths`](#mark-fields-of-a-resource-as-sensitive) — 
пометить поля ресурса как чувствительные, чтобы werf не показывал диффы для 
этих полей в `werf plan`.
 
 Больше информации о том, что такое чарт, шаблоны и пр. доступно в [главе про 
Helm]({{ "usage/deploy/overview.html" | true_relative_url }}).
 
@@ -186,6 +187,15 @@
 
 <img 
src="https://raw.githubusercontent.com/werf/demos/master/deploy/werf-new-track-modes-1.gif";
 />
 
+## Mark fields of a resource as sensitive
+
+`"werf.io/sensitive-paths": "JSONPath,JSONPath,..."`
+
+Например: \
+`"werf.io/sensitive-paths": 
"$.spec.template.spec.containers[*].env[*].value,$.data.*"`
+
+Не показывать диффы для ресурсных полей, которые подпадают под указанные 
JSONPath. Переопределяет значение аннотации `werf.io/sensitive`.
+
 ## Mark resource as sensitive
 
 `"werf.io/sensitive": "true"|"false"`
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/go.mod new/werf-2.42.0/go.mod
--- old/werf-2.41.2/go.mod      2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/go.mod      2025-07-11 16:48:28.000000000 +0200
@@ -62,11 +62,11 @@
        github.com/werf/3p-helm-for-werf-helm v0.0.0-20241217155820-089f92cd5c9d
        github.com/werf/common-go v0.0.0-20250528135810-c90e95ac760d
        github.com/werf/copy-recurse v0.2.7
-       github.com/werf/kubedog v0.13.1-0.20250710104559-34c38e786dce
+       github.com/werf/kubedog v0.13.1-0.20250710181210-b4a5a7f76b11
        github.com/werf/kubedog-for-werf-helm v0.0.0-20241217155728-9d45c48b82b6
        github.com/werf/lockgate v0.1.1
        github.com/werf/logboek v0.6.1
-       github.com/werf/nelm v1.7.2-0.20250710110013-4b185aa02ea7
+       github.com/werf/nelm v1.8.1-0.20250711141902-0dcd48049a64
        github.com/werf/nelm-for-werf-helm v0.0.0-20241217155925-b0e6734d1dbf
        go.opentelemetry.io/otel v1.24.0
        go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
@@ -113,6 +113,7 @@
        github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
        github.com/moby/term v0.5.0 // indirect
        github.com/muesli/cancelreader v0.2.2 // indirect
+       github.com/ohler55/ojg v1.26.7 // indirect
        github.com/radovskyb/watcher v1.0.7 // indirect
        github.com/sajari/fuzzy v1.0.0 // indirect
        github.com/spaolacci/murmur3 v1.1.0 // indirect
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/go.sum new/werf-2.42.0/go.sum
--- old/werf-2.41.2/go.sum      2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/go.sum      2025-07-11 16:48:28.000000000 +0200
@@ -1050,6 +1050,8 @@
 github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod 
h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
 github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8/go.mod 
h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/ohler55/ojg v1.26.7 h1:yZLS2xlZF/qk5LHM4LFhxxTDyMgZl+46Z6p7wQm8KAU=
+github.com/ohler55/ojg v1.26.7/go.mod 
h1:/Y5dGWkekv9ocnUixuETqiL58f+5pAsUfg5P8e7Pa2o=
 github.com/oklog/oklog v0.3.2/go.mod 
h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
 github.com/oklog/run v1.0.0/go.mod 
h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@@ -1398,16 +1400,16 @@
 github.com/werf/common-go v0.0.0-20250528135810-c90e95ac760d/go.mod 
h1:taKDUxKmGfqNOlVx1O0ad5vdV4duKexTLO7Rch9HfeA=
 github.com/werf/copy-recurse v0.2.7 
h1:3FTOarbJ9uhFLi75oeUCioK9zxZwuV7o28kuUBPDZPM=
 github.com/werf/copy-recurse v0.2.7/go.mod 
h1:6Ypb+qN+hRBJgoCgEkX1vpbqcQ+8q69BQ3hi8s8Y6Qc=
-github.com/werf/kubedog v0.13.1-0.20250710104559-34c38e786dce 
h1:cNcDFhVITdx5d11jMRYuvDE+E6fLLPhK7W7tMCZJ0pw=
-github.com/werf/kubedog v0.13.1-0.20250710104559-34c38e786dce/go.mod 
h1:Y6pesrIN5uhFKqmHnHSoeW4jmVyZlWPFWv5SjB0rUPg=
+github.com/werf/kubedog v0.13.1-0.20250710181210-b4a5a7f76b11 
h1:9aZ8CjaczcO6Ez9T25DSPBE5EtmeBFyKdbwmvwBcvjM=
+github.com/werf/kubedog v0.13.1-0.20250710181210-b4a5a7f76b11/go.mod 
h1:Y6pesrIN5uhFKqmHnHSoeW4jmVyZlWPFWv5SjB0rUPg=
 github.com/werf/kubedog-for-werf-helm v0.0.0-20241217155728-9d45c48b82b6 
h1:lpgQPTCp+wNJfTqJWtR6A5gRA4e4m/eRJFV7V18XCoA=
 github.com/werf/kubedog-for-werf-helm 
v0.0.0-20241217155728-9d45c48b82b6/go.mod 
h1:PA9xGVKX9Il6sCgvPrcB3/FahRme3bXRz4BuylvAssc=
 github.com/werf/lockgate v0.1.1 h1:S400JFYjtWfE4i4LY9FA8zx0fMdfui9DPrBiTciCrx4=
 github.com/werf/lockgate v0.1.1/go.mod 
h1:0yIFSLq9ausy6ejNxF5uUBf/Ib6daMAfXuCaTMZJzIE=
 github.com/werf/logboek v0.6.1 h1:oEe6FkmlKg0z0n80oZjLplj6sXcBeLleCkjfOOZEL2g=
 github.com/werf/logboek v0.6.1/go.mod 
h1:Gez5J4bxekyr6MxTmIJyId1F61rpO+0/V4vjCIEIZmk=
-github.com/werf/nelm v1.7.2-0.20250710110013-4b185aa02ea7 
h1:XdFBqjDYQZlur9gDXCMNoIZAaXJjMzbcZBK84b+K3JE=
-github.com/werf/nelm v1.7.2-0.20250710110013-4b185aa02ea7/go.mod 
h1:aQwo0UZUYW12+CsUIA1q7qBOr0JkVGj1tbTpZm0r64c=
+github.com/werf/nelm v1.8.1-0.20250711141902-0dcd48049a64 
h1:Y5se5J+e46Ngkua20Cp462jaRt9cBjmvf6o57iukGVE=
+github.com/werf/nelm v1.8.1-0.20250711141902-0dcd48049a64/go.mod 
h1:CySR3Yu+hHo0tIbcsG8SvTteQWkbzdKr8aVpBcZ+H3o=
 github.com/werf/nelm-for-werf-helm v0.0.0-20241217155925-b0e6734d1dbf 
h1:K51qz209c1yJgKzPw8AeS72T21F/ACp0VI3RJvT4THA=
 github.com/werf/nelm-for-werf-helm v0.0.0-20241217155925-b0e6734d1dbf/go.mod 
h1:7RJXSGPKKPEvfPqrTwNA8jT7y52O0ebwhSbSn29ESMA=
 github.com/xanzy/go-gitlab v0.31.0/go.mod 
h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/build/build_phase.go 
new/werf-2.42.0/pkg/build/build_phase.go
--- old/werf-2.41.2/pkg/build/build_phase.go    2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/build/build_phase.go    2025-07-11 16:48:28.000000000 
+0200
@@ -1,15 +1,11 @@
 package build
 
 import (
-       "bytes"
        "context"
-       "encoding/json"
        "fmt"
-       "io/ioutil"
        "os"
        "strconv"
        "strings"
-       "sync"
        "time"
 
        "github.com/google/uuid"
@@ -92,68 +88,6 @@
        buildContextArchive container_backend.BuildContextArchiver
 }
 
-const (
-       ReportJSON    ReportFormat = "json"
-       ReportEnvFile ReportFormat = "envfile"
-)
-
-type ReportFormat string
-
-type ImagesReport struct {
-       mux              sync.Mutex
-       Images           map[string]ReportImageRecord
-       ImagesByPlatform map[string]map[string]ReportImageRecord
-}
-
-func NewImagesReport() *ImagesReport {
-       return &ImagesReport{
-               Images:           make(map[string]ReportImageRecord),
-               ImagesByPlatform: make(map[string]map[string]ReportImageRecord),
-       }
-}
-
-func (report *ImagesReport) SetImageRecord(name string, imageRecord 
ReportImageRecord) {
-       report.mux.Lock()
-       defer report.mux.Unlock()
-       report.Images[name] = imageRecord
-}
-
-func (report *ImagesReport) SetImageByPlatformRecord(targetPlatform, name 
string, imageRecord ReportImageRecord) {
-       report.mux.Lock()
-       defer report.mux.Unlock()
-
-       if _, hasKey := report.ImagesByPlatform[name]; !hasKey {
-               report.ImagesByPlatform[name] = 
make(map[string]ReportImageRecord)
-       }
-       report.ImagesByPlatform[name][targetPlatform] = imageRecord
-}
-
-func (report *ImagesReport) ToJsonData() ([]byte, error) {
-       report.mux.Lock()
-       defer report.mux.Unlock()
-
-       data, err := json.MarshalIndent(report, "", "\t")
-       if err != nil {
-               return nil, err
-       }
-       data = append(data, []byte("\n")...)
-
-       return data, nil
-}
-
-func (report *ImagesReport) ToEnvFileData() []byte {
-       report.mux.Lock()
-       defer report.mux.Unlock()
-
-       buf := bytes.NewBuffer([]byte{})
-       for img, record := range report.Images {
-               buf.WriteString(GenerateImageEnv(img, record.DockerImageName))
-               buf.WriteString("\n")
-       }
-
-       return buf.Bytes()
-}
-
 func GenerateImageEnv(werfImageName, imageName string) string {
        var imageEnvName string
        if werfImageName == "" {
@@ -170,17 +104,6 @@
        return fmt.Sprintf("%s=%s", imageEnvName, imageName)
 }
 
-type ReportImageRecord struct {
-       WerfImageName     string
-       DockerRepo        string
-       DockerTag         string
-       DockerImageID     string
-       DockerImageDigest string
-       DockerImageName   string
-       Rebuilt           bool
-       Final             bool
-}
-
 func (phase *BuildPhase) Name() string {
        return "build"
 }
@@ -481,90 +404,7 @@
 }
 
 func (phase *BuildPhase) createReport(ctx context.Context) error {
-       for _, desc := range phase.Conveyor.imagesTree.GetImagesByName(false) {
-               name, images := desc.Unpair()
-               targetPlatforms := util.MapFuncToSlice(images, func(img 
*image.Image) string { return img.TargetPlatform })
-
-               for _, img := range images {
-                       stageImage := 
img.GetLastNonEmptyStage().GetStageImage().Image
-                       stageDesc := stageImage.GetFinalStageDesc()
-                       if stageDesc == nil {
-                               stageDesc = stageImage.GetStageDesc()
-                       }
-
-                       record := ReportImageRecord{
-                               WerfImageName:     img.GetName(),
-                               DockerRepo:        stageDesc.Info.Repository,
-                               DockerTag:         stageDesc.Info.Tag,
-                               DockerImageID:     stageDesc.Info.ID,
-                               DockerImageDigest: stageDesc.Info.GetDigest(),
-                               DockerImageName:   stageDesc.Info.Name,
-                               Rebuilt:           img.GetRebuilt(),
-                               Final:             img.IsFinal,
-                       }
-
-                       if os.Getenv("WERF_ENABLE_REPORT_BY_PLATFORM") == "1" {
-                               
phase.ImagesReport.SetImageByPlatformRecord(img.TargetPlatform, img.GetName(), 
record)
-                       }
-                       if len(targetPlatforms) == 1 {
-                               phase.ImagesReport.SetImageRecord(img.Name, 
record)
-                       }
-               }
-
-               if _, isLocal := 
phase.Conveyor.StorageManager.GetStagesStorage().(*storage.LocalStagesStorage); 
!isLocal {
-                       if len(targetPlatforms) > 1 {
-                               img := 
phase.Conveyor.imagesTree.GetMultiplatformImage(name)
-
-                               isRebuilt := false
-                               for _, pImg := range img.Images {
-                                       isRebuilt = (isRebuilt || 
pImg.GetRebuilt())
-                               }
-
-                               stageDesc := img.GetFinalStageDesc()
-                               if stageDesc == nil {
-                                       stageDesc = img.GetStageDesc()
-                               }
-
-                               record := ReportImageRecord{
-                                       WerfImageName:     img.Name,
-                                       DockerRepo:        
stageDesc.Info.Repository,
-                                       DockerTag:         stageDesc.Info.Tag,
-                                       DockerImageID:     stageDesc.Info.ID,
-                                       DockerImageDigest: 
stageDesc.Info.GetDigest(),
-                                       DockerImageName:   stageDesc.Info.Name,
-                                       Rebuilt:           isRebuilt,
-                                       Final:             img.IsFinal,
-                               }
-                               phase.ImagesReport.SetImageRecord(img.Name, 
record)
-                       }
-               }
-       }
-
-       debugJsonData, err := phase.ImagesReport.ToJsonData()
-       logboek.Context(ctx).Debug().LogF("ImagesReport: (err: %v)\n%s", err, 
debugJsonData)
-
-       if phase.ReportPath != "" {
-               var data []byte
-               var err error
-               switch phase.ReportFormat {
-               case ReportJSON:
-                       if data, err = phase.ImagesReport.ToJsonData(); err != 
nil {
-                               return fmt.Errorf("unable to prepare report 
json: %w", err)
-                       }
-                       logboek.Context(ctx).Debug().LogF("Writing json report 
to the %q:\n%s", phase.ReportPath, data)
-               case ReportEnvFile:
-                       data = phase.ImagesReport.ToEnvFileData()
-                       logboek.Context(ctx).Debug().LogF("Writing envfile 
report to the %q:\n%s", phase.ReportPath, data)
-               default:
-                       panic(fmt.Sprintf("unknown report format %q", 
phase.ReportFormat))
-               }
-
-               if err := ioutil.WriteFile(phase.ReportPath, data, 0o644); err 
!= nil {
-                       return fmt.Errorf("unable to write report to %s: %w", 
phase.ReportPath, err)
-               }
-       }
-
-       return nil
+       return createBuildReport(ctx, phase)
 }
 
 func (phase *BuildPhase) ImageProcessingShouldBeStopped(_ context.Context, _ 
*image.Image) bool {
@@ -766,13 +606,26 @@
                }
        }
 
-       foundSuitableStage, cleanupFunc, err := phase.calculateStage(ctx, img, 
stg)
+       var foundSuitableStage bool
+       var cleanupFunc func()
+
+       if err := logboek.Context(ctx).Info().LogProcess("Try to find suitable 
stage for %s", stg.LogDetailedName()).
+               DoError(func() error {
+                       var err error
+                       var found bool
+                       found, cleanupFunc, err = phase.calculateStage(ctx, 
img, stg)
+                       if err != nil {
+                               return err
+                       }
+                       foundSuitableStage = found
+                       return nil
+               }); err != nil {
+               return err
+       }
+
        if cleanupFunc != nil {
                defer cleanupFunc()
        }
-       if err != nil {
-               return err
-       }
 
        if foundSuitableStage {
                logboek.Context(ctx).Default().LogFHighlight("Use previously 
built image for %s\n", stg.LogDetailedName())
@@ -786,6 +639,10 @@
                        }
                }
 
+               stg.SetMeta(&stage.StageMeta{
+                       Rebuilt: false,
+               })
+
                return nil
        }
 
@@ -800,14 +657,24 @@
                        return fmt.Errorf("stages required")
                }
 
+               start := time.Now()
+
                // Will build a new stage
                i := phase.Conveyor.GetOrCreateStageImage(uuid.New().String(), 
phase.StagesIterator.GetPrevImage(img, stg), stg, img)
                stg.SetStageImage(i)
 
+               var fetchInfo fetchBaseImageForStageInfo
                if stg.IsBuildable() {
-                       if err := phase.fetchBaseImageForStage(ctx, img, stg); 
err != nil {
+                       info, err := phase.fetchBaseImageForStage(ctx, img, stg)
+                       if err != nil {
                                return err
                        }
+                       fetchInfo = info
+               } else {
+                       fetchInfo = fetchBaseImageForStageInfo{
+                               BaseImagePulled: false,
+                               BaseImageSource: BaseImageSourceTypeRepo,
+                       }
                }
 
                if err := phase.prepareStageInstructions(ctx, img, stg); err != 
nil {
@@ -817,6 +684,14 @@
                if err := phase.buildStage(ctx, img, stg); err != nil {
                        return err
                }
+               duration := time.Since(start).Seconds()
+
+               stg.SetMeta(&stage.StageMeta{
+                       Rebuilt:             true,
+                       BaseImagePulled:     fetchInfo.BaseImagePulled,
+                       BaseImageSourceType: fetchInfo.BaseImageSource,
+                       BuildTime:           fmt.Sprintf("%.2f", duration),
+               })
        }
 
        // debug assertion
@@ -824,6 +699,12 @@
                panic(fmt.Sprintf("expected stage %s image %q built image info 
(image name = %s) to be set!", stg.Name(), img.GetName(), 
stg.GetStageImage().Image.Name()))
        }
 
+       if foundSuitableSecondaryStage {
+               stg.SetMeta(&stage.StageMeta{
+                       BaseImageSourceType: BaseImageSourceTypeSecondary,
+               })
+       }
+
        // Add managed image record only if there was at least one newly built 
stage
        if !phase.BuildOptions.SkipAddManagedImagesRecords {
                phase.Conveyor.SetShouldAddManagedImagesRecords()
@@ -905,8 +786,8 @@
                        storageManager.GetCacheStagesStorageList(),
                        manager.CopyStageIntoStorageOptions{
                                FetchStage:       stg,
-                               LogDetailedName:  stg.LogDetailedName(),
                                ContainerBackend: 
phase.Conveyor.ContainerBackend,
+                               LogDetailedName:  stg.LogDetailedName(),
                        },
                ); err != nil {
                        return fmt.Errorf("unable to copy stage %s into cache 
storages: %w", stg.GetStageImage().Image.GetStageDesc().StageID.String(), err)
@@ -936,15 +817,28 @@
        return foundSuitableStage, nil
 }
 
-func (phase *BuildPhase) fetchBaseImageForStage(ctx context.Context, img 
*image.Image, stg stage.Interface) error {
+type fetchBaseImageForStageInfo struct {
+       BaseImagePulled bool
+       BaseImageSource string
+}
+
+func (phase *BuildPhase) fetchBaseImageForStage(ctx context.Context, img 
*image.Image, stg stage.Interface) (fetchBaseImageForStageInfo, error) {
        if stg.HasPrevStage() {
-               return phase.Conveyor.StorageManager.FetchStage(ctx, 
phase.Conveyor.ContainerBackend, phase.StagesIterator.PrevBuiltStage)
+               info, err := phase.Conveyor.StorageManager.FetchStage(ctx, 
phase.Conveyor.ContainerBackend, phase.StagesIterator.PrevBuiltStage)
+               return fetchBaseImageForStageInfo{
+                       BaseImagePulled: info.BaseImagePulled,
+                       BaseImageSource: info.BaseImageSource,
+               }, err
        } else {
-               if err := img.FetchBaseImage(ctx); err != nil {
-                       return fmt.Errorf("unable to fetch base image %q for 
stage %s: %w", img.GetBaseStageImage().Image.Name(), stg.LogDetailedName(), err)
+               info, err := img.FetchBaseImage(ctx)
+               if err != nil {
+                       return fetchBaseImageForStageInfo{}, fmt.Errorf("unable 
to fetch base image %q for stage %s: %w", img.GetBaseStageImage().Image.Name(), 
stg.LogDetailedName(), err)
                }
+               return fetchBaseImageForStageInfo{
+                       BaseImagePulled: info.BaseImagePulled,
+                       BaseImageSource: info.BaseImageSource,
+               }, nil
        }
-       return nil
 }
 
 func (phase *BuildPhase) calculateStage(ctx context.Context, img *image.Image, 
stg stage.Interface) (bool, func(), error) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/build/build_report.go 
new/werf-2.42.0/pkg/build/build_report.go
--- old/werf-2.41.2/pkg/build/build_report.go   1970-01-01 01:00:00.000000000 
+0100
+++ new/werf-2.42.0/pkg/build/build_report.go   2025-07-11 16:48:28.000000000 
+0200
@@ -0,0 +1,244 @@
+package build
+
+import (
+       "bytes"
+       "context"
+       "encoding/json"
+       "fmt"
+       "os"
+       "sync"
+
+       "github.com/werf/common-go/pkg/util"
+       "github.com/werf/logboek"
+       "github.com/werf/werf/v2/pkg/build/image"
+       "github.com/werf/werf/v2/pkg/storage"
+)
+
+const (
+       ReportJSON    ReportFormat = "json"
+       ReportEnvFile ReportFormat = "envfile"
+)
+
+const (
+       BaseImageSourceTypeRepo      = "repo"
+       BaseImageSourceTypeSecondary = "secondary"
+)
+
+type ReportFormat string
+
+type ReportImageRecord struct {
+       WerfImageName     string
+       DockerRepo        string
+       DockerTag         string
+       DockerImageID     string
+       DockerImageDigest string
+       DockerImageName   string
+       Rebuilt           bool
+       Final             bool
+       Stages            []ReportStageRecord
+}
+
+type ReportStageRecord struct {
+       Name              string
+       DockerImageName   string
+       DockerTag         string
+       DockerImageID     string
+       DockerImageDigest string
+       CreatedAt         int64
+       Size              int64
+       SourceType        string
+       BaseImagePulled   bool
+       Rebuilt           bool
+       BuildTime         string
+}
+
+type ImagesReport struct {
+       mux              sync.Mutex
+       Images           map[string]ReportImageRecord
+       ImagesByPlatform map[string]map[string]ReportImageRecord
+}
+
+func NewImagesReport() *ImagesReport {
+       return &ImagesReport{
+               Images:           make(map[string]ReportImageRecord),
+               ImagesByPlatform: make(map[string]map[string]ReportImageRecord),
+       }
+}
+
+func (report *ImagesReport) SetImageRecord(name string, imageRecord 
ReportImageRecord) {
+       report.mux.Lock()
+       defer report.mux.Unlock()
+       report.Images[name] = imageRecord
+}
+
+func (report *ImagesReport) SetImageByPlatformRecord(targetPlatform, name 
string, imageRecord ReportImageRecord) {
+       report.mux.Lock()
+       defer report.mux.Unlock()
+
+       if _, hasKey := report.ImagesByPlatform[name]; !hasKey {
+               report.ImagesByPlatform[name] = 
make(map[string]ReportImageRecord)
+       }
+       report.ImagesByPlatform[name][targetPlatform] = imageRecord
+}
+
+func (report *ImagesReport) ToJsonData() ([]byte, error) {
+       report.mux.Lock()
+       defer report.mux.Unlock()
+
+       data, err := json.MarshalIndent(report, "", "\t")
+       if err != nil {
+               return nil, err
+       }
+       data = append(data, []byte("\n")...)
+
+       return data, nil
+}
+
+func (report *ImagesReport) ToEnvFileData() []byte {
+       report.mux.Lock()
+       defer report.mux.Unlock()
+
+       buf := bytes.NewBuffer([]byte{})
+       for img, record := range report.Images {
+               buf.WriteString(GenerateImageEnv(img, record.DockerImageName))
+               buf.WriteString("\n")
+       }
+
+       return buf.Bytes()
+}
+
+func createBuildReport(ctx context.Context, phase *BuildPhase) error {
+       for _, desc := range phase.Conveyor.imagesTree.GetImagesByName(false) {
+               name, images := desc.Unpair()
+               targetPlatforms := util.MapFuncToSlice(images, func(img 
*image.Image) string { return img.TargetPlatform })
+
+               for _, img := range images {
+                       stageImage := 
img.GetLastNonEmptyStage().GetStageImage().Image
+                       stageDesc := stageImage.GetFinalStageDesc()
+                       if stageDesc == nil {
+                               stageDesc = stageImage.GetStageDesc()
+                       }
+
+                       stages := getStagesReport(img, false)
+
+                       record := ReportImageRecord{
+                               WerfImageName:     img.GetName(),
+                               DockerRepo:        stageDesc.Info.Repository,
+                               DockerTag:         stageDesc.Info.Tag,
+                               DockerImageID:     stageDesc.Info.ID,
+                               DockerImageDigest: stageDesc.Info.GetDigest(),
+                               DockerImageName:   stageDesc.Info.Name,
+                               Rebuilt:           img.GetRebuilt(),
+                               Final:             img.IsFinal,
+                               Stages:            stages,
+                       }
+
+                       if os.Getenv("WERF_ENABLE_REPORT_BY_PLATFORM") == "1" {
+                               
phase.ImagesReport.SetImageByPlatformRecord(img.TargetPlatform, img.GetName(), 
record)
+                       }
+                       if len(targetPlatforms) == 1 {
+                               phase.ImagesReport.SetImageRecord(img.Name, 
record)
+                       }
+               }
+
+               if _, isLocal := 
phase.Conveyor.StorageManager.GetStagesStorage().(*storage.LocalStagesStorage); 
!isLocal {
+                       if len(targetPlatforms) > 1 {
+                               img := 
phase.Conveyor.imagesTree.GetMultiplatformImage(name)
+
+                               isRebuilt := false
+                               for _, pImg := range img.Images {
+                                       isRebuilt = (isRebuilt || 
pImg.GetRebuilt())
+                               }
+
+                               stageDesc := img.GetFinalStageDesc()
+                               if stageDesc == nil {
+                                       stageDesc = img.GetStageDesc()
+                               }
+
+                               stages := []ReportStageRecord{}
+                               for _, pImg := range img.Images {
+                                       for _, stage := range 
getStagesReport(pImg, true) {
+                                               stages = append(stages, stage)
+                                       }
+                               }
+
+                               record := ReportImageRecord{
+                                       WerfImageName:     img.Name,
+                                       DockerRepo:        
stageDesc.Info.Repository,
+                                       DockerTag:         stageDesc.Info.Tag,
+                                       DockerImageID:     stageDesc.Info.ID,
+                                       DockerImageDigest: 
stageDesc.Info.GetDigest(),
+                                       DockerImageName:   stageDesc.Info.Name,
+                                       Rebuilt:           isRebuilt,
+                                       Final:             img.IsFinal,
+                                       Stages:            stages,
+                               }
+                               phase.ImagesReport.SetImageRecord(img.Name, 
record)
+                       }
+               }
+       }
+
+       debugJsonData, err := phase.ImagesReport.ToJsonData()
+       logboek.Context(ctx).Debug().LogF("ImagesReport: (err: %v)\n%s", err, 
debugJsonData)
+
+       if phase.ReportPath != "" {
+               var data []byte
+               var err error
+               switch phase.ReportFormat {
+               case ReportJSON:
+                       if data, err = phase.ImagesReport.ToJsonData(); err != 
nil {
+                               return fmt.Errorf("unable to prepare report 
json: %w", err)
+                       }
+                       logboek.Context(ctx).Debug().LogF("Writing json report 
to the %q:\n%s", phase.ReportPath, data)
+               case ReportEnvFile:
+                       data = phase.ImagesReport.ToEnvFileData()
+                       logboek.Context(ctx).Debug().LogF("Writing envfile 
report to the %q:\n%s", phase.ReportPath, data)
+               default:
+                       panic(fmt.Sprintf("unknown report format %q", 
phase.ReportFormat))
+               }
+
+               if err := os.WriteFile(phase.ReportPath, data, 0o644); err != 
nil {
+                       return fmt.Errorf("unable to write report to %s: %w", 
phase.ReportPath, err)
+               }
+       }
+
+       return nil
+}
+
+func setBuildTime(b bool, t string) string {
+       if !b {
+               return "0.00"
+       }
+       return t
+}
+
+func getStagesReport(img *image.Image, multiplatform bool) []ReportStageRecord 
{
+       var stagesRecords []ReportStageRecord
+       for _, stg := range img.GetStages() {
+               stgImg := stg.GetStageImage()
+               if stgImg == nil || stgImg.Image == nil || 
stgImg.Image.GetStageDesc() == nil {
+                       continue
+               }
+               stgMeta := stg.GetMeta()
+               stgDesc := stgImg.Image.GetStageDesc()
+               name := string(stg.Name())
+               if multiplatform {
+                       name = fmt.Sprintf("%s (%s)", name, img.TargetPlatform)
+               }
+               record := ReportStageRecord{
+                       Name:              name,
+                       DockerImageName:   stgDesc.Info.Name,
+                       DockerTag:         stgDesc.Info.Tag,
+                       DockerImageID:     stgDesc.Info.ID,
+                       DockerImageDigest: stgDesc.Info.GetDigest(),
+                       CreatedAt:         stgDesc.Info.CreatedAtUnixNano,
+                       Size:              stgDesc.Info.Size,
+                       SourceType:        stgMeta.BaseImageSourceType,
+                       BaseImagePulled:   stgMeta.BaseImagePulled,
+                       Rebuilt:           stgMeta.Rebuilt,
+                       BuildTime:         setBuildTime(stgMeta.Rebuilt, 
stgMeta.BuildTime),
+               }
+               stagesRecords = append(stagesRecords, record)
+       }
+       return stagesRecords
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/build/conveyor.go 
new/werf-2.42.0/pkg/build/conveyor.go
--- old/werf-2.41.2/pkg/build/conveyor.go       2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/build/conveyor.go       2025-07-11 16:48:28.000000000 
+0200
@@ -261,7 +261,7 @@
                stg = c.GetImage(targetPlatform, 
imageName).GetLastNonEmptyStage()
        }
 
-       if err := c.StorageManager.FetchStage(ctx, c.ContainerBackend, stg); 
err != nil {
+       if _, err := c.StorageManager.FetchStage(ctx, c.ContainerBackend, stg); 
err != nil {
                return nil, fmt.Errorf("unable to fetch stage %s: %w", 
stg.GetStageImage().Image.Name(), err)
        }
 
@@ -405,7 +405,8 @@
 
 func (c *Conveyor) FetchLastImageStage(ctx context.Context, targetPlatform, 
imageName string) error {
        lastImageStage := c.GetImage(targetPlatform, 
imageName).GetLastNonEmptyStage()
-       return c.StorageManager.FetchStage(ctx, c.ContainerBackend, 
lastImageStage)
+       _, err := c.StorageManager.FetchStage(ctx, c.ContainerBackend, 
lastImageStage)
+       return err
 }
 
 func (c *Conveyor) GetFullImageName(ctx context.Context, imageName string) 
(string, error) {
@@ -842,11 +843,13 @@
 }
 
 func (c *Conveyor) FetchImageStage(ctx context.Context, targetPlatform, 
imageName, stageName string) error {
-       return c.StorageManager.FetchStage(ctx, c.ContainerBackend, 
c.getImageStage(targetPlatform, imageName, stageName))
+       _, err := c.StorageManager.FetchStage(ctx, c.ContainerBackend, 
c.getImageStage(targetPlatform, imageName, stageName))
+       return err
 }
 
 func (c *Conveyor) FetchLastNonEmptyImageStage(ctx context.Context, 
targetPlatform, imageName string) error {
-       return c.StorageManager.FetchStage(ctx, c.ContainerBackend, 
c.getLastNonEmptyImageStage(targetPlatform, imageName))
+       _, err := c.StorageManager.FetchStage(ctx, c.ContainerBackend, 
c.getLastNonEmptyImageStage(targetPlatform, imageName))
+       return err
 }
 
 func (c *Conveyor) GetImageNameForLastImageStage(targetPlatform, imageName 
string) string {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/build/image/image.go 
new/werf-2.42.0/pkg/build/image/image.go
--- old/werf-2.41.2/pkg/build/image/image.go    2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/build/image/image.go    2025-07-11 16:48:28.000000000 
+0200
@@ -292,7 +292,7 @@
                                                                
options.Style(style.Highlight())
                                                        }).
                                                        DoError(func() error {
-                                                               return 
i.ContainerBackend.PullImageFromRegistry(ctx, i.baseStageImage.Image)
+                                                               return 
container_backend.PullImageFromRegistry(ctx, i.ContainerBackend, 
i.baseStageImage.Image)
                                                        }); err != nil {
                                                        return err
                                                }
@@ -348,24 +348,34 @@
        return i.baseImageRepoDigest
 }
 
-func (i *Image) FetchBaseImage(ctx context.Context) error {
+const (
+       BaseImageSourceTypeRepo     = "repo"
+       BaseImageSourceTypeRegistry = "registry"
+)
+
+type FetchBaseImageInfo struct {
+       BaseImagePulled bool
+       BaseImageSource string
+}
+
+func (i *Image) FetchBaseImage(ctx context.Context) (FetchBaseImageInfo, 
error) {
        logboek.Context(ctx).Debug().LogF(" -- FetchBaseImage for %q\n", i.Name)
 
        switch i.baseImageType {
        case ImageFromRegistryAsBaseImage:
                if i.baseStageImage.Image.Name() == "scratch" {
                        if !i.IsDockerfileImage {
-                               return fmt.Errorf(`invalid base image: 
"scratch" is not allowed for stapel images. Please use a Dockerfile image or an 
alternative scratch image, such as "registry.werf.io/werf/scratch"`)
+                               return FetchBaseImageInfo{}, 
fmt.Errorf(`invalid base image: "scratch" is not allowed for stapel images. 
Please use a Dockerfile image or an alternative scratch image, such as 
"registry.werf.io/werf/scratch"`)
                        }
 
-                       return nil
+                       return FetchBaseImageInfo{}, nil
                }
 
                // TODO: Refactor, move manifest fetching into SetupBaseImage, 
only pull image in FetchBaseImage method
 
                // Check if image exists locally and is up-to-date.
                if info, err := i.ContainerBackend.GetImageInfo(ctx, 
i.baseStageImage.Image.Name(), container_backend.GetImageInfoOpts{}); err != 
nil {
-                       return fmt.Errorf("unable to inspect local image %s: 
%w", i.baseStageImage.Image.Name(), err)
+                       return FetchBaseImageInfo{}, fmt.Errorf("unable to 
inspect local image %s: %w", i.baseStageImage.Image.Name(), err)
                } else if info != nil {
                        logboek.Context(ctx).Debug().LogF("GetImageInfo of %q 
-> %#v\n", i.baseStageImage.Image.Name(), info)
 
@@ -390,7 +400,7 @@
                                        logboek.Context(ctx).Info().LogF("No 
pull needed for base image %s of image %q: image by digest %s is up to date\n", 
i.baseImageReference, i.Name, i.baseImageRepoDigest)
                                }
                                // No image pull
-                               return nil
+                               return FetchBaseImageInfo{BaseImagePulled: 
false}, nil
                        }
                }
 
@@ -399,18 +409,18 @@
                                options.Style(style.Highlight())
                        }).
                        DoError(func() error {
-                               return 
i.ContainerBackend.PullImageFromRegistry(ctx, i.baseStageImage.Image)
+                               return 
container_backend.PullImageFromRegistry(ctx, i.ContainerBackend, 
i.baseStageImage.Image)
                        }); err != nil {
-                       return err
+                       return FetchBaseImageInfo{}, err
                }
 
                info, err := i.ContainerBackend.GetImageInfo(ctx, 
i.baseStageImage.Image.Name(), container_backend.GetImageInfoOpts{})
                if err != nil {
-                       return fmt.Errorf("unable to inspect local image %s: 
%w", i.baseStageImage.Image.Name(), err)
+                       return FetchBaseImageInfo{}, fmt.Errorf("unable to 
inspect local image %s: %w", i.baseStageImage.Image.Name(), err)
                }
 
                if info == nil {
-                       return fmt.Errorf("unable to inspect local image %s 
after successful pull: image is not exists", i.baseStageImage.Image.Name())
+                       return FetchBaseImageInfo{}, fmt.Errorf("unable to 
inspect local image %s after successful pull: image does not exist", 
i.baseStageImage.Image.Name())
                }
 
                // TODO: It might be a stage as base image (passed as 
dependency), and the absence of StageID in the description will lead to 
breaking the logic.
@@ -423,12 +433,13 @@
                        })
                }
 
-               return nil
+               return FetchBaseImageInfo{BaseImagePulled: true, 
BaseImageSource: BaseImageSourceTypeRegistry}, nil
        case StageAsBaseImage:
-               return i.StorageManager.FetchStage(ctx, i.ContainerBackend, 
i.stageAsBaseImage)
+               info, err := i.StorageManager.FetchStage(ctx, 
i.ContainerBackend, i.stageAsBaseImage)
+               return FetchBaseImageInfo{BaseImagePulled: 
info.BaseImagePulled, BaseImageSource: info.BaseImageSource}, err
 
        case NoBaseImage:
-               return nil
+               return FetchBaseImageInfo{BaseImagePulled: true, 
BaseImageSource: BaseImageSourceTypeRepo}, nil
 
        default:
                panic(fmt.Sprintf("unknown base image type %q", 
i.baseImageType))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/build/stage/base.go 
new/werf-2.42.0/pkg/build/stage/base.go
--- old/werf-2.41.2/pkg/build/stage/base.go     2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/build/stage/base.go     2025-07-11 16:48:28.000000000 
+0200
@@ -96,6 +96,7 @@
        s.imageTmpDir = options.ImageTmpDir
        s.containerWerfDir = options.ContainerWerfDir
        s.projectName = options.ProjectName
+       s.meta = &StageMeta{}
        return s
 }
 
@@ -112,6 +113,14 @@
        containerWerfDir string
        configMounts     []*config.Mount
        projectName      string
+       meta             *StageMeta
+}
+
+type StageMeta struct {
+       Rebuilt             bool
+       BaseImagePulled     bool
+       BaseImageSourceType string
+       BuildTime           string
 }
 
 func (s *BaseStage) IsBuildable() bool {
@@ -561,3 +570,14 @@
 
        return res
 }
+
+func (s *BaseStage) SetMeta(meta *StageMeta) {
+       s.meta = meta
+}
+
+func (s *BaseStage) GetMeta() *StageMeta {
+       if s.meta == nil {
+               s.meta = &StageMeta{}
+       }
+       return s.meta
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/build/stage/interface.go 
new/werf-2.42.0/pkg/build/stage/interface.go
--- old/werf-2.41.2/pkg/build/stage/interface.go        2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/pkg/build/stage/interface.go        2025-07-11 
16:48:28.000000000 +0200
@@ -45,4 +45,7 @@
        IsStapelStage() bool
 
        UsesBuildContext() bool
+
+       SetMeta(meta *StageMeta)
+       GetMeta() *StageMeta
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/config/parser.go 
new/werf-2.42.0/pkg/config/parser.go
--- old/werf-2.41.2/pkg/config/parser.go        2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/config/parser.go        2025-07-11 16:48:28.000000000 
+0200
@@ -78,68 +78,80 @@
 }
 
 func GetWerfConfig(ctx context.Context, customWerfConfigRelPath, 
customWerfConfigTemplatesDirRelPath string, giterminismManager 
giterminism_manager.Interface, opts WerfConfigOptions) (string, *WerfConfig, 
error) {
-       werfConfigPath, werfConfigRenderContent, err := 
renderWerfConfigYaml(ctx, renderWerfConfigYamlOpts{
-               customWerfConfigRelPath:             customWerfConfigRelPath,
-               customWerfConfigTemplatesDirRelPath: 
customWerfConfigTemplatesDirRelPath,
-               giterminismManager:                  giterminismManager,
-               env:                                 opts.Env,
-               debugTemplates:                      opts.DebugTemplates,
-       })
-       if err != nil {
-               return "", nil, fmt.Errorf("unable to render werf config: %w", 
err)
-       }
-
-       werfConfigRenderPath, err := tmp_manager.CreateWerfConfigRender(ctx)
-       if err != nil {
-               return "", nil, err
-       }
+       var path string
+       var config *WerfConfig
+       err := logboek.Context(ctx).Info().LogProcess("Render werf 
config").DoError(func() error {
+               werfConfigPath, werfConfigRenderContent, err := 
renderWerfConfigYaml(ctx, renderWerfConfigYamlOpts{
+                       customWerfConfigRelPath:             
customWerfConfigRelPath,
+                       customWerfConfigTemplatesDirRelPath: 
customWerfConfigTemplatesDirRelPath,
+                       giterminismManager:                  giterminismManager,
+                       env:                                 opts.Env,
+                       debugTemplates:                      
opts.DebugTemplates,
+               })
+               if err != nil {
+                       return fmt.Errorf("unable to render werf config: %w", 
err)
+               }
 
-       if opts.LogRenderedFilePath {
-               logboek.Context(ctx).LogF("Using werf config render file: 
%s\n", werfConfigRenderPath)
-       }
+               werfConfigRenderPath, err := 
tmp_manager.CreateWerfConfigRender(ctx)
+               if err != nil {
+                       return err
+               }
 
-       err = writeWerfConfigRender(werfConfigRenderContent, 
werfConfigRenderPath)
-       if err != nil {
-               return "", nil, fmt.Errorf("unable to write rendered config to 
%s: %w", werfConfigRenderPath, err)
-       }
+               if opts.LogRenderedFilePath {
+                       logboek.Context(ctx).LogF("Using werf config render 
file: %s\n", werfConfigRenderPath)
+               }
 
-       docs, err := splitByDocs(werfConfigRenderContent, werfConfigRenderPath)
-       if err != nil {
-               return "", nil, err
-       }
+               err = writeWerfConfigRender(werfConfigRenderContent, 
werfConfigRenderPath)
+               if err != nil {
+                       return fmt.Errorf("unable to write rendered config to 
%s: %w", werfConfigRenderPath, err)
+               }
 
-       meta, rawStapelImages, rawImagesFromDockerfile, err := 
splitByMetaAndRawImages(docs)
-       if err != nil {
-               return "", nil, err
-       }
+               docs, err := splitByDocs(werfConfigRenderContent, 
werfConfigRenderPath)
+               if err != nil {
+                       return err
+               }
 
-       if meta == nil {
-               defaultProjectName, err := GetDefaultProjectName(ctx, 
giterminismManager)
+               meta, rawStapelImages, rawImagesFromDockerfile, err := 
splitByMetaAndRawImages(docs)
                if err != nil {
-                       return "", nil, fmt.Errorf("failed to get default 
project name: %w", err)
+                       return err
                }
 
-               format := "meta config section (part of YAML stream separated 
by three hyphens, https://yaml.org/spec/1.2/spec.html#id2800132) is not 
defined: add following example config section with required fields, e.g:\n\n" +
-                       "```\n" +
-                       "configVersion: 1\n" +
-                       "project: %s\n" +
-                       "---\n" +
-                       "```\n\n" +
-                       
"##############################################################################################################################\n"
 +
-                       "###           WARNING! Project name cannot be changed 
later without rebuilding and redeploying your application!           ###\n" +
-                       "###       Project name should be unique within group 
of projects that shares build hosts and deployed into the same        ###\n" +
-                       "###                    Kubernetes clusters (i.e. 
unique across all groups within the same gitlab).                         
###\n" +
-                       
"##############################################################################################################################"
+               if meta == nil {
+                       defaultProjectName, err := GetDefaultProjectName(ctx, 
giterminismManager)
+                       if err != nil {
+                               return fmt.Errorf("failed to get default 
project name: %w", err)
+                       }
+
+                       format := "meta config section (part of YAML stream 
separated by three hyphens, https://yaml.org/spec/1.2/spec.html#id2800132) is 
not defined: add following example config section with required fields, 
e.g:\n\n" +
+                               "```\n" +
+                               "configVersion: 1\n" +
+                               "project: %s\n" +
+                               "---\n" +
+                               "```\n\n" +
+                               
"##############################################################################################################################\n"
 +
+                               "###           WARNING! Project name cannot be 
changed later without rebuilding and redeploying your application!           
###\n" +
+                               "###       Project name should be unique within 
group of projects that shares build hosts and deployed into the same        
###\n" +
+                               "###                    Kubernetes clusters 
(i.e. unique across all groups within the same gitlab).                         
###\n" +
+                               
"##############################################################################################################################"
 
-               return "", nil, fmt.Errorf(format, defaultProjectName)
-       }
+                       return fmt.Errorf(format, defaultProjectName)
+               }
+
+               werfConfig, err := prepareWerfConfig(giterminismManager, 
rawStapelImages, rawImagesFromDockerfile, meta)
+               if err != nil {
+                       return err
+               }
 
-       werfConfig, err := prepareWerfConfig(giterminismManager, 
rawStapelImages, rawImagesFromDockerfile, meta)
+               path = werfConfigPath
+               config = werfConfig
+
+               return nil
+       })
        if err != nil {
                return "", nil, err
        }
 
-       return werfConfigPath, werfConfig, nil
+       return path, config, nil
 }
 
 func GetDefaultProjectName(ctx context.Context, giterminismManager 
giterminism_manager.Interface) (string, error) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/container_backend/interface.go 
new/werf-2.42.0/pkg/container_backend/interface.go
--- old/werf-2.41.2/pkg/container_backend/interface.go  2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/pkg/container_backend/interface.go  2025-07-11 
16:48:28.000000000 +0200
@@ -5,6 +5,7 @@
        "context"
 
        "github.com/werf/common-go/pkg/util"
+       "github.com/werf/logboek"
        "github.com/werf/werf/v2/pkg/container_backend/info"
        "github.com/werf/werf/v2/pkg/container_backend/prune"
        "github.com/werf/werf/v2/pkg/image"
@@ -128,3 +129,9 @@
        RemoveImage(ctx context.Context, img LegacyImageInterface) error
        TagImageByName(ctx context.Context, img LegacyImageInterface) error
 }
+
+func PullImageFromRegistry(ctx context.Context, containerBackend 
ContainerBackend, img LegacyImageInterface) error {
+       return logboek.Context(ctx).Info().LogProcess("Pulling image %s", 
img.Name()).DoError(func() error {
+               return containerBackend.PullImageFromRegistry(ctx, img)
+       })
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/docker_registry/api.go 
new/werf-2.42.0/pkg/docker_registry/api.go
--- old/werf-2.41.2/pkg/docker_registry/api.go  2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/docker_registry/api.go  2025-07-11 16:48:28.000000000 
+0200
@@ -243,9 +243,22 @@
                api.defaultRemoteOptions(ctx),
                extraListOptions...,
        )
-       tags, err := remote.List(repo, listOptions...)
-       if err != nil {
-               return nil, fmt.Errorf("reading tags for %q: %w", repo, err)
+
+       var tags []string
+       if err := logboek.Context(ctx).Info().LogProcess("List tags for repo 
%s", repo).DoError(func() error {
+               var err error
+               tags, err = remote.List(repo, listOptions...)
+               if err != nil {
+                       return fmt.Errorf("reading tags for %q: %w", repo, err)
+               }
+
+               // TODO(iapershin): add additional logic for warnings about 
tags/meta ratio
+
+               logboek.Context(ctx).Info().LogF("Total tags listed: %d\n", 
len(tags))
+
+               return nil
+       }); err != nil {
+               return nil, err
        }
 
        return tags, nil
@@ -520,31 +533,32 @@
 }
 
 func (api *api) writeToRemote(ctx context.Context, ref name.Reference, 
imageOrIndex interface{}) error {
-       c := make(chan v1.Update, 200)
+       return logboek.Context(ctx).Info().LogProcess("Pushing reference %s to 
remote repo", ref).DoError(func() error {
+               c := make(chan v1.Update, 200)
 
-       remoteOpts := append(api.defaultRemoteOptions(ctx), 
remote.WithProgress(c))
-       switch i := imageOrIndex.(type) {
-       case v1.Image:
-               go remote.Write(ref, i, remoteOpts...)
-       case v1.ImageIndex:
-               go remote.WriteIndex(ref, i, remoteOpts...)
-       default:
-               panic(fmt.Sprintf("unexpected object type %#v", i))
-       }
-
-       for upd := range c {
-               switch {
-               case upd.Error != nil && errors.Is(upd.Error, io.EOF):
-                       logboek.Context(ctx).Debug().LogF("(%d/%d) done pushing 
image %q\n", upd.Complete, upd.Total, ref.String())
-                       return nil
-               case upd.Error != nil:
-                       return fmt.Errorf("error pushing image: %w", upd.Error)
+               remoteOpts := append(api.defaultRemoteOptions(ctx), 
remote.WithProgress(c))
+               switch i := imageOrIndex.(type) {
+               case v1.Image:
+                       go remote.Write(ref, i, remoteOpts...)
+               case v1.ImageIndex:
+                       go remote.WriteIndex(ref, i, remoteOpts...)
                default:
-                       logboek.Context(ctx).Debug().LogF("(%d/%d) pushing 
image %s is in progress\n", upd.Complete, upd.Total, ref.String())
+                       panic(fmt.Sprintf("unexpected object type %#v", i))
                }
-       }
 
-       return nil
+               for upd := range c {
+                       switch {
+                       case upd.Error != nil && errors.Is(upd.Error, io.EOF):
+                               logboek.Context(ctx).Debug().LogF("(%d/%d) done 
pushing image %q\n", upd.Complete, upd.Total, ref.String())
+                               return nil
+                       case upd.Error != nil:
+                               return fmt.Errorf("error pushing image: %w", 
upd.Error)
+                       default:
+                               logboek.Context(ctx).Debug().LogF("(%d/%d) 
pushing image %s is in progress\n", upd.Complete, upd.Total, ref.String())
+                       }
+               }
+               return nil
+       })
 }
 
 func (api *api) PullImageArchive(ctx context.Context, archiveWriter io.Writer, 
reference string) error {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/storage/manager/copy.go 
new/werf-2.42.0/pkg/storage/manager/copy.go
--- old/werf-2.41.2/pkg/storage/manager/copy.go 2025-07-10 13:23:07.000000000 
+0200
+++ new/werf-2.42.0/pkg/storage/manager/copy.go 2025-07-11 16:48:28.000000000 
+0200
@@ -38,7 +38,7 @@
        }
 
        if opts.FetchStage != nil {
-               if err := m.FetchStage(ctx, opts.ContainerBackend, 
opts.FetchStage); err != nil {
+               if _, err := m.FetchStage(ctx, opts.ContainerBackend, 
opts.FetchStage); err != nil {
                        return nil, fmt.Errorf("unable to fetch stage %s: %w", 
opts.FetchStage.LogDetailedName(), err)
                }
        }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/storage/manager/storage_manager.go 
new/werf-2.42.0/pkg/storage/manager/storage_manager.go
--- old/werf-2.41.2/pkg/storage/manager/storage_manager.go      2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/pkg/storage/manager/storage_manager.go      2025-07-11 
16:48:28.000000000 +0200
@@ -77,7 +77,7 @@
        GetStageDescSetWithCache(ctx context.Context) (image.StageDescSet, 
error)
        GetFinalStageDescSet(ctx context.Context) (image.StageDescSet, error)
 
-       FetchStage(ctx context.Context, containerBackend 
container_backend.ContainerBackend, stg stage.Interface) error
+       FetchStage(ctx context.Context, containerBackend 
container_backend.ContainerBackend, stg stage.Interface) (FetchStageInfo, error)
        SelectSuitableStageDesc(ctx context.Context, c stage.Conveyor, stg 
stage.Interface, stageDescSet image.StageDescSet) (*image.StageDesc, error)
        CopySuitableStageDescByDigest(ctx context.Context, stageDesc 
*image.StageDesc, sourceStagesStorage, destinationStagesStorage 
storage.StagesStorage, containerBackend container_backend.ContainerBackend, 
targetPlatform string) (*image.StageDesc, error)
        CopyStageIntoCacheStorages(ctx context.Context, stageID image.StageID, 
cacheStagesStorages []storage.StagesStorage, opts CopyStageIntoStorageOptions) 
error
@@ -398,16 +398,27 @@
        })
 }
 
-func (m *StorageManager) FetchStage(ctx context.Context, containerBackend 
container_backend.ContainerBackend, stg stage.Interface) error {
+const (
+       // could not be imported from build
+       BaseImageSourceTypeCacheRepo = "cache-repo"
+       BaseImageSourceTypeRepo      = "repo"
+)
+
+type FetchStageInfo struct {
+       BaseImagePulled bool
+       BaseImageSource string
+}
+
+func (m *StorageManager) FetchStage(ctx context.Context, containerBackend 
container_backend.ContainerBackend, stg stage.Interface) (FetchStageInfo, 
error) {
        logboek.Context(ctx).Debug().LogF("-- StagesManager.FetchStage %s\n", 
stg.LogDetailedName())
 
        if err := m.LockStageImage(ctx, stg.GetStageImage().Image.Name()); err 
!= nil {
-               return fmt.Errorf("error locking stage image %q: %w", 
stg.GetStageImage().Image.Name(), err)
+               return FetchStageInfo{}, fmt.Errorf("error locking stage image 
%q: %w", stg.GetStageImage().Image.Name(), err)
        }
 
        shouldFetch, err := m.StagesStorage.ShouldFetchImage(ctx, 
stg.GetStageImage().Image)
        if err != nil {
-               return fmt.Errorf("error checking should fetch image: %w", err)
+               return FetchStageInfo{}, fmt.Errorf("error checking should 
fetch image: %w", err)
        }
        if !shouldFetch {
                imageName := 
m.StagesStorage.ConstructStageImageName(m.ProjectName, 
stg.GetStageImage().Image.GetStageDesc().StageID.Digest, 
stg.GetStageImage().Image.GetStageDesc().StageID.CreationTs)
@@ -415,14 +426,16 @@
                logboek.Context(ctx).Info().LogF("Image %s exists, will not 
perform fetch\n", imageName)
 
                if err := lrumeta.CommonLRUImagesCache.AccessImage(ctx, 
imageName); err != nil {
-                       return fmt.Errorf("error accessing last recently used 
images cache for %s: %w", imageName, err)
+                       return FetchStageInfo{}, fmt.Errorf("error accessing 
last recently used images cache for %s: %w", imageName, err)
                }
 
-               return nil
+               return FetchStageInfo{BaseImagePulled: false}, nil
        }
 
        var fetchedImg container_backend.LegacyImageInterface
        var cacheStagesStorageListToRefill []storage.StagesStorage
+       var pulled bool
+       var source string
 
        fetchStageFromCache := func(stagesStorage storage.StagesStorage) 
(container_backend.LegacyImageInterface, error) {
                stageID := stg.GetStageImage().Image.GetStageDesc().StageID
@@ -441,6 +454,7 @@
                        proc.Start()
 
                        err := doFetchStage(ctx, m.ProjectName, stagesStorage, 
*stageID, stageImage)
+                       pulled = true
 
                        if IsErrStageNotFound(err) {
                                logboek.Context(ctx).Default().LogF("Stage not 
found\n")
@@ -468,6 +482,7 @@
                        if stageDesc == nil {
                                return nil, ErrStageNotFound
                        }
+                       pulled = false
                        stageImage.SetStageDesc(stageDesc)
                }
 
@@ -523,6 +538,7 @@
                }
 
                fetchedImg = cacheImg
+               source = BaseImageSourceTypeCacheRepo
                break
        }
 
@@ -537,7 +553,7 @@
 
                if IsErrStageNotFound(err) {
                        logboek.Context(ctx).Error().LogF("Stage %s image %s is 
no longer available!\n", stg.LogDetailedName(), 
stg.GetStageImage().Image.Name())
-                       return ErrUnexpectedStagesStorageState
+                       return FetchStageInfo{}, ErrUnexpectedStagesStorageState
                }
 
                if storage.IsErrBrokenImage(err) {
@@ -545,16 +561,17 @@
 
                        logboek.Context(ctx).Error().LogF("Will mark image %s 
as rejected in the stages storage %s\n", stg.GetStageImage().Image.Name(), 
m.StagesStorage.String())
                        if err := m.StagesStorage.RejectStage(ctx, 
m.ProjectName, stageID.Digest, stageID.CreationTs); err != nil {
-                               return fmt.Errorf("unable to reject stage %s 
image %s in the stages storage %s: %w", stg.LogDetailedName(), 
stg.GetStageImage().Image.Name(), m.StagesStorage.String(), err)
+                               return FetchStageInfo{}, fmt.Errorf("unable to 
reject stage %s image %s in the stages storage %s: %w", stg.LogDetailedName(), 
stg.GetStageImage().Image.Name(), m.StagesStorage.String(), err)
                        }
 
-                       return ErrUnexpectedStagesStorageState
+                       return FetchStageInfo{}, ErrUnexpectedStagesStorageState
                }
 
                if err != nil {
-                       return fmt.Errorf("unable to fetch stage %s from stages 
storage %s: %w", stageID.String(), m.StagesStorage.String(), err)
+                       return FetchStageInfo{}, fmt.Errorf("unable to fetch 
stage %s from stages storage %s: %w", stageID.String(), 
m.StagesStorage.String(), err)
                }
 
+               source = BaseImageSourceTypeRepo
                fetchedImg = img.Image
        }
 
@@ -576,7 +593,7 @@
                }
        }
 
-       return nil
+       return FetchStageInfo{BaseImagePulled: pulled, BaseImageSource: 
source}, nil
 }
 
 func (m *StorageManager) CopyStageIntoCacheStorages(ctx context.Context, 
stageID image.StageID, cacheStagesStorageList []storage.StagesStorage, opts 
CopyStageIntoStorageOptions) error {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/pkg/storage/repo_stages_storage.go 
new/werf-2.42.0/pkg/storage/repo_stages_storage.go
--- old/werf-2.41.2/pkg/storage/repo_stages_storage.go  2025-07-10 
13:23:07.000000000 +0200
+++ new/werf-2.42.0/pkg/storage/repo_stages_storage.go  2025-07-11 
16:48:28.000000000 +0200
@@ -535,7 +535,7 @@
 }
 
 func (storage *RepoStagesStorage) FetchImage(ctx context.Context, img 
container_backend.LegacyImageInterface) error {
-       if err := storage.ContainerBackend.PullImageFromRegistry(ctx, img); err 
!= nil {
+       if err := container_backend.PullImageFromRegistry(ctx, 
storage.ContainerBackend, img); err != nil {
                if strings.HasSuffix(err.Error(), "unknown blob") {
                        return ErrBrokenImage
                }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/werf-2.41.2/trdl_channels.yaml 
new/werf-2.42.0/trdl_channels.yaml
--- old/werf-2.41.2/trdl_channels.yaml  2025-07-10 13:23:07.000000000 +0200
+++ new/werf-2.42.0/trdl_channels.yaml  2025-07-11 16:48:28.000000000 +0200
@@ -38,7 +38,7 @@
   - name: "2"
     channels:
       - name: alpha
-        version: 2.41.1
+        version: 2.41.3
       - name: beta
         version: 2.38.1
       - name: ea

++++++ werf.obsinfo ++++++
--- /var/tmp/diff_new_pack.m4kje7/_old  2025-07-18 16:01:43.190461704 +0200
+++ /var/tmp/diff_new_pack.m4kje7/_new  2025-07-18 16:01:43.226463207 +0200
@@ -1,5 +1,5 @@
 name: werf
-version: 2.41.2
-mtime: 1752146587
-commit: 5e2957900782c508c79e120e0126f62337e87fbe
+version: 2.42.0
+mtime: 1752245308
+commit: ccdb348347e7c2967e326e55140494a1f0fa8294
 

Reply via email to