Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package kubearmor-client for 
openSUSE:Factory checked in at 2025-06-13 18:47:50
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kubearmor-client (Old)
 and      /work/SRC/openSUSE:Factory/.kubearmor-client.new.19631 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "kubearmor-client"

Fri Jun 13 18:47:50 2025 rev:18 rq:1285464 version:1.4.3

Changes:
--------
--- /work/SRC/openSUSE:Factory/kubearmor-client/kubearmor-client.changes        
2025-06-06 22:44:05.259367051 +0200
+++ 
/work/SRC/openSUSE:Factory/.kubearmor-client.new.19631/kubearmor-client.changes 
    2025-06-13 18:48:01.209167121 +0200
@@ -1,0 +2,7 @@
+Fri Jun 13 11:31:33 UTC 2025 - Johannes Kastl 
<opensuse_buildserv...@ojkastl.de>
+
+- Update to version 1.4.3:
+  * Add profile docs for filtering by @harisudarsan1 in #497
+  * Reduce CPU usage in karmor profile by @harisudarsan1 in #498
+
+-------------------------------------------------------------------

Old:
----
  kubearmor-client-1.4.2.obscpio

New:
----
  kubearmor-client-1.4.3.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ kubearmor-client.spec ++++++
--- /var/tmp/diff_new_pack.mQgPcv/_old  2025-06-13 18:48:02.401216470 +0200
+++ /var/tmp/diff_new_pack.mQgPcv/_new  2025-06-13 18:48:02.401216470 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           kubearmor-client
-Version:        1.4.2
+Version:        1.4.3
 Release:        0
 Summary:        KubeArmor cli tool aka kArmor
 License:        Apache-2.0

++++++ _service ++++++
--- /var/tmp/diff_new_pack.mQgPcv/_old  2025-06-13 18:48:02.433217795 +0200
+++ /var/tmp/diff_new_pack.mQgPcv/_new  2025-06-13 18:48:02.433217795 +0200
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/kubearmor/kubearmor-client</param>
     <param name="scm">git</param>
     <param name="exclude">.git</param>
-    <param name="revision">v1.4.2</param>
+    <param name="revision">v1.4.3</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
     <param name="changesgenerate">enable</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.mQgPcv/_old  2025-06-13 18:48:02.453218623 +0200
+++ /var/tmp/diff_new_pack.mQgPcv/_new  2025-06-13 18:48:02.473219451 +0200
@@ -1,6 +1,6 @@
 <servicedata>
 <service name="tar_scm">
                 <param 
name="url">https://github.com/kubearmor/kubearmor-client</param>
-              <param 
name="changesrevision">829e9c0601ff20d38471b165604150395efcd831</param></service></servicedata>
+              <param 
name="changesrevision">5999c20fdf84721ebca44fd97bae9c075cf2e137</param></service></servicedata>
 (No newline at EOF)
 

++++++ kubearmor-client-1.4.2.obscpio -> kubearmor-client-1.4.3.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/kubearmor-client-1.4.2/cmd/profile.go 
new/kubearmor-client-1.4.3/cmd/profile.go
--- old/kubearmor-client-1.4.2/cmd/profile.go   2025-06-04 13:24:28.000000000 
+0200
+++ new/kubearmor-client-1.4.3/cmd/profile.go   2025-06-13 08:42:08.000000000 
+0200
@@ -8,24 +8,22 @@
        "github.com/spf13/cobra"
 )
 
-var profileOptions profileclient.Options
-
 // profileCmd represents the profile command
 var profilecmd = &cobra.Command{
        Use:   "profile",
        Short: "Profiling of logs",
        Long:  `Profiling of logs`,
        RunE: func(cmd *cobra.Command, args []string) error {
-               profileclient.Start(profileOptions)
+               profileclient.Start()
                return nil
        },
 }
 
 func init() {
        rootCmd.AddCommand(profilecmd)
-       profilecmd.Flags().StringVar(&profileOptions.GRPC, "gRPC", "", "use 
gRPC")
-       profilecmd.Flags().StringVarP(&profileOptions.Namespace, "namespace", 
"n", "", "Filter using namespace")
-       profilecmd.Flags().StringVar(&profileOptions.Pod, "pod", "", "Filter 
using Pod name")
-       profilecmd.Flags().StringVarP(&profileOptions.Container, "container", 
"c", "", "name of the container ")
-       profilecmd.Flags().BoolVar(&profileOptions.Save, "save", false, "Save 
Profile data in json format")
+       profilecmd.Flags().StringVar(&profileclient.ProfileOpts.GRPC, "gRPC", 
"", "use gRPC")
+       profilecmd.Flags().StringVarP(&profileclient.ProfileOpts.Namespace, 
"namespace", "n", "", "Filter using namespace")
+       profilecmd.Flags().StringVar(&profileclient.ProfileOpts.Pod, "pod", "", 
"Filter using Pod name")
+       profilecmd.Flags().StringVarP(&profileclient.ProfileOpts.Container, 
"container", "c", "", "name of the container ")
+       profilecmd.Flags().BoolVar(&profileclient.ProfileOpts.Save, "save", 
false, "Save Profile data in json format")
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/kubearmor-client-1.4.2/profile/Client/helpers.go 
new/kubearmor-client-1.4.3/profile/Client/helpers.go
--- old/kubearmor-client-1.4.2/profile/Client/helpers.go        1970-01-01 
01:00:00.000000000 +0100
+++ new/kubearmor-client-1.4.3/profile/Client/helpers.go        2025-06-13 
08:42:08.000000000 +0200
@@ -0,0 +1,110 @@
+package profileclient
+
+import (
+       "encoding/json"
+       "fmt"
+       "github.com/evertras/bubble-table/table"
+       pb "github.com/kubearmor/KubeArmor/protobuf"
+       "os"
+       "path/filepath"
+)
+
+func generateRowFromLog(entry pb.Log) table.Row {
+
+       logType := "Container"
+       if entry.Type == "HostLog" {
+               logType = "Host"
+               entry.NamespaceName = "--"
+               entry.ContainerName = "--"
+       }
+
+       p := Profile{
+               LogSource:     logType,
+               Namespace:     entry.NamespaceName,
+               ContainerName: entry.ContainerName,
+               Process:       entry.ProcessName,
+               Resource:      entry.Resource,
+               Result:        entry.Result,
+       }
+
+       if entry.Operation == "Syscall" {
+               p.Resource = entry.Data
+       }
+
+       row := table.NewRow(table.RowData{
+               ColumnLogSource:     p.LogSource,
+               ColumnNamespace:     p.Namespace,
+               ColumnContainerName: p.ContainerName,
+               ColumnProcessName:   p.Process,
+               ColumnResource:      p.Resource,
+               ColumnResult:        p.Result,
+               ColumnCount:         1,
+               ColumnTimestamp:     entry.UpdatedTime,
+       })
+
+       return row
+}
+
+func isCorrectLog(entry pb.Log) bool {
+
+       if (ProfileOpts.Namespace != "") && (entry.NamespaceName != 
ProfileOpts.Namespace) {
+               return false
+       }
+       if (ProfileOpts.Pod != "") && (entry.PodName != ProfileOpts.Pod) {
+               return false
+       }
+       if (ProfileOpts.Container != "") && (entry.ContainerName != 
ProfileOpts.Container) {
+               return false
+       }
+
+       return true
+}
+
+func ExportRowsToJSON(columns []table.Column, rows []table.Row, operation 
string) (string, error) {
+       out := make([]map[string]interface{}, len(rows))
+
+       for i, row := range rows {
+               rowMap := make(map[string]interface{}, len(columns))
+               for _, col := range columns {
+                       key := col.Key()
+                       if val, ok := row.Data[key]; ok {
+                               rowMap[key] = val
+                       } else {
+                               rowMap[key] = nil
+                       }
+               }
+               out[i] = rowMap
+       }
+
+       jsonBytes, err := json.MarshalIndent(out, "", "  ")
+       if err != nil {
+               return "", err
+       }
+
+       // Get current working directory
+       cwd, err := os.Getwd()
+       if err != nil {
+               return "", fmt.Errorf("failed to get current working directory: 
%w", err)
+       }
+
+       // Create ProfileSummary directory if it doesn't exist
+       outputDir := filepath.Join(cwd, "ProfileSummary")
+       if err := os.MkdirAll(outputDir, 0700); err != nil {
+               return "", fmt.Errorf("failed to create output directory: %w", 
err)
+       }
+
+       // Define the output file path
+       fileName := fmt.Sprintf("%s.json", operation)
+       filePath := filepath.Join(outputDir, fileName)
+
+       // Write JSON to file
+       if err := os.WriteFile(filePath, jsonBytes, 0600); err != nil {
+               return "", fmt.Errorf("failed to write file: %w", err)
+       }
+
+       return filePath, nil
+}
+
+func makeKeyFromEntry(e pb.Log) string {
+       return fmt.Sprintf("%s|%s|%s|%s", e.NamespaceName, e.ContainerName, 
e.ProcessName, e.Operation)
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/kubearmor-client-1.4.2/profile/Client/profileClient.go 
new/kubearmor-client-1.4.3/profile/Client/profileClient.go
--- old/kubearmor-client-1.4.2/profile/Client/profileClient.go  2025-06-04 
13:24:28.000000000 +0200
+++ new/kubearmor-client-1.4.3/profile/Client/profileClient.go  2025-06-13 
08:42:08.000000000 +0200
@@ -6,21 +6,16 @@
 
 import (
        "bytes"
-       "encoding/json"
        "fmt"
-       "os"
-       "strings"
-       "time"
-
        "github.com/charmbracelet/bubbles/help"
        "github.com/charmbracelet/bubbles/key"
        tea "github.com/charmbracelet/bubbletea"
        "github.com/charmbracelet/lipgloss"
        "github.com/evertras/bubble-table/table"
        pb "github.com/kubearmor/KubeArmor/protobuf"
-       klog "github.com/kubearmor/kubearmor-client/log"
        profile "github.com/kubearmor/kubearmor-client/profile"
        log "github.com/sirupsen/logrus"
+       "os"
 )
 
 // Column keys
@@ -71,12 +66,26 @@
        Save      bool
 }
 
+var ProfileOpts Options
+
 // Model for main Bubble Tea
 type Model struct {
-       File     table.Model
-       Process  table.Model
-       Network  table.Model
-       Syscall  table.Model
+       File         table.Model
+       FileRows     []table.Row
+       FileRowIndex map[string]int
+
+       Process         table.Model
+       ProcessRows     []table.Row
+       ProcessRowIndex map[string]int
+
+       Network         table.Model
+       NetworkRows     []table.Row
+       NetworkRowIndex map[string]int
+
+       Syscall         table.Model
+       SyscallRows     []table.Row
+       SyscallRowIndex map[string]int
+
        tabs     tea.Model
        keys     keyMap
        quitting bool
@@ -88,20 +97,12 @@
        state sessionState
 }
 
-// SomeData stores incoming row data
-type SomeData struct {
-       rows []table.Row
-}
-
-func waitForActivity() tea.Cmd {
+func waitForNextEvent() tea.Cmd {
        return func() tea.Msg {
-               time.Sleep(2 * time.Second)
-               return klog.EventInfo{}
+               return <-profile.EventChan
        }
 }
 
-var o1 Options
-
 func generateColumns(Operation string) []table.Column {
        LogSource := table.NewFlexColumn(ColumnLogSource, "LogSource", 
1).WithStyle(ColumnStyle).WithFiltered(true)
 
@@ -137,17 +138,29 @@
 // Init calls initial functions if needed
 func (m Model) Init() tea.Cmd {
        return tea.Batch(
-               waitForActivity(),
+               waitForNextEvent(),
        )
 }
 
 // NewModel initializates new bubbletea model
 func NewModel() Model {
        model := Model{
-               File:    
table.New(generateColumns("File")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
-               Process: 
table.New(generateColumns("Process")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
-               Network: 
table.New(generateColumns("Network")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
-               Syscall: 
table.New(generateColumns("Syscall")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
+               File:         
table.New(generateColumns("File")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
+               FileRows:     []table.Row{},
+               FileRowIndex: make(map[string]int),
+
+               Process:         
table.New(generateColumns("Process")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
+               ProcessRows:     []table.Row{},
+               ProcessRowIndex: make(map[string]int),
+
+               Network:         
table.New(generateColumns("Network")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
+               NetworkRows:     []table.Row{},
+               NetworkRowIndex: make(map[string]int),
+
+               Syscall:         
table.New(generateColumns("Syscall")).WithBaseStyle(styleBase).WithPageSize(30).Filtered(true),
+               SyscallRows:     []table.Row{},
+               SyscallRowIndex: make(map[string]int),
+
                tabs: &tabs{
                        active: "Lip Gloss",
                        items:  []string{"Process", "File", "Network", 
"Syscall"},
@@ -180,7 +193,6 @@
                case key.Matches(msg, m.keys.Quit):
                        m.quitting = true
                        return m, tea.Quit
-
                }
 
                switch msg.String() {
@@ -209,6 +221,27 @@
                        m.Process = m.Process.WithPageSize(m.Process.PageSize() 
+ 1)
                        m.Syscall = m.Syscall.WithPageSize(m.Syscall.PageSize() 
+ 1)
 
+               case "e":
+                       var file string
+                       var err error
+                       switch m.state {
+                       case fileview:
+                               file, err = m.ExportFileJSON()
+                       case processview:
+                               file, err = m.ExportProcessJSON()
+                       case syscallview:
+                               file, err = m.ExportSyscallJSON()
+                       case networkview:
+                               file, err = m.ExportNetworkJSON()
+                       default:
+                               // Optionally log or handle unknown operations
+                               fmt.Println("Unknown operation")
+                       }
+                       if err != nil {
+                               panic(err)
+                       }
+                       fmt.Println("Exported json data to file:", file)
+
                }
 
                switch m.state {
@@ -231,23 +264,99 @@
                        m.Syscall, cmd = m.Syscall.Update(msg)
                        cmds = append(cmds, cmd)
                }
-       case klog.EventInfo:
-               profile.TelMutex.RLock()
-               m.File = 
m.File.WithRows(generateRowsFromData(profile.Telemetry, 
"File")).WithColumns(generateColumns("File"))
+       case pb.Log:
+               if isCorrectLog(msg) {
+                       m.updateTableWithNewEntry(msg)
+               }
+
+               return m, waitForNextEvent()
+       }
+
+       return m, tea.Batch(cmds...)
+}
+
+func (m *Model) updateTableWithNewEntry(msg pb.Log) {
+
+       switch msg.Operation {
+       case "File":
+               key := makeKeyFromEntry(msg)
+               if idx, ok := m.FileRowIndex[key]; ok {
+                       row := m.FileRows[idx]
+                       count, ok := row.Data[ColumnCount].(int)
+                       if ok {
+                               count++
+                               m.FileRows[idx].Data[ColumnCount] = count
+                       }
+
+                       m.FileRows[idx].Data[ColumnTimestamp] = msg.UpdatedTime
+               } else {
+                       newRow := generateRowFromLog(msg)
+                       m.FileRows = append(m.FileRows, newRow)
+                       m.FileRowIndex[key] = len(m.FileRows) - 1
+               }
+
+               m.File = m.File.WithRows(m.FileRows)
                m.File = 
m.File.SortByAsc(ColumnNamespace).ThenSortByAsc(ColumnContainerName).ThenSortByAsc(ColumnProcessName).ThenSortByAsc(ColumnCount).ThenSortByAsc(ColumnResource)
-               m.Process = 
m.Process.WithRows(generateRowsFromData(profile.Telemetry, 
"Process")).WithColumns(generateColumns("Process"))
+
+       case "Process":
+               key := makeKeyFromEntry(msg)
+               if idx, ok := m.ProcessRowIndex[key]; ok {
+                       row := m.ProcessRows[idx]
+                       count, ok := row.Data[ColumnCount].(int)
+                       if ok {
+                               count++
+                               m.ProcessRows[idx].Data[ColumnCount] = count
+                       }
+
+                       m.ProcessRows[idx].Data[ColumnTimestamp] = 
msg.UpdatedTime
+               } else {
+                       newRow := generateRowFromLog(msg)
+                       m.ProcessRows = append(m.ProcessRows, newRow)
+                       m.ProcessRowIndex[key] = len(m.ProcessRows) - 1
+               }
+
+               m.Process = m.Process.WithRows(m.ProcessRows)
                m.Process = 
m.Process.SortByAsc(ColumnNamespace).ThenSortByAsc(ColumnContainerName).ThenSortByAsc(ColumnProcessName).ThenSortByAsc(ColumnCount).ThenSortByAsc(ColumnResource)
-               m.Network = 
m.Network.WithRows(generateRowsFromData(profile.Telemetry, 
"Network")).WithColumns(generateColumns("Network"))
+
+       case "Network":
+               key := makeKeyFromEntry(msg)
+               if idx, ok := m.NetworkRowIndex[key]; ok {
+                       row := m.NetworkRows[idx]
+                       count, ok := row.Data[ColumnCount].(int)
+                       if ok {
+                               count++
+                               m.NetworkRows[idx].Data[ColumnCount] = count
+                       }
+
+                       m.NetworkRows[idx].Data[ColumnTimestamp] = 
msg.UpdatedTime
+               } else {
+                       newRow := generateRowFromLog(msg)
+                       m.NetworkRows = append(m.NetworkRows, newRow)
+                       m.NetworkRowIndex[key] = len(m.NetworkRows) - 1
+               }
+               m.Network = m.Network.WithRows(m.NetworkRows)
                m.Network = 
m.Network.SortByAsc(ColumnNamespace).ThenSortByAsc(ColumnContainerName).ThenSortByAsc(ColumnProcessName).ThenSortByAsc(ColumnCount).ThenSortByAsc(ColumnResource)
-               m.Syscall = 
m.Syscall.WithRows(generateRowsFromData(profile.Telemetry, 
"Syscall")).WithColumns(generateColumns("Syscall"))
-               m.Syscall = 
m.Syscall.SortByAsc(ColumnNamespace).ThenSortByAsc(ColumnContainerName).ThenSortByAsc(ColumnProcessName).ThenSortByAsc(ColumnCount).ThenSortByAsc(ColumnResource)
-               profile.TelMutex.RUnlock()
 
-               return m, waitForActivity()
+       case "Syscall":
+               key := makeKeyFromEntry(msg)
+               if idx, ok := m.SyscallRowIndex[key]; ok {
+                       row := m.SyscallRows[idx]
+                       count, ok := row.Data[ColumnCount].(int)
+                       if ok {
+                               count++
+                               m.SyscallRows[idx].Data[ColumnCount] = count
+                       }
 
-       }
+                       m.SyscallRows[idx].Data[ColumnTimestamp] = 
msg.UpdatedTime
+               } else {
+                       newRow := generateRowFromLog(msg)
+                       m.SyscallRows = append(m.SyscallRows, newRow)
+                       m.SyscallRowIndex[key] = len(m.SyscallRows) - 1
+               }
 
-       return m, tea.Batch(cmds...)
+               m.Syscall = m.Syscall.WithRows(m.SyscallRows)
+               m.Syscall = 
m.Syscall.SortByAsc(ColumnNamespace).ThenSortByAsc(ColumnContainerName).ThenSortByAsc(ColumnProcessName).ThenSortByAsc(ColumnCount).ThenSortByAsc(ColumnResource)
+       }
 }
 
 func (m *Model) recalculateTable() {
@@ -260,48 +369,62 @@
 // View Renders Bubble Tea UI
 func (m Model) View() string {
        pad := lipgloss.NewStyle().PaddingRight(1)
-       RowCount := lipgloss.JoinHorizontal(lipgloss.Left, 
lipgloss.NewStyle().Foreground(helptheme).Render(fmt.Sprintf("Max Rows: %d", 
m.Process.PageSize())))
+       RowCount := lipgloss.JoinHorizontal(
+               lipgloss.Left,
+               lipgloss.NewStyle().
+                       Foreground(helptheme).
+                       Render(fmt.Sprintf("Max Rows: %d", 
m.Process.PageSize())),
+       )
        helpKey := m.help.Styles.FullDesc.Foreground(helptheme).Padding(0, 0, 1)
-       help := lipgloss.JoinHorizontal(lipgloss.Left, 
helpKey.Render(m.help.FullHelpView(m.keys.FullHelp())))
-       var total string
-       s := lipgloss.NewStyle().Height(m.height).MaxHeight(m.height)
-       switch m.state {
-
-       case processview:
+       help := lipgloss.JoinHorizontal(
+               lipgloss.Left,
+               helpKey.Render(m.help.FullHelpView(m.keys.FullHelp())),
+       )
 
-               total = s.Render(lipgloss.JoinVertical(lipgloss.Top, 
lipgloss.JoinVertical(lipgloss.Top,
+       content := func(view string) string {
+               return lipgloss.JoinVertical(
+                       lipgloss.Top,
                        help,
                        RowCount,
                        m.tabs.View(),
-                       lipgloss.JoinVertical(lipgloss.Center, 
pad.Render(m.Process.View()))),
-               ))
+                       lipgloss.JoinVertical(lipgloss.Center, 
pad.Render(view)),
+               )
+       }
+
+       var view string
+       switch m.state {
+       case processview:
+               view = m.Process.View()
        case fileview:
-               // s := 
lipgloss.NewStyle().MaxHeight(m.height).MaxWidth(m.width)
-               total = s.Render(lipgloss.JoinVertical(lipgloss.Top, 
lipgloss.JoinVertical(lipgloss.Top,
-                       help,
-                       RowCount,
-                       m.tabs.View(),
-                       lipgloss.JoinVertical(lipgloss.Center, 
pad.Render(m.File.View()))),
-               ))
+               view = m.File.View()
        case networkview:
-               // s := 
lipgloss.NewStyle().MaxHeight(m.height).MaxWidth(m.width)
-               total = s.Render(lipgloss.JoinVertical(lipgloss.Top, 
lipgloss.JoinVertical(lipgloss.Top,
-                       help,
-                       RowCount,
-                       m.tabs.View(),
-                       lipgloss.JoinVertical(lipgloss.Center, 
pad.Render(m.Network.View()))),
-               ))
+               view = m.Network.View()
        case syscallview:
-               // s := 
lipgloss.NewStyle().MaxHeight(m.height).MaxWidth(m.width)
-               total = s.Render(lipgloss.JoinVertical(lipgloss.Top, 
lipgloss.JoinVertical(lipgloss.Top,
-                       help,
-                       RowCount,
-                       m.tabs.View(),
-                       lipgloss.JoinVertical(lipgloss.Center, 
pad.Render(m.Syscall.View()))),
-               ))
+               view = m.Syscall.View()
+       default:
+               view = ""
        }
-       return total
 
+       return lipgloss.NewStyle().
+               Height(m.height).
+               MaxHeight(m.height).
+               Render(content(view))
+}
+
+func (m *Model) ExportProcessJSON() (string, error) {
+       return ExportRowsToJSON(generateColumns("Process"), m.ProcessRows, 
"Process")
+}
+
+func (m *Model) ExportFileJSON() (string, error) {
+       return ExportRowsToJSON(generateColumns("File"), m.FileRows, "File")
+}
+
+func (m *Model) ExportNetworkJSON() (string, error) {
+       return ExportRowsToJSON(generateColumns("Network"), m.NetworkRows, 
"Network")
+}
+
+func (m *Model) ExportSyscallJSON() (string, error) {
+       return ExportRowsToJSON(generateColumns("Syscall"), m.SyscallRows, 
"Syscall")
 }
 
 // Profile Row Data to display
@@ -317,204 +440,11 @@
        Time          string `json:"time"`
 }
 
-// Frequency and Timestamp data for another map
-type Frequency struct {
-       freq int
-       time string
-}
-
-func isLaterTimestamp(timestamp1, timestamp2 string) bool {
-       t1, err := time.Parse(time.RFC3339, timestamp1)
-       if err != nil {
-               // Handle error, use some default value, or return false if you 
prefer
-               return false
-       }
-
-       t2, err := time.Parse(time.RFC3339, timestamp2)
-       if err != nil {
-               // Handle error, use some default value, or return false if you 
prefer
-               return false
-       }
-
-       return t1.After(t2)
-}
-
-// AggregateSummary used to aggregate summary data for a less cluttered view 
of file and process data
-func AggregateSummary(inputMap map[Profile]*Frequency, Operation string) 
map[Profile]*Frequency {
-       outputMap := make(map[Profile]*Frequency)
-       var fileArr []string
-       fileSumMap := make(map[Profile]*Frequency)
-       updatedSumMap := make(map[Profile]*Frequency)
-       if Operation == "Network" || Operation == "Syscall" {
-               return inputMap
-       }
-       for prof, count := range inputMap {
-               if Operation == "File" || Operation == "Process" {
-                       fileArr = append(fileArr, prof.Resource)
-                       fileSumMap[prof] = count
-               } else {
-                       updatedSumMap[prof] = count
-               }
-       }
-       inputMap = updatedSumMap
-       aggregatedPaths := profile.AggregatePaths(fileArr)
-       for summary, countTime := range fileSumMap {
-               for _, path := range aggregatedPaths {
-                       if strings.HasPrefix(summary.Resource, path.Path) && 
(len(summary.Resource) == len(path.Path) || 
summary.Resource[len(strings.TrimSuffix(path.Path, "/"))] == '/') {
-                               summary.Resource = path.Path
-                               break
-                       }
-               }
-               if existingFreq, ok := outputMap[summary]; ok {
-                       // If the prof already exists, update the frequency and 
timestamp if needed
-                       existingFreq.freq += countTime.freq
-
-                       if isLaterTimestamp(countTime.time, existingFreq.time) {
-                               existingFreq.time = countTime.time
-                       }
-                       outputMap[summary] = existingFreq
-               } else {
-                       outputMap[summary] = countTime
-               }
-       }
-
-       return outputMap
-}
-
-func convertToJSON(Operation string, data []Profile) {
-       var jsonArray []string
-       jsonByte, _ := json.MarshalIndent(data, " ", "   ")
-       //unmarshalling here because it is marshalled two times for some reason
-       if err := json.Unmarshal(jsonByte, &jsonArray); err != nil {
-               fmt.Println("Error parsing JSON array:", err)
-       }
-       if len(jsonArray) > 0 {
-               filepath := "Profile_Summary/"
-               err := os.MkdirAll(filepath, 0600)
-               err = os.WriteFile(filepath+Operation+".json", 
[]byte(jsonArray[0]), 0600)
-               if err != nil {
-                       panic(err)
-               }
-       }
-}
-
-func (p Profile) MarshalText() (text []byte, err error) {
-       type x Profile
-       return json.Marshal(x(p))
-}
-
-func generateRowsFromData(data []pb.Log, operation string) []table.Row {
-       var s SomeData
-       var jsondata []Profile
-       m := make(map[Profile]int)
-       w := make(map[Profile]*Frequency)
-       for _, entry := range data {
-
-               if entry.Operation != operation {
-                       continue
-               }
-
-               if (o1.Namespace != "") && (entry.NamespaceName != 
o1.Namespace) {
-                       continue
-               }
-               if (o1.Pod != "") && (entry.PodName != o1.Pod) {
-                       continue
-               }
-               if (o1.Container != "") && (entry.ContainerName != 
o1.Container) {
-                       continue
-               }
-
-               var p Profile
-               var logType string
-               if entry.Type == "HostLog" {
-                       logType = "Host"
-                       entry.NamespaceName = "--"
-                       entry.ContainerName = "--"
-               } else {
-                       logType = "Container"
-               }
-
-               if entry.Operation == "Syscall" {
-                       p = Profile{
-                               LogSource:     logType,
-                               Namespace:     entry.NamespaceName,
-                               ContainerName: entry.ContainerName,
-                               Process:       entry.ProcessName,
-                               Resource:      entry.Data,
-                               Result:        entry.Result,
-                       }
-               } else {
-                       p = Profile{
-                               LogSource:     logType,
-                               Namespace:     entry.NamespaceName,
-                               ContainerName: entry.ContainerName,
-                               Process:       entry.ProcessName,
-                               Resource:      entry.Resource,
-                               Result:        entry.Result,
-                       }
-               }
-
-               f := &Frequency{
-                       time: entry.UpdatedTime,
-               }
-               w[p] = f
-               m[p]++
-               w[p].freq = m[p]
-
-       }
-
-       finalmap := AggregateSummary(w, operation)
-       for r, frequency := range finalmap {
-               row := table.NewRow(table.RowData{
-                       ColumnLogSource:     r.LogSource,
-                       ColumnNamespace:     r.Namespace,
-                       ColumnContainerName: r.ContainerName,
-                       ColumnProcessName:   r.Process,
-                       ColumnResource:      r.Resource,
-                       ColumnResult:        r.Result,
-                       ColumnCount:         frequency.freq,
-                       ColumnTimestamp:     frequency.time,
-               })
-               jsondata = append(jsondata, Profile{
-                       LogSource:     r.LogSource,
-                       Namespace:     r.Namespace,
-                       ContainerName: r.ContainerName,
-                       Process:       r.Process,
-                       Resource:      r.Resource,
-                       Result:        r.Result,
-                       Count:         frequency.freq,
-                       Time:          frequency.time,
-               })
-               s.rows = append(s.rows, row)
-       }
-
-       if o1.Save {
-               if operation == "File" {
-                       convertToJSON("File", jsondata)
-               } else if operation == "Process" {
-                       convertToJSON("Process", jsondata)
-               } else if operation == "Network" {
-                       convertToJSON("Network", jsondata)
-               } else if operation == "Syscall" {
-                       convertToJSON("Syscall", jsondata)
-               }
-       }
-
-       return s.rows
-}
-
 // Start entire TUI
-func Start(o Options) {
-       o1 = Options{
-               Namespace: o.Namespace,
-               Pod:       o.Pod,
-               GRPC:      o.GRPC,
-               Container: o.Container,
-               Save:      o.Save,
-       }
+func Start() {
        p := tea.NewProgram(NewModel(), tea.WithAltScreen())
        go func() {
-               err := profile.GetLogs(o1.GRPC)
+               err := profile.GetLogs(ProfileOpts.GRPC)
                if err != nil {
                        p.Quit()
                        profile.ErrChan <- err
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/kubearmor-client-1.4.2/profile/README.md 
new/kubearmor-client-1.4.3/profile/README.md
--- old/kubearmor-client-1.4.2/profile/README.md        2025-06-04 
13:24:28.000000000 +0200
+++ new/kubearmor-client-1.4.3/profile/README.md        2025-06-13 
08:42:08.000000000 +0200
@@ -1,5 +1,68 @@
 ## Profiling of KubeArmor Logs using karmor
 
-`karmor profile` which shows real-time terminal user interface table of three 
different operations going on in KubeArmor: Process, File and Network. It 
maintains a counter of each operation that is happening within the cluster, 
along with other useful details. It directly fetches data from the `karmor 
logs` API and displays all the required information. The TUI includes simple 
navigation between operations and a user input based filter as well.
+`karmor profile` provides a real-time terminal UI that visualizes 
security-relevant activity observed by KubeArmor, including Process, File, and 
Network events. It fetches live data from the KubeArmor logs API, displays 
counters and key details for each event type, and supports easy navigation and 
filtering.
 
 
![Profile](https://user-images.githubusercontent.com/23097199/213850468-2462e8b2-b4f6-491f-a174-42d217cbfd28.gif)
+
+
+### 🔍 Filtering Logs with `karmor profile`
+
+The `karmor profile` command allows you to filter logs or alerts using a set 
of useful flags. These filters help narrow down the output to specific 
Kubernetes objects like containers, pods, and namespaces.
+
+### 🧰 Available Filters
+
+| Flag                | Description                               |
+| ------------------- | ----------------------------------------- |
+| `-c`, `--container` | Filters logs by **container name**.       |
+| `-n`, `--namespace` | Filters logs by **Kubernetes namespace**. |
+| `--pod`             | Filters logs by **pod name**.             |
+
+---
+
+### 📌 Usage Examples
+
+#### ✅ Filter by Container Name
+
+```bash
+karmor profile -c nginx
+```
+
+> Outputs logs only from the container named `nginx`.
+
+---
+
+#### ✅ Filter by Namespace
+
+```bash
+karmor profile -n nginx1
+```
+
+> Outputs logs only from the namespace `nginx1`.
+
+---
+
+#### ✅ Filter by Pod
+
+```bash
+karmor profile --pod nginx-pod-1
+```
+
+> Outputs logs only from the pod named `nginx-pod-1`.
+
+---
+
+### 🔗 Combine Multiple Filters
+
+You can combine filters to narrow down the logs even further.
+
+```bash
+karmor profile -n nginx1 -c nginx
+```
+
+> Outputs logs **only** from the `nginx` container in the `nginx1` namespace.
+
+---
+
+### 💡 Tip
+
+Use these filters during profiling sessions to quickly isolate behavior or 
security events related to a specific pod, container, or namespace.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/kubearmor-client-1.4.2/profile/profile.go 
new/kubearmor-client-1.4.3/profile/profile.go
--- old/kubearmor-client-1.4.2/profile/profile.go       2025-06-04 
13:24:28.000000000 +0200
+++ new/kubearmor-client-1.4.3/profile/profile.go       2025-06-13 
08:42:08.000000000 +0200
@@ -11,7 +11,6 @@
        klog "github.com/kubearmor/kubearmor-client/log"
        log "github.com/sirupsen/logrus"
        "google.golang.org/protobuf/encoding/protojson"
-       "sync"
 )
 
 var eventChan chan klog.EventInfo
@@ -19,11 +18,7 @@
 // ErrChan to make error channels from goroutines
 var ErrChan chan error
 
-// Telemetry to store incoming log events
-var Telemetry []pb.Log
-
-// TelMutex to prevent deadlock
-var TelMutex sync.RWMutex
+var EventChan = make(chan pb.Log)
 
 // GetLogs to fetch logs
 func GetLogs(grpc string) error {
@@ -43,9 +38,7 @@
                                if err != nil {
                                        return err
                                }
-                               TelMutex.Lock()
-                               Telemetry = append(Telemetry, log)
-                               TelMutex.Unlock()
+                               EventChan <- log
                        } else {
                                log.Errorf("UNKNOWN EVT type %s", evtin.Type)
                        }

++++++ kubearmor-client.obsinfo ++++++
--- /var/tmp/diff_new_pack.mQgPcv/_old  2025-06-13 18:48:02.649226737 +0200
+++ /var/tmp/diff_new_pack.mQgPcv/_new  2025-06-13 18:48:02.649226737 +0200
@@ -1,5 +1,5 @@
 name: kubearmor-client
-version: 1.4.2
-mtime: 1749036268
-commit: 829e9c0601ff20d38471b165604150395efcd831
+version: 1.4.3
+mtime: 1749796928
+commit: 5999c20fdf84721ebca44fd97bae9c075cf2e137
 

++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/kubearmor-client/vendor.tar.gz 
/work/SRC/openSUSE:Factory/.kubearmor-client.new.19631/vendor.tar.gz differ: 
char 13, line 1

Reply via email to