Posted to commits@plc4x.apache.org by sr...@apache.org on 2023/06/16 20:32:09 UTC

[plc4x] branch develop updated: chore(plc4go): re-order time declarations

This is an automated email from the ASF dual-hosted git repository.

sruehl pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/plc4x.git


The following commit(s) were added to refs/heads/develop by this push:
     new c02f8f51de chore(plc4go): re-order time declarations
c02f8f51de is described below

commit c02f8f51de39fabdaa52418adc892b99b17f37fe
Author: Sebastian Rühl <sr...@apache.org>
AuthorDate: Fri Jun 16 22:32:01 2023 +0200

    chore(plc4go): re-order time declarations
---
 plc4go/examples/ads/subscribe/Subscribe.go         |  2 +-
 plc4go/internal/cbus/Browser.go                    |  6 +--
 plc4go/internal/cbus/Connection.go                 |  2 +-
 plc4go/internal/cbus/Connection_test.go            |  4 +-
 plc4go/internal/cbus/Discoverer.go                 |  6 +--
 plc4go/internal/cbus/Reader.go                     |  4 +-
 plc4go/internal/knxnetip/Browser.go                |  2 +-
 plc4go/internal/knxnetip/Connection.go             |  2 +-
 .../knxnetip/ConnectionDriverSpecificOperations.go | 14 +++---
 plc4go/internal/knxnetip/Discoverer.go             |  4 +-
 plc4go/internal/modbus/Connection.go               |  2 +-
 plc4go/internal/s7/Reader.go                       |  2 +-
 plc4go/internal/s7/Writer.go                       |  2 +-
 plc4go/internal/simulated/Connection_test.go       |  2 +-
 plc4go/pkg/api/cache/PlcConnectionCache.go         |  2 +-
 plc4go/pkg/api/cache/PlcConnectionCache_test.go    | 48 +++++++++----------
 plc4go/pkg/api/cache/plcConnectionLease.go         |  2 +-
 plc4go/pkg/api/cache/plcConnectionLease_test.go    | 56 +++++++++++-----------
 plc4go/spi/default/DefaultCodec.go                 |  6 +--
 plc4go/spi/default/DefaultConnection.go            |  2 +-
 plc4go/spi/model/DefaultPlcReadRequest.go          |  2 +-
 plc4go/spi/model/DefaultPlcWriteRequest.go         |  2 +-
 plc4go/spi/pool/future.go                          |  2 +-
 plc4go/spi/pool/future_test.go                     |  8 ++--
 24 files changed, 92 insertions(+), 92 deletions(-)
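
Every hunk below applies the same mechanical swap from `time.Unit * N` to `N * time.Unit`. As a minimal, hypothetical sketch (not part of the commit), the two forms yield identical `time.Duration` values; the change only aligns the code with the `N * time.Second` ordering commonly seen in Go documentation and examples:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Old ordering, as in the diff's "-" lines.
	before := time.Second * 2
	// New ordering, as in the diff's "+" lines.
	after := 2 * time.Second

	// Both are the same time.Duration (2s); the refactor is purely stylistic.
	fmt.Println(before == after) // true
	fmt.Println(after)           // "2s"
}
```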

diff --git a/plc4go/examples/ads/subscribe/Subscribe.go b/plc4go/examples/ads/subscribe/Subscribe.go
index 4eea4342e1..523e86a2a2 100644
--- a/plc4go/examples/ads/subscribe/Subscribe.go
+++ b/plc4go/examples/ads/subscribe/Subscribe.go
@@ -65,6 +65,6 @@ func main() {
 		print(responseCode)
 	}
 
-	time.Sleep(time.Second * 200)
+	time.Sleep(200 * time.Second)
 
 }
diff --git a/plc4go/internal/cbus/Browser.go b/plc4go/internal/cbus/Browser.go
index ee202dec64..b9ecadcd13 100644
--- a/plc4go/internal/cbus/Browser.go
+++ b/plc4go/internal/cbus/Browser.go
@@ -196,7 +196,7 @@ func (m *Browser) getInstalledUnitAddressBytes(ctx context.Context) (map[byte]an
 	if err != nil {
 		return nil, errors.Wrap(err, "Error subscribing to the installation MMI")
 	}
-	subCtx, subCtxCancel := context.WithTimeout(ctx, time.Second*2)
+	subCtx, subCtxCancel := context.WithTimeout(ctx, 2*time.Second)
 	defer subCtxCancel()
 	subscriptionResult := <-subscriptionRequest.ExecuteWithContext(subCtx)
 	if err := subscriptionResult.GetErr(); err != nil {
@@ -296,7 +296,7 @@ func (m *Browser) getInstalledUnitAddressBytes(ctx context.Context) (map[byte]an
 	if err != nil {
 		return nil, errors.Wrap(err, "Error building the installation MMI")
 	}
-	readCtx, readCtxCancel := context.WithTimeout(ctx, time.Second*2)
+	readCtx, readCtxCancel := context.WithTimeout(ctx, 2*time.Second)
 	defer readCtxCancel()
 	readWg := sync.WaitGroup{}
 	readWg.Add(1)
@@ -373,7 +373,7 @@ func (m *Browser) getInstalledUnitAddressBytes(ctx context.Context) (map[byte]an
 		}
 	}()
 
-	syncCtx, syncCtxCancel := context.WithTimeout(ctx, time.Second*6)
+	syncCtx, syncCtxCancel := context.WithTimeout(ctx, 6*time.Second)
 	defer syncCtxCancel()
 	for !blockOffset0Received || !blockOffset88Received || !blockOffset176Received {
 		select {
diff --git a/plc4go/internal/cbus/Connection.go b/plc4go/internal/cbus/Connection.go
index b23c76b986..813c5b7ee5 100644
--- a/plc4go/internal/cbus/Connection.go
+++ b/plc4go/internal/cbus/Connection.go
@@ -522,7 +522,7 @@ func (c *Connection) sendCalDataWrite(ctx context.Context, ch chan plc4go.PlcCon
 	}
 
 	startTime := time.Now()
-	timeout := time.NewTimer(time.Second * 2)
+	timeout := time.NewTimer(2 * time.Second)
 	defer utils.CleanupTimer(timeout)
 	select {
 	case <-directCommandAckChan:
diff --git a/plc4go/internal/cbus/Connection_test.go b/plc4go/internal/cbus/Connection_test.go
index 82a07cefb7..b2185ae447 100644
--- a/plc4go/internal/cbus/Connection_test.go
+++ b/plc4go/internal/cbus/Connection_test.go
@@ -747,7 +747,7 @@ func TestConnection_fireConnected(t *testing.T) {
 			},
 			args: args{ch: make(chan<- plc4go.PlcConnectionConnectResult, 1)},
 			chanValidator: func(t *testing.T, results chan<- plc4go.PlcConnectionConnectResult) bool {
-				time.Sleep(time.Millisecond * 50)
+				time.Sleep(50 * time.Millisecond)
 				return len(results) == 1
 			},
 		},
@@ -827,7 +827,7 @@ func TestConnection_fireConnectionError(t *testing.T) {
 			},
 			args: args{ch: make(chan<- plc4go.PlcConnectionConnectResult, 1)},
 			chanValidator: func(t *testing.T, results chan<- plc4go.PlcConnectionConnectResult) bool {
-				time.Sleep(time.Millisecond * 50)
+				time.Sleep(50 * time.Millisecond)
 				return len(results) == 1
 			},
 		},
diff --git a/plc4go/internal/cbus/Discoverer.go b/plc4go/internal/cbus/Discoverer.go
index 6658d22774..8c3606b64d 100644
--- a/plc4go/internal/cbus/Discoverer.go
+++ b/plc4go/internal/cbus/Discoverer.go
@@ -237,10 +237,10 @@ func (d *Discoverer) createDeviceScanDispatcher(tcpTransportInstance *tcp.Transp
 		}
 		// Keep on reading responses till the timeout is done.
 		// TODO: Make this configurable
-		timeout := time.NewTimer(time.Second * 1)
+		timeout := time.NewTimer(1 * time.Second)
 		defer utils.CleanupTimer(timeout)
-		for start := time.Now(); time.Since(start) < time.Second*5; {
-			timeout.Reset(time.Second * 1)
+		for start := time.Now(); time.Since(start) < 5*time.Second; {
+			timeout.Reset(1 * time.Second)
 			select {
 			case receivedMessage := <-codec.GetDefaultIncomingMessageChannel():
 				// Cleanup, going to be resetted again
diff --git a/plc4go/internal/cbus/Reader.go b/plc4go/internal/cbus/Reader.go
index 55015da183..70aa83016d 100644
--- a/plc4go/internal/cbus/Reader.go
+++ b/plc4go/internal/cbus/Reader.go
@@ -139,7 +139,7 @@ func (m *Reader) createMessageTransactionAndWait(ctx context.Context, messageToS
 func (m *Reader) sendMessageOverTheWire(ctx context.Context, transaction transactions.RequestTransaction, messageToSend readWriteModel.CBusMessage, addResponseCode func(name string, responseCode apiModel.PlcResponseCode), tagName string, addPlcValue func(name string, plcValue apiValues.PlcValue)) {
 	// Send the over the wire
 	m.log.Trace().Msg("send over the wire")
-	ttl := time.Second * 5
+	ttl := 5 * time.Second
 	if deadline, ok := ctx.Deadline(); ok {
 		ttl = -time.Since(deadline)
 		m.log.Debug().Msgf("setting ttl to %s", ttl)
@@ -233,7 +233,7 @@ func (m *Reader) sendMessageOverTheWire(ctx context.Context, transaction transac
 		ttl); err != nil {
 		m.log.Debug().Err(err).Msgf("Error sending message for tag %s", tagName)
 		addResponseCode(tagName, apiModel.PlcResponseCode_INTERNAL_ERROR)
-		if err := transaction.FailRequest(errors.Errorf("timeout after %s", time.Second*1)); err != nil {
+		if err := transaction.FailRequest(errors.Errorf("timeout after %s", 1*time.Second)); err != nil {
 			m.log.Debug().Err(err).Msg("Error failing request")
 		}
 	}
diff --git a/plc4go/internal/knxnetip/Browser.go b/plc4go/internal/knxnetip/Browser.go
index d2f0b35912..80b6729e3d 100644
--- a/plc4go/internal/knxnetip/Browser.go
+++ b/plc4go/internal/knxnetip/Browser.go
@@ -135,7 +135,7 @@ func (m Browser) executeDeviceQuery(ctx context.Context, query DeviceQuery, inte
 					queryResults = append(queryResults, queryResult)
 				}
 
-				disconnectTtlTimer := time.NewTimer(m.connection.defaultTtl * 10)
+				disconnectTtlTimer := time.NewTimer(10 * m.connection.defaultTtl)
 				deviceDisconnections := m.connection.DeviceDisconnect(ctx, knxAddress)
 				select {
 				case _ = <-deviceDisconnections:
diff --git a/plc4go/internal/knxnetip/Connection.go b/plc4go/internal/knxnetip/Connection.go
index d71c515a28..b86e72a96b 100644
--- a/plc4go/internal/knxnetip/Connection.go
+++ b/plc4go/internal/knxnetip/Connection.go
@@ -187,7 +187,7 @@ func NewConnection(transportInstance transports.TransportInstance, connectionOpt
 		valueCache:              map[uint16][]byte{},
 		valueCacheMutex:         sync.RWMutex{},
 		metadata:                &ConnectionMetadata{},
-		defaultTtl:              time.Second * 10,
+		defaultTtl:              10 * time.Second,
 		DeviceConnections:       map[driverModel.KnxAddress]*KnxDeviceConnection{},
 		handleTunnelingRequests: true,
 		passLogToModel:          options.ExtractPassLoggerToModel(_options...),
diff --git a/plc4go/internal/knxnetip/ConnectionDriverSpecificOperations.go b/plc4go/internal/knxnetip/ConnectionDriverSpecificOperations.go
index ae7a363375..9781e033d5 100644
--- a/plc4go/internal/knxnetip/ConnectionDriverSpecificOperations.go
+++ b/plc4go/internal/knxnetip/ConnectionDriverSpecificOperations.go
@@ -49,7 +49,7 @@ func (m *Connection) ReadGroupAddress(ctx context.Context, groupAddress []byte,
 	result := make(chan KnxReadResult, 1)
 
 	sendResponse := func(value values.PlcValue, numItems uint8, err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxReadResult{
 			value:    value,
@@ -110,7 +110,7 @@ func (m *Connection) DeviceConnect(ctx context.Context, targetAddress driverMode
 	result := make(chan KnxDeviceConnectResult, 1)
 
 	sendResponse := func(connection *KnxDeviceConnection, err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxDeviceConnectResult{
 			connection: connection,
@@ -206,7 +206,7 @@ func (m *Connection) DeviceDisconnect(ctx context.Context, targetAddress driverM
 	result := make(chan KnxDeviceDisconnectResult, 1)
 
 	sendResponse := func(connection *KnxDeviceConnection, err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxDeviceDisconnectResult{
 			connection: connection,
@@ -245,7 +245,7 @@ func (m *Connection) DeviceAuthenticate(ctx context.Context, targetAddress drive
 	result := make(chan KnxDeviceAuthenticateResult, 1)
 
 	sendResponse := func(err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxDeviceAuthenticateResult{
 			err: err,
@@ -303,7 +303,7 @@ func (m *Connection) DeviceReadProperty(ctx context.Context, targetAddress drive
 	result := make(chan KnxReadResult, 1)
 
 	sendResponse := func(value values.PlcValue, numItems uint8, err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxReadResult{
 			value:    value,
@@ -388,7 +388,7 @@ func (m *Connection) DeviceReadPropertyDescriptor(ctx context.Context, targetAdd
 	result := make(chan KnxReadResult, 1)
 
 	sendResponse := func(value values.PlcValue, numItems uint8, err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxReadResult{
 			value:    value,
@@ -453,7 +453,7 @@ func (m *Connection) DeviceReadMemory(ctx context.Context, targetAddress driverM
 	result := make(chan KnxReadResult, 1)
 
 	sendResponse := func(value values.PlcValue, numItems uint8, err error) {
-		timeout := time.NewTimer(time.Millisecond * 10)
+		timeout := time.NewTimer(10 * time.Millisecond)
 		select {
 		case result <- KnxReadResult{
 			value:    value,
diff --git a/plc4go/internal/knxnetip/Discoverer.go b/plc4go/internal/knxnetip/Discoverer.go
index 392ef3ee73..08ebf4de80 100644
--- a/plc4go/internal/knxnetip/Discoverer.go
+++ b/plc4go/internal/knxnetip/Discoverer.go
@@ -200,10 +200,10 @@ func (d *Discoverer) createDeviceScanDispatcher(udpTransportInstance *udp.Transp
 		}
 		// Keep on reading responses till the timeout is done.
 		// TODO: Make this configurable
-		timeout := time.NewTimer(time.Second * 1)
+		timeout := time.NewTimer(1 * time.Second)
 		timeout.Stop()
 		for start := time.Now(); time.Since(start) < time.Second*5; {
-			timeout.Reset(time.Second * 1)
+			timeout.Reset(1 * time.Second)
 			select {
 			case message := <-codec.GetDefaultIncomingMessageChannel():
 				{
diff --git a/plc4go/internal/modbus/Connection.go b/plc4go/internal/modbus/Connection.go
index 7d322d2122..8ad66ff8b4 100644
--- a/plc4go/internal/modbus/Connection.go
+++ b/plc4go/internal/modbus/Connection.go
@@ -72,7 +72,7 @@ func NewConnection(unitIdentifier uint8, messageCodec spi.MessageCodec, connecti
 		}
 	}
 	connection.DefaultConnection = _default.NewDefaultConnection(connection,
-		_default.WithDefaultTtl(time.Second*5),
+		_default.WithDefaultTtl(5*time.Second),
 		_default.WithPlcTagHandler(tagHandler),
 		_default.WithPlcValueHandler(NewValueHandler(_options...)),
 	)
diff --git a/plc4go/internal/s7/Reader.go b/plc4go/internal/s7/Reader.go
index 91917b2551..85581cc6fc 100644
--- a/plc4go/internal/s7/Reader.go
+++ b/plc4go/internal/s7/Reader.go
@@ -164,7 +164,7 @@ func (m *Reader) Read(ctx context.Context, readRequest apiModel.PlcReadRequest)
 					nil,
 					errors.Wrap(err, "error sending message"),
 				)
-				if err := transaction.FailRequest(errors.Errorf("timeout after %s", time.Second*1)); err != nil {
+				if err := transaction.FailRequest(errors.Errorf("timeout after %s", 1*time.Second)); err != nil {
 					m.log.Debug().Err(err).Msg("Error failing request")
 				}
 			}
diff --git a/plc4go/internal/s7/Writer.go b/plc4go/internal/s7/Writer.go
index 46c668cdf2..850387f941 100644
--- a/plc4go/internal/s7/Writer.go
+++ b/plc4go/internal/s7/Writer.go
@@ -150,7 +150,7 @@ func (m Writer) Write(ctx context.Context, writeRequest apiModel.PlcWriteRequest
 				return transaction.EndRequest()
 			}, time.Second*1); err != nil {
 				result <- spiModel.NewDefaultPlcWriteRequestResult(writeRequest, nil, errors.Wrap(err, "error sending message"))
-				if err := transaction.FailRequest(errors.Errorf("timeout after %s", time.Second*1)); err != nil {
+				if err := transaction.FailRequest(errors.Errorf("timeout after %s", 1*time.Second)); err != nil {
 					m.log.Debug().Err(err).Msg("Error failing request")
 				}
 			}
diff --git a/plc4go/internal/simulated/Connection_test.go b/plc4go/internal/simulated/Connection_test.go
index bcdd87cab1..a5e0841585 100644
--- a/plc4go/internal/simulated/Connection_test.go
+++ b/plc4go/internal/simulated/Connection_test.go
@@ -109,7 +109,7 @@ func TestConnection_Connect(t *testing.T) {
 				},
 				connected: true,
 			}, nil),
-			delayAtLeast: time.Second * 1,
+			delayAtLeast: 1 * time.Second,
 			wantErr:      false,
 		},
 	}
diff --git a/plc4go/pkg/api/cache/PlcConnectionCache.go b/plc4go/pkg/api/cache/PlcConnectionCache.go
index 4e2e91188e..c6e73ecaa9 100644
--- a/plc4go/pkg/api/cache/PlcConnectionCache.go
+++ b/plc4go/pkg/api/cache/PlcConnectionCache.go
@@ -43,7 +43,7 @@ func NewPlcConnectionCache(driverManager plc4go.PlcDriverManager, withConnection
 	if !config.TraceConnectionCache {
 		log = zerolog.Nop()
 	}
-	maxLeaseTime := time.Second * 5
+	maxLeaseTime := 5 * time.Second
 	cc := &plcConnectionCache{
 		log:           log,
 		driverManager: driverManager,
diff --git a/plc4go/pkg/api/cache/PlcConnectionCache_test.go b/plc4go/pkg/api/cache/PlcConnectionCache_test.go
index d1cf774cbe..ee9642adfa 100644
--- a/plc4go/pkg/api/cache/PlcConnectionCache_test.go
+++ b/plc4go/pkg/api/cache/PlcConnectionCache_test.go
@@ -288,8 +288,8 @@ func TestPlcConnectionCache_ReusingAnExistingConnection(t *testing.T) {
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -353,8 +353,8 @@ func TestPlcConnectionCache_MultipleConcurrentConnectionRequests(t *testing.T) {
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -377,7 +377,7 @@ func TestPlcConnectionCache_MultipleConcurrentConnectionRequests(t *testing.T) {
 			"ping-success",
 		}, 1)
 
-	time.Sleep(time.Millisecond * 1)
+	time.Sleep(1 * time.Millisecond)
 
 	// Almost instantly request the same connection for a second time.
 	// As the connection takes 100ms, the second connection request will come
@@ -427,8 +427,8 @@ func TestPlcConnectionCache_ConnectWithError(t *testing.T) {
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -467,8 +467,8 @@ func TestPlcConnectionCache_ReturningConnectionWithPingError(t *testing.T) {
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -528,8 +528,8 @@ func TestPlcConnectionCache_PingTimeout(t *testing.T) {
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -573,8 +573,8 @@ func TestPlcConnectionCache_SecondCallGetNewConnectionAfterPingTimeout(t *testin
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -651,8 +651,8 @@ func TestPlcConnectionCache_FistReadGivesUpBeforeItGetsTheConnectionSoSecondOneT
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -667,7 +667,7 @@ func TestPlcConnectionCache_FistReadGivesUpBeforeItGetsTheConnectionSoSecondOneT
 	// Intentionally just ignore the response.
 	cache.GetConnection("simulated://1.2.3.4:42?connectionDelay=100&traceEnabled=true")
 
-	time.Sleep(time.Millisecond * 1)
+	time.Sleep(1 * time.Millisecond)
 
 	// Read once from the cache.
 	// NOTE: It doesn't contain the connect-part, as the previous connection handled that.
@@ -696,8 +696,8 @@ func TestPlcConnectionCache_SecondConnectionGivenUpWaiting(t *testing.T) {
 	driverManager.RegisterDriver(simulated.NewDriver(options.WithCustomLogger(logger)))
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 5,
-		maxWaitTime:   time.Second * 25,
+		maxLeaseTime:  5 * time.Second,
+		maxWaitTime:   25 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -720,7 +720,7 @@ func TestPlcConnectionCache_SecondConnectionGivenUpWaiting(t *testing.T) {
 			"ping-success",
 		}, 1)
 
-	time.Sleep(time.Millisecond * 1)
+	time.Sleep(1 * time.Millisecond)
 
 	// Almost instantly we try to get a new connection but don't listen for the result
 	cache.GetConnection("simulated://1.2.3.4:42?connectionDelay=100&traceEnabled=true")
@@ -733,7 +733,7 @@ func TestPlcConnectionCache_SecondConnectionGivenUpWaiting(t *testing.T) {
 	}
 
 	// Wait for 1s to have the connection cache timeout (10ms) the lease as nobody's listening.
-	time.Sleep(time.Millisecond * 1000)
+	time.Sleep(1 * time.Second)
 
 	// This should be quite equal to the serial case as the connections are requested serially.
 	assert.NotNil(t, cache.GetTracer(), "Tracer should be available")
@@ -770,8 +770,8 @@ func TestPlcConnectionCache_MaximumWaitTimeReached(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -786,11 +786,11 @@ func TestPlcConnectionCache_MaximumWaitTimeReached(t *testing.T) {
 	// The first and second connection should work fine
 	firstConnectionResults := cache.GetConnection("simulated://1.2.3.4:42?connectionDelay=100&pingDelay=4000&traceEnabled=true")
 
-	time.Sleep(time.Millisecond * 1)
+	time.Sleep(1 * time.Millisecond)
 
 	secondConnectionResults := cache.GetConnection("simulated://1.2.3.4:42?connectionDelay=100&pingDelay=4000&traceEnabled=true")
 
-	time.Sleep(time.Millisecond * 1)
+	time.Sleep(1 * time.Millisecond)
 
 	// The third connection should be given up by the cache
 	thirdConnectionResults := cache.GetConnection("simulated://1.2.3.4:42?connectionDelay=100&pingDelay=4000&traceEnabled=true")
diff --git a/plc4go/pkg/api/cache/plcConnectionLease.go b/plc4go/pkg/api/cache/plcConnectionLease.go
index 28c5f2436e..8f59ae2bd6 100644
--- a/plc4go/pkg/api/cache/plcConnectionLease.go
+++ b/plc4go/pkg/api/cache/plcConnectionLease.go
@@ -98,7 +98,7 @@ func (t *plcConnectionLease) Close() <-chan plc4go.PlcConnectionCloseResult {
 	go func() {
 		// Check if the connection is still alive, if it is, put it back into the cache
 		pingResults := t.Ping()
-		pingTimeout := time.NewTimer(time.Second * 5)
+		pingTimeout := time.NewTimer(5 * time.Second)
 		newState := StateIdle
 		select {
 		case pingResult := <-pingResults:
diff --git a/plc4go/pkg/api/cache/plcConnectionLease_test.go b/plc4go/pkg/api/cache/plcConnectionLease_test.go
index d73c98fe7b..2566de89fb 100644
--- a/plc4go/pkg/api/cache/plcConnectionLease_test.go
+++ b/plc4go/pkg/api/cache/plcConnectionLease_test.go
@@ -41,8 +41,8 @@ func TestLeasedPlcConnection_IsTraceEnabled(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -112,8 +112,8 @@ func TestLeasedPlcConnection_GetTracer(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -157,8 +157,8 @@ func TestLeasedPlcConnection_GetConnectionId(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -202,8 +202,8 @@ func TestLeasedPlcConnection_Connect(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -245,8 +245,8 @@ func TestLeasedPlcConnection_BlockingClose(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -289,8 +289,8 @@ func TestLeasedPlcConnection_Close(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -333,8 +333,8 @@ func TestLeasedPlcConnection_IsConnected(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -369,8 +369,8 @@ func TestLeasedPlcConnection_Ping(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -414,8 +414,8 @@ func TestLeasedPlcConnection_GetMetadata(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -463,8 +463,8 @@ func TestLeasedPlcConnection_ReadRequestBuilder(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -509,8 +509,8 @@ func TestLeasedPlcConnection_WriteRequestBuilder(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -555,8 +555,8 @@ func TestLeasedPlcConnection_SubscriptionRequestBuilder(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -601,8 +601,8 @@ func TestLeasedPlcConnection_UnsubscriptionRequestBuilder(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
@@ -655,8 +655,8 @@ func TestLeasedPlcConnection_BrowseRequestBuilder(t *testing.T) {
 	// Reduce the max lease time as this way we also reduce the max wait time.
 	cache := plcConnectionCache{
 		driverManager: driverManager,
-		maxLeaseTime:  time.Second * 1,
-		maxWaitTime:   time.Second * 5,
+		maxLeaseTime:  1 * time.Second,
+		maxWaitTime:   5 * time.Second,
 		cacheLock:     lock.NewCASMutex(),
 		connections:   make(map[string]*connectionContainer),
 		tracer:        nil,
diff --git a/plc4go/spi/default/DefaultCodec.go b/plc4go/spi/default/DefaultCodec.go
index 3bca7b40b3..9b6efddbff 100644
--- a/plc4go/spi/default/DefaultCodec.go
+++ b/plc4go/spi/default/DefaultCodec.go
@@ -311,7 +311,7 @@ mainLoop:
 		if numberOfExpectations <= 0 && m.customMessageHandling == nil {
 			workerLog.Trace().Msg("no available expectations")
 			// Sleep for 10ms
-			time.Sleep(time.Millisecond * 10)
+			time.Sleep(10 * time.Millisecond)
 			continue mainLoop
 		}
 		m.TimeoutExpectations(now)
@@ -321,14 +321,14 @@ mainLoop:
 		message, err := m.Receive()
 		if err != nil {
 			workerLog.Error().Err(err).Msg("got an error reading from transport")
-			time.Sleep(time.Millisecond * 10)
+			time.Sleep(10 * time.Millisecond)
 			continue mainLoop
 		}
 		if message == nil {
 			workerLog.Trace().Msg("Not enough data yet")
 			// Sleep for 10ms before checking again, in order to not
 			// consume 100% CPU Power.
-			time.Sleep(time.Millisecond * 10)
+			time.Sleep(10 * time.Millisecond)
 			continue mainLoop
 		}
 		workerLog.Trace().Msgf("got message:\n%s", message)
diff --git a/plc4go/spi/default/DefaultConnection.go b/plc4go/spi/default/DefaultConnection.go
index 6639a49e1a..3ff36e787f 100644
--- a/plc4go/spi/default/DefaultConnection.go
+++ b/plc4go/spi/default/DefaultConnection.go
@@ -162,7 +162,7 @@ type defaultConnection struct {
 }
 
 func buildDefaultConnection(requirements DefaultConnectionRequirements, _options ...options.WithOption) DefaultConnection {
-	defaultTtl := time.Second * 10
+	defaultTtl := 10 * time.Second
 	var tagHandler spi.PlcTagHandler
 	var valueHandler spi.PlcValueHandler
 
diff --git a/plc4go/spi/model/DefaultPlcReadRequest.go b/plc4go/spi/model/DefaultPlcReadRequest.go
index 79952d9132..855e9be551 100644
--- a/plc4go/spi/model/DefaultPlcReadRequest.go
+++ b/plc4go/spi/model/DefaultPlcReadRequest.go
@@ -137,7 +137,7 @@ func (d *DefaultPlcReadRequest) ExecuteWithContextAndInterceptor(ctx context.Con
 	for _, subRequest := range readRequests {
 		subResultChannels = append(subResultChannels, d.reader.Read(ctx, subRequest))
 		// TODO: Replace this with a real queueing of requests. Later on we need throttling. At the moment this avoids race condition as the read above writes to fast on the line which is a problem for the test
-		time.Sleep(time.Millisecond * 4)
+		time.Sleep(4 * time.Millisecond)
 	}
 
 	// Create a new result-channel, which completes as soon as all sub-result-channels have returned
diff --git a/plc4go/spi/model/DefaultPlcWriteRequest.go b/plc4go/spi/model/DefaultPlcWriteRequest.go
index c86de4cf45..541f53e3ae 100644
--- a/plc4go/spi/model/DefaultPlcWriteRequest.go
+++ b/plc4go/spi/model/DefaultPlcWriteRequest.go
@@ -158,7 +158,7 @@ func (d *DefaultPlcWriteRequest) ExecuteWithContextAndInterceptor(ctx context.Co
 	for _, subRequest := range writeRequests {
 		subResultChannels = append(subResultChannels, d.writer.Write(ctx, subRequest))
 		// TODO: Replace this with a real queueing of requests. Later on we need throttling. At the moment this avoids race condition as the read above writes to fast on the line which is a problem for the test
-		time.Sleep(time.Millisecond * 4)
+		time.Sleep(4 * time.Millisecond)
 	}
 
 	// Create a new result-channel, which completes as soon as all sub-result-channels have returned
diff --git a/plc4go/spi/pool/future.go b/plc4go/spi/pool/future.go
index cc55be09e0..41d4396006 100644
--- a/plc4go/spi/pool/future.go
+++ b/plc4go/spi/pool/future.go
@@ -54,7 +54,7 @@ var Canceled = errors.New("Canceled")
 
 func (f *future) AwaitCompletion(ctx context.Context) error {
 	for !f.completed.Load() && !f.errored.Load() && !f.cancelRequested.Load() && ctx.Err() == nil {
-		time.Sleep(time.Millisecond * 10)
+		time.Sleep(10 * time.Millisecond)
 	}
 	if err := ctx.Err(); err != nil {
 		return err
diff --git a/plc4go/spi/pool/future_test.go b/plc4go/spi/pool/future_test.go
index 8d32051725..7a379b0b07 100644
--- a/plc4go/spi/pool/future_test.go
+++ b/plc4go/spi/pool/future_test.go
@@ -50,7 +50,7 @@ func Test_future_AwaitCompletion(t *testing.T) {
 			name: "completes regular",
 			args: args{ctx: context.TODO()},
 			completer: func(f *future) {
-				time.Sleep(time.Millisecond * 30)
+				time.Sleep(30 * time.Millisecond)
 				f.complete()
 			},
 			wantErr: assert.NoError,
@@ -63,7 +63,7 @@ func Test_future_AwaitCompletion(t *testing.T) {
 				return deadline
 			}()},
 			completer: func(f *future) {
-				time.Sleep(time.Millisecond * 300)
+				time.Sleep(300 * time.Millisecond)
 			},
 			wantErr: assert.Error,
 		},
@@ -71,7 +71,7 @@ func Test_future_AwaitCompletion(t *testing.T) {
 			name: "completes canceled without error",
 			args: args{ctx: context.TODO()},
 			completer: func(f *future) {
-				time.Sleep(time.Millisecond * 300)
+				time.Sleep(300 * time.Millisecond)
 				f.Cancel(true, nil)
 			},
 			wantErr: func(t assert.TestingT, err error, i ...any) bool {
@@ -83,7 +83,7 @@ func Test_future_AwaitCompletion(t *testing.T) {
 			name: "completes canceled with particular error",
 			args: args{ctx: context.TODO()},
 			completer: func(f *future) {
-				time.Sleep(time.Millisecond * 300)
+				time.Sleep(300 * time.Millisecond)
 				f.Cancel(true, errors.New("Uh oh"))
 			},
 			wantErr: func(t assert.TestingT, err error, i ...any) bool {