func handleRPCError
has a cyclomatic complexity of 17 with "high" risk

666 }
667
668// Handles errors received from the RPC server according to the specification.
669func handleRPCError(err error) error {670 if err == nil {
671 return nil
672 }
func NewPayload
has a cyclomatic complexity of 20 with "high" risk

126 var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
127
128// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
129func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error) {130 ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
131 defer span.End()
132 start := time.Now()
func ForkchoiceUpdated
has a cyclomatic complexity of 18 with "high" risk

197 }
198
199// ForkchoiceUpdated calls the engine_forkchoiceUpdatedV1 method via JSON-RPC.
200func (s *Service) ForkchoiceUpdated(201 ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
202) (*pb.PayloadIDBytes, []byte, error) {
203 ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ForkchoiceUpdated")
func Validate
has a cyclomatic complexity of 16 with "high" risk

344 }
345
346// Validate returns an error if key fields in ExecutionPayloadElectraJSON are nil or invalid.
347func (j *ExecutionPayloadElectraJSON) Validate() error { 348 if j.ParentHash == nil {
349 return errors.New("missing required field 'parentHash' for ExecutionPayload")
350 }
func UnmarshalJSON
has a cyclomatic complexity of 25 with "high" risk

1166 	return nil
1167}
1168
1169func (e *ExecutionPayloadDenebWithValueAndBlobsBundle) UnmarshalJSON(enc []byte) error {1170 dec := GetPayloadV3ResponseJson{}
1171 if err := json.Unmarshal(enc, &dec); err != nil {
1172 return err
A function with high cyclomatic complexity can be hard to understand and maintain. Cyclomatic complexity is a software metric that measures the number of independent paths through a function. A higher cyclomatic complexity indicates that the function has more decision points and is more complex.
Functions with high cyclomatic complexity are more likely to have bugs and be harder to test. They may lead to reduced code maintainability and increased development time.
To reduce the cyclomatic complexity of a function, you can: extract independent branches into small helper functions, combine duplicated or overlapping conditions, and replace deeply nested conditionals with early returns or lookup tables. The two examples below show the same logic written with high and with reduced complexity:
package main
import "log"
func fizzbuzzfuzz(x int) { // cc = 1
if x == 0 || x < 0 { // cc = 3 (if, ||)
return
}
for i := 1; i <= x; i++ { // cc = 4 (for)
switch i % 15 * 2 {
case 0: // cc = 5 (case)
countDiv3 += 1
countDiv5 += 1
log.Println("fizzbuzz")
break
case 3:
case 6:
case 9:
case 12: // cc = 9 (case)
countDiv3 += 1
log.Println("fizz")
break
case 5:
case 10: // cc = 11 (case)
countDiv5 += 1
log.Println("buzz")
break
default:
log.Printf("%d\n", x)
}
}
} // CC == 11; raises issues
package main
import "log"
func fizzbuzz(x int) { // cc = 1
for i := 1; i <= x; i++ { // cc = 2 (for)
y := i%3 == 0
z := i%5 == 0
if y == z { // 3
if y == false { // 4
log.Printf("%d\n", i)
} else {
log.Println("fizzbuzz")
}
} else {
if y { // 5
log.Println("fizz")
} else {
log.Println("buzz")
}
}
}
} // CC == 5
Cyclomatic complexity threshold can be configured using the
cyclomatic_complexity_threshold
(docs) in the
.deepsource.toml
config file.
Configuring this is optional. If you don't provide a value, the Analyzer will
raise issues for functions with complexity higher than the default threshold,
which is medium
(only raise issues for >15) for the Go Analyzer.
Here's the mapping of the risk category to the cyclomatic complexity score to help you configure this better:
Risk category | Cyclomatic complexity range | Recommended action |
---|---|---|
low | 1-5 | No action needed. |
medium | 6-15 | Review and monitor. |
high | 16-25 | Review and refactor. Recommended to add comments if the function is absolutely needed to be kept as it is. |
very-high | 26-50 | Refactor to reduce the complexity. |
critical | >50 | Must refactor this. This can make the code untestable and very difficult to understand. |