IMAP: TLS protocol error, bad record MAC (DecryptMissingDataBytes)

Hello. This problem appeared a couple of days ago: mail has stopped working completely, and I can neither send nor receive messages.
Around the same time my Kaspersky license expired; this may be related, but I would rather not believe it. Is there a way to make The Bat! work without Kaspersky?

Error:
     IMAP  — TLS protocol error: bad record MAC, DecryptAEAD.
     IMAP  — Connecting to IMAP server imap.mail.ru, port 993
     IMAP  — Starting the TLS handshake

Edited: Amir Sultanov, 28.02.2021 12:38:34
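
One way to narrow this down, independently of The Bat!, is to open a TLS connection to imap.mail.ru:993 directly and read the IMAP greeting, once with Kaspersky's traffic scanning enabled and once with it paused. Below is a minimal, hypothetical Go sketch for that check (Go is used only because the rest of this page already contains Go code); if the direct handshake succeeds while the mail client still reports a bad record MAC, the interception layer between the client and the server is the likely suspect.

// tlscheck.go: minimal diagnostic sketch (not part of The Bat!).
// It opens a TLS connection to imap.mail.ru:993 and prints the
// server greeting, so behaviour can be compared with the antivirus
// traffic scanning turned on and off.
package main

import (
	"bufio"
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	conn, err := tls.Dial("tcp", "imap.mail.ru:993", &tls.Config{})
	if err != nil {
		log.Fatalf("TLS handshake failed: %v", err)
	}
	defer conn.Close()

	state := conn.ConnectionState()
	fmt.Printf("negotiated TLS version: %#x, cipher suite: %#x\n",
		state.Version, state.CipherSuite)

	// The IMAP server sends a greeting line such as "* OK ..." first.
	greeting, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		log.Fatalf("reading IMAP greeting: %v", err)
	}
	fmt.Print(greeting)
}

Run it with go run tlscheck.go.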

@mengzhuo: since this involves running lots of code on your builder, a quick way to verify whether the problem is an interaction between Go's TLS v1.3 and Google's TLS v1.2 endpoints is to apply this patch to your builder code in cmd/coordinator/gce.go:

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.13
// +build linux darwin

// Code interacting with Google Compute Engine (GCE) and
// a GCE implementation of the BuildletPool interface.

package main

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"cloud.google.com/go/compute/metadata"
	"cloud.google.com/go/datastore"
	"cloud.google.com/go/errorreporting"
	monapi "cloud.google.com/go/monitoring/apiv3"
	"cloud.google.com/go/storage"
	"golang.org/x/build/buildenv"
	"golang.org/x/build/buildlet"
	"golang.org/x/build/cmd/coordinator/spanlog"
	"golang.org/x/build/dashboard"
	"golang.org/x/build/gerrit"
	"golang.org/x/build/internal/buildgo"
	"golang.org/x/build/internal/buildstats"
	"golang.org/x/build/internal/lru"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
)

func init() {
	buildlet.GCEGate = gceAPIGate
}

// apiCallTicker ticks regularly, preventing us from accidentally making
// GCE API calls too quickly. Our quota is 20 QPS, but we temporarily
// limit ourselves to less than that.
var apiCallTicker = time.NewTicker(time.Second / 10)

func gceAPIGate() {
	<-apiCallTicker.C
}

// Initialized by initGCE:
var (
	buildEnv *buildenv.Environment

	dsClient       *datastore.Client
	computeService *compute.Service
	gcpCreds       *google.Credentials
	errTryDeps     error // non-nil if try bots are disabled
	gerritClient   *gerrit.Client
	storageClient  *storage.Client
	metricsClient  *monapi.MetricClient
	inStaging      bool                   // are we running in the staging project? (named -dev)
	errorsClient   *errorreporting.Client // Stackdriver errors client
	gkeNodeIP      string

	initGCECalled bool
)

// oAuthHTTPClient is the OAuth2 HTTP client used to make API calls to Google Cloud APIs.
// It is initialized by initGCE.
var oAuthHTTPClient *http.Client

func initGCE() error {
	initGCECalled = true
	var err error

	// If the coordinator is running on a GCE instance and a
	// buildEnv was not specified with the env flag, set the
	// buildEnvName to the project ID
	if *buildEnvName == "" {
		if *mode == "dev" {
			*buildEnvName = "dev"
		} else if metadata.OnGCE() {
			*buildEnvName, err = metadata.ProjectID()
			if err != nil {
				log.Fatalf("metadata.ProjectID: %v", err)
			}
		}
	}

	buildEnv = buildenv.ByProjectID(*buildEnvName)
	inStaging = (buildEnv == buildenv.Staging)

	// If running on GCE, override the zone and static IP, and check service account permissions.
	if metadata.OnGCE() {
		projectZone, err := metadata.Get("instance/zone")
		if err != nil || projectZone == "" {
			return fmt.Errorf("failed to get current GCE zone: %v", err)
		}

		gkeNodeIP, err = metadata.Get("instance/network-interfaces/0/ip")
		if err != nil {
			return fmt.Errorf("failed to get current instance IP: %v", err)
		}

		// Convert the zone from "projects/1234/zones/us-central1-a" to "us-central1-a".
		projectZone = path.Base(projectZone)
		buildEnv.Zone = projectZone

		if buildEnv.StaticIP == "" {
			buildEnv.StaticIP, err = metadata.ExternalIP()
			if err != nil {
				return fmt.Errorf("ExternalIP: %v", err)
			}
		}

		if !hasComputeScope() {
			return errors.New("coordinator is not running with access to read and write Compute resources. VM support disabled")
		}

		if value, err := metadata.ProjectAttributeValue("farmer-run-bench"); err == nil {
			*shouldRunBench, _ = strconv.ParseBool(value)
		}
	}

	cfgDump, _ := json.MarshalIndent(buildEnv, "", "  ")
	log.Printf("Loaded configuration %q for project %q:n%s", *buildEnvName, buildEnv.ProjectName, cfgDump)

	opts := []option.ClientOption{
		// Force TLS 1.2 in the HTTP client because of issues:
		// * https://github.com/golang/go/issues/31217
		// * https://github.com/googleapis/google-cloud-go/issues/1581
		// in which there might be a bad interaction with Go's TLS v1.3 and Google's TLS v1.2.
		option.WithHTTPClient(&http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					MaxVersion: tls.VersionTLS12,
				},
			},
		}),
	}

	ctx := context.Background()
	if *mode != "dev" {
		storageClient, err = storage.NewClient(ctx, opts...)
		if err != nil {
			log.Fatalf("storage.NewClient: %v", err)
		}

		metricsClient, err = monapi.NewMetricClient(ctx, opts...)
		if err != nil {
			log.Fatalf("monapi.NewMetricClient: %v", err)
		}
	}

	dsClient, err = datastore.NewClient(ctx, buildEnv.ProjectName, opts...)
	if err != nil {
		if *mode == "dev" {
			log.Printf("Error creating datastore client: %v", err)
		} else {
			log.Fatalf("Error creating datastore client: %v", err)
		}
	}

	// don't send dev errors to Stackdriver.
	if *mode != "dev" {
		errorsClient, err = errorreporting.NewClient(ctx, buildEnv.ProjectName, errorreporting.Config{
			ServiceName: "coordinator",
		})
		if err != nil {
			// don't exit, we still want to run coordinator
			log.Printf("Error creating errors client: %v", err)
		}
	}

	gcpCreds, err = buildEnv.Credentials(ctx)
	if err != nil {
		if *mode == "dev" {
			// don't try to do anything else with GCE, as it will likely fail
			return nil
		}
		log.Fatalf("failed to get a token source: %v", err)
	}
	oAuthHTTPClient = oauth2.NewClient(ctx, gcpCreds.TokenSource)
	computeService, _ = compute.New(oAuthHTTPClient)
	errTryDeps = checkTryBuildDeps()
	if errTryDeps != nil {
		log.Printf("TryBot builders disabled due to error: %v", errTryDeps)
	} else {
		log.Printf("TryBot builders enabled.")
	}

	if *mode != "dev" {
		go syncBuildStatsLoop(buildEnv)
	}

	go gcePool.pollQuotaLoop()
	go createBasepinDisks(context.Background())
	return nil
}

func checkTryBuildDeps() error {
	if !hasStorageScope() {
		return errors.New("coordinator's GCE instance lacks the storage service scope")
	}
	if *mode == "dev" {
		return errors.New("running in dev mode")
	}
	wr := storageClient.Bucket(buildEnv.LogBucket).Object("hello.txt").NewWriter(context.Background())
	fmt.Fprintf(wr, "Hello, world! Coordinator start-up at %v", time.Now())
	if err := wr.Close(); err != nil {
		return fmt.Errorf("test write of a GCS object to bucket %q failed: %v", buildEnv.LogBucket, err)
	}
	if inStaging {
		// Don't expect to write to Gerrit in staging mode.
		gerritClient = gerrit.NewClient("https://go-review.googlesource.com", gerrit.NoAuth)
	} else {
		gobotPass, err := metadata.ProjectAttributeValue("gobot-password")
		if err != nil {
			return fmt.Errorf("failed to get project metadata 'gobot-password': %v", err)
		}
		gerritClient = gerrit.NewClient("https://go-review.googlesource.com",
			gerrit.BasicAuth("git-gobot.golang.org", strings.TrimSpace(string(gobotPass))))
	}

	return nil
}

var gcePool = &gceBuildletPool{}

var _ BuildletPool = (*gceBuildletPool)(nil)

// maxInstances is a temporary hack because we can't get buildlets to boot
// without IPs, and we only have 200 IP addresses.
// TODO(bradfitz): remove this once fixed.
const maxInstances = 190

type gceBuildletPool struct {
	mu sync.Mutex // guards all following

	disabled bool

	// CPU quota usage & limits.
	cpuLeft   int // dead-reckoning CPUs remain
	instLeft  int // dead-reckoning instances remain
	instUsage int
	cpuUsage  int
	addrUsage int
	inst      map[string]time.Time // GCE VM instance name -> creationTime
}

func (p *gceBuildletPool) pollQuotaLoop() {
	if computeService == nil {
		log.Printf("pollQuotaLoop: no GCE access; not checking quota.")
		return
	}
	if buildEnv.ProjectName == "" {
		log.Printf("pollQuotaLoop: no GCE project name configured; not checking quota.")
		return
	}
	for {
		p.pollQuota()
		time.Sleep(5 * time.Second)
	}
}

func (p *gceBuildletPool) pollQuota() {
	gceAPIGate()
	reg, err := computeService.Regions.Get(buildEnv.ProjectName, buildEnv.Region()).Do()
	if err != nil {
		log.Printf("Failed to get quota for %s/%s: %v", buildEnv.ProjectName, buildEnv.Region(), err)
		return
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, quota := range reg.Quotas {
		switch quota.Metric {
		case "CPUS":
			p.cpuLeft = int(quota.Limit) - int(quota.Usage)
			p.cpuUsage = int(quota.Usage)
		case "INSTANCES":
			p.instLeft = int(quota.Limit) - int(quota.Usage)
			p.instUsage = int(quota.Usage)
		case "IN_USE_ADDRESSES":
			p.addrUsage = int(quota.Usage)
		}
	}
}

func (p *gceBuildletPool) SetEnabled(enabled bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.disabled = !enabled
}

func (p *gceBuildletPool) GetBuildlet(ctx context.Context, hostType string, lg logger) (bc *buildlet.Client, err error) {
	hconf, ok := dashboard.Hosts[hostType]
	if !ok {
		return nil, fmt.Errorf("gcepool: unknown host type %q", hostType)
	}
	qsp := lg.CreateSpan("awaiting_gce_quota")
	err = p.awaitVMCountQuota(ctx, hconf.GCENumCPU())
	qsp.Done(err)
	if err != nil {
		return nil, err
	}

	deleteIn, ok := ctx.Value(buildletTimeoutOpt{}).(time.Duration)
	if !ok {
		deleteIn = vmDeleteTimeout
	}

	instName := "buildlet-" + strings.TrimPrefix(hostType, "host-") + "-rn" + randHex(7)
	instName = strings.Replace(instName, "_", "-", -1) // Issue 22905; can't use underscores in GCE VMs
	p.setInstanceUsed(instName, true)

	gceBuildletSpan := lg.CreateSpan("create_gce_buildlet", instName)
	defer func() { gceBuildletSpan.Done(err) }()

	var (
		needDelete   bool
		createSpan   = lg.CreateSpan("create_gce_instance", instName)
		waitBuildlet spanlog.Span // made after create is done
		curSpan      = createSpan // either createSpan or waitBuildlet
	)

	log.Printf("Creating GCE VM %q for %s", instName, hostType)
	bc, err = buildlet.StartNewVM(gcpCreds, buildEnv, instName, hostType, buildlet.VMOpts{
		DeleteIn: deleteIn,
		OnInstanceRequested: func() {
			log.Printf("GCE VM %q now booting", instName)
		},
		OnInstanceCreated: func() {
			needDelete = true

			createSpan.Done(nil)
			waitBuildlet = lg.CreateSpan("wait_buildlet_start", instName)
			curSpan = waitBuildlet
		},
		OnGotInstanceInfo: func() {
			lg.LogEventTime("got_instance_info", "waiting_for_buildlet...")
		},
	})
	if err != nil {
		curSpan.Done(err)
		log.Printf("Failed to create VM for %s: %v", hostType, err)
		if needDelete {
			deleteVM(buildEnv.Zone, instName)
			p.putVMCountQuota(hconf.GCENumCPU())
		}
		p.setInstanceUsed(instName, false)
		return nil, err
	}
	waitBuildlet.Done(nil)
	bc.SetDescription("GCE VM: " + instName)
	bc.SetOnHeartbeatFailure(func() {
		p.putBuildlet(bc, hostType, instName)
	})
	return bc, nil
}

func (p *gceBuildletPool) putBuildlet(bc *buildlet.Client, hostType, instName string) error {
	// TODO(bradfitz): add the buildlet to a freelist (of max N
	// items) for up to 10 minutes since when it got started if
	// it's never seen a command execution failure, and we can
	// wipe all its disk content? (perhaps wipe its disk content
	// when it's retrieved, like the reverse buildlet pool) But
	// this will require re-introducing a distinction in the
	// buildlet client library between Close, Destroy/Halt, and
	// tracking execution errors.  That was all half-baked before
	// and thus removed. Now Close always destroys everything.
	deleteVM(buildEnv.Zone, instName)
	p.setInstanceUsed(instName, false)

	hconf, ok := dashboard.Hosts[hostType]
	if !ok {
		panic("failed to lookup conf") // should've worked if we did it before
	}
	p.putVMCountQuota(hconf.GCENumCPU())
	return nil
}

func (p *gceBuildletPool) WriteHTMLStatus(w io.Writer) {
	fmt.Fprintf(w, "<b>GCE pool</b> capacity: %s", p.capacityString())
	const show = 6 // must be even
	active := p.instancesActive()
	if len(active) > 0 {
		fmt.Fprintf(w, "<ul>")
		for i, inst := range active {
			if i < show/2 || i >= len(active)-(show/2) {
				fmt.Fprintf(w, "<li>%v, %s</li>n", inst.name, friendlyDuration(time.Since(inst.creation)))
			} else if i == show/2 {
				fmt.Fprintf(w, "<li>... %d of %d total omitted ...</li>n", len(active)-show, len(active))
			}
		}
		fmt.Fprintf(w, "</ul>")
	}
}

func (p *gceBuildletPool) String() string {
	return fmt.Sprintf("GCE pool capacity: %s", p.capacityString())
}

func (p *gceBuildletPool) capacityString() string {
	p.mu.Lock()
	defer p.mu.Unlock()
	return fmt.Sprintf("%d/%d instances; %d/%d CPUs",
		len(p.inst), p.instUsage+p.instLeft,
		p.cpuUsage, p.cpuUsage+p.cpuLeft)
}

// awaitVMCountQuota waits for numCPU CPUs of quota to become available,
// or returns ctx.Err.
func (p *gceBuildletPool) awaitVMCountQuota(ctx context.Context, numCPU int) error {
	// Poll every 2 seconds, which could be better, but works and
	// is simple.
	for {
		if p.tryAllocateQuota(numCPU) {
			return nil
		}
		select {
		case <-time.After(2 * time.Second):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func (p *gceBuildletPool) HasCapacity(hostType string) bool {
	hconf, ok := dashboard.Hosts[hostType]
	if !ok {
		return false
	}
	numCPU := hconf.GCENumCPU()
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.haveQuotaLocked(numCPU)
}

// haveQuotaLocked reports whether the current GCE quota permits
// starting numCPU more CPUs.
//
// precondition: p.mu must be held.
func (p *gceBuildletPool) haveQuotaLocked(numCPU int) bool {
	return p.cpuLeft >= numCPU && p.instLeft >= 1 && len(p.inst) < maxInstances && p.addrUsage < maxInstances
}

func (p *gceBuildletPool) tryAllocateQuota(numCPU int) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.disabled {
		return false
	}
	if p.haveQuotaLocked(numCPU) {
		p.cpuUsage += numCPU
		p.cpuLeft -= numCPU
		p.instLeft--
		p.addrUsage++
		return true
	}
	return false
}

// putVMCountQuota adjusts the dead-reckoning of our quota usage by
// one instance and cpu CPUs.
func (p *gceBuildletPool) putVMCountQuota(cpu int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.cpuUsage -= cpu
	p.cpuLeft += cpu
	p.instLeft++
}

func (p *gceBuildletPool) setInstanceUsed(instName string, used bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.inst == nil {
		p.inst = make(map[string]time.Time)
	}
	if used {
		p.inst[instName] = time.Now()
	} else {
		delete(p.inst, instName)
	}
}

func (p *gceBuildletPool) instanceUsed(instName string) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	_, ok := p.inst[instName]
	return ok
}

func (p *gceBuildletPool) instancesActive() (ret []resourceTime) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for name, create := range p.inst {
		ret = append(ret, resourceTime{
			name:     name,
			creation: create,
		})
	}
	sort.Sort(byCreationTime(ret))
	return ret
}

// resourceTime is a GCE instance or Kube pod name and its creation time.
type resourceTime struct {
	name     string
	creation time.Time
}

type byCreationTime []resourceTime

func (s byCreationTime) Len() int           { return len(s) }
func (s byCreationTime) Less(i, j int) bool { return s[i].creation.Before(s[j].creation) }
func (s byCreationTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// cleanUpOldVMs loops forever and periodically enumerates virtual
// machines and deletes those which have expired.
//
// A VM is considered expired if it has a "delete-at" metadata
// attribute having a unix timestamp before the current time.
//
// This is the safety mechanism to delete VMs which stray from the
// normal deleting process. VMs are created to run a single build and
// should be shut down by a controlling process. Due to various types
// of failures, they might get stranded. To prevent them from getting
// stranded and wasting resources forever, we instead set the
// "delete-at" metadata attribute on them when created to some time
// that's well beyond their expected lifetime.
func (p *gceBuildletPool) cleanUpOldVMs() {
	if *mode == "dev" {
		return
	}
	if computeService == nil {
		return
	}

	// TODO(bradfitz): remove this list and just query it from the compute API?
	// http://godoc.org/google.golang.org/api/compute/v1#RegionsService.Get
	// and Region.Zones: http://godoc.org/google.golang.org/api/compute/v1#Region

	for {
		for _, zone := range buildEnv.ZonesToClean {
			if err := p.cleanZoneVMs(zone); err != nil {
				log.Printf("Error cleaning VMs in zone %q: %v", zone, err)
			}
		}
		time.Sleep(time.Minute)
	}
}

// cleanZoneVMs is part of cleanUpOldVMs, operating on a single zone.
func (p *gceBuildletPool) cleanZoneVMs(zone string) error {
	// Fetch the first 500 (default) running instances and clean
	// those. We expect that we'll be running many fewer than
	// that. Even if we have more, eventually the first 500 will
	// either end or be cleaned, and then the next call will get a
	// partially-different 500.
	// TODO(bradfitz): revisit this code if we ever start running
	// thousands of VMs.
	gceAPIGate()
	list, err := computeService.Instances.List(buildEnv.ProjectName, zone).Do()
	if err != nil {
		return fmt.Errorf("listing instances: %v", err)
	}
	for _, inst := range list.Items {
		if inst.Metadata == nil {
			// Defensive. Not seen in practice.
			continue
		}
		var sawDeleteAt bool
		var deleteReason string
		for _, it := range inst.Metadata.Items {
			if it.Key == "delete-at" {
				if it.Value == nil {
					log.Printf("missing delete-at value; ignoring")
					continue
				}
				unixDeadline, err := strconv.ParseInt(*it.Value, 10, 64)
				if err != nil {
					log.Printf("invalid delete-at value %q seen; ignoring", *it.Value)
					continue
				}
				sawDeleteAt = true
				if time.Now().Unix() > unixDeadline {
					deleteReason = "delete-at expiration"
				}
			}
		}
		isBuildlet := strings.HasPrefix(inst.Name, "buildlet-")

		if isBuildlet && !sawDeleteAt && !p.instanceUsed(inst.Name) {
			createdAt, _ := time.Parse(time.RFC3339Nano, inst.CreationTimestamp)
			if createdAt.Before(time.Now().Add(-3 * time.Hour)) {
				deleteReason = fmt.Sprintf("no delete-at, created at %s", inst.CreationTimestamp)
			}
		}

		// Delete buildlets (things we made) from previous
		// generations. Only deleting things starting with "buildlet-"
		// is a historical restriction, but still fine for paranoia.
		if deleteReason == "" && sawDeleteAt && isBuildlet && !p.instanceUsed(inst.Name) {
			if _, ok := deletedVMCache.Get(inst.Name); !ok {
				deleteReason = "from earlier coordinator generation"
			}
		}

		if deleteReason != "" {
			log.Printf("deleting VM %q in zone %q; %s ...", inst.Name, zone, deleteReason)
			deleteVM(zone, inst.Name)
		}

	}
	return nil
}

var deletedVMCache = lru.New(100) // keyed by instName

// deleteVM starts a delete of an instance in a given zone.
//
// It either returns an operation name (if delete is pending) or the
// empty string if the instance didn't exist.
func deleteVM(zone, instName string) (operation string, err error) {
	deletedVMCache.Add(instName, token{})
	gceAPIGate()
	op, err := computeService.Instances.Delete(buildEnv.ProjectName, zone, instName).Do()
	apiErr, ok := err.(*googleapi.Error)
	if ok {
		if apiErr.Code == 404 {
			return "", nil
		}
	}
	if err != nil {
		log.Printf("Failed to delete instance %q in zone %q: %v", instName, zone, err)
		return "", err
	}
	log.Printf("Sent request to delete instance %q in zone %q. Operation ID, Name: %v, %v", instName, zone, op.Id, op.Name)
	return op.Name, nil
}

func hasScope(want string) bool {
	// If not on GCE, assume full access
	if !metadata.OnGCE() {
		return true
	}
	scopes, err := metadata.Scopes("default")
	if err != nil {
		log.Printf("failed to query metadata default scopes: %v", err)
		return false
	}
	for _, v := range scopes {
		if v == want {
			return true
		}
	}
	return false
}

func hasComputeScope() bool {
	return hasScope(compute.ComputeScope) || hasScope(compute.CloudPlatformScope)
}

func hasStorageScope() bool {
	return hasScope(storage.ScopeReadWrite) || hasScope(storage.ScopeFullControl) || hasScope(compute.CloudPlatformScope)
}

func readGCSFile(name string) ([]byte, error) {
	if *mode == "dev" {
		b, ok := testFiles[name]
		if !ok {
			return nil, &os.PathError{
				Op:   "open",
				Path: name,
				Err:  os.ErrNotExist,
			}
		}
		return []byte(b), nil
	}

	r, err := storageClient.Bucket(buildEnv.BuildletBucket).Object(name).NewReader(context.Background())
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}

// syncBuildStatsLoop runs forever in its own goroutine and syncs the
// coordinator's datastore Build & Span entities to BigQuery
// periodically.
func syncBuildStatsLoop(env *buildenv.Environment) {
	ticker := time.NewTicker(5 * time.Minute)
	for {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
		if err := buildstats.SyncBuilds(ctx, env); err != nil {
			log.Printf("buildstats: SyncBuilds: %v", err)
		}
		if err := buildstats.SyncSpans(ctx, env); err != nil {
			log.Printf("buildstats: SyncSpans: %v", err)
		}
		cancel()
		<-ticker.C
	}
}

// createBasepinDisks creates zone-local copies of VM disk images, to
// speed up VM creations in the future.
//
// Other than a list call, this a no-op unless new VM images were
// added or updated recently.
func createBasepinDisks(ctx context.Context) {
	if !metadata.OnGCE() || (buildEnv != buildenv.Production && buildEnv != buildenv.Staging) {
		return
	}
	for {
		t0 := time.Now()
		bgc, err := buildgo.NewClient(ctx, buildEnv)
		if err != nil {
			log.Printf("basepin: NewClient: %v", err)
			return
		}
		log.Printf("basepin: creating basepin disks...")
		err = bgc.MakeBasepinDisks(ctx)
		d := time.Since(t0).Round(time.Second / 10)
		if err != nil {
			basePinErr.Store(err.Error())
			log.Printf("basepin: error creating basepin disks, after %v: %v", d, err)
			time.Sleep(5 * time.Minute)
			continue
		}
		basePinErr.Store("")
		log.Printf("basepin: created basepin disks after %v", d)
		return
	}
}
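
For reference, the essential change in the file above is the option.WithHTTPClient block near the top of initGCE, which caps the client at TLS 1.2 via tls.Config.MaxVersion. The standalone, hypothetical sketch below distills that idea for quick verification outside the coordinator: it performs one handshake with Go's defaults (TLS 1.3 allowed) and one capped at TLS 1.2 against a chosen endpoint, which is roughly the quick check described at the top of this comment. The host name is only a placeholder and is not taken from the coordinator code.

// tlsprobe.go: hypothetical sketch comparing a default handshake
// (TLS 1.3 permitted) with one capped at TLS 1.2, to see whether
// only the 1.3 path misbehaves against a given endpoint.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

// probe performs a TLS handshake against host:443. A maxVersion of 0
// leaves Go's defaults in place (TLS 1.3 permitted); a non-zero value
// caps the negotiated version, e.g. tls.VersionTLS12.
func probe(host string, maxVersion uint16) {
	cfg := &tls.Config{MaxVersion: maxVersion}
	conn, err := tls.Dial("tcp", host+":443", cfg)
	if err != nil {
		log.Printf("%s (MaxVersion=%#x): handshake error: %v", host, maxVersion, err)
		return
	}
	defer conn.Close()
	fmt.Printf("%s (MaxVersion=%#x): negotiated TLS version %#x\n",
		host, maxVersion, conn.ConnectionState().Version)
}

func main() {
	const host = "storage.googleapis.com" // placeholder endpoint, not from the coordinator code
	probe(host, 0)                // Go defaults, TLS 1.3 allowed
	probe(host, tls.VersionTLS12) // capped at TLS 1.2, as in the patch above
}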

Hello.
A daily problem with Kaspersky Security Center installed on Windows Server 2012 R2.

Every morning when I log on to the server I see the message:
"Could not connect to Administration Server 'localhost'. Specify a different address or try to connect again."

At the same time the following entry appears in the "Audit events" report:

Event name: Server stopped
Severity: Informational message
Application: Kaspersky Security Center Administration Server
Version number: 10.1.249
Task name:
Computer: Administration Server <name>
Group: <name>
Time: December 26, 2014, 2:00:02 AM
Virtual Server name:
Description: The Administration Server service has been stopped

After restarting the application manually, everything works fine until the next morning. What should I do?

Hi,

Testing Kowl but I get this error tls: bad record MAC
Full output:

kowl_1  | {"level":"info","msg":"config filepath is not set, proceeding with options set from env variables and flags"}
kowl_1  | {"level":"info","ts":"2022-03-29T12:55:23.258Z","msg":"started Kowl","version":"master","git_sha":"c6a6f4cb15d9e58e28b13930af95b272e2eae2ca","built":"2022-02-27T09:45:11Z"}
kowl_1  | {"level":"info","ts":"2022-03-29T12:55:23.266Z","msg":"connecting to Kafka seed brokers, trying to fetch cluster metadata"}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.266Z","msg":"opening connection to broker","source":"kafka_client","addr":"kafka-app-test.mydomain.net:9193","broker":"seed 0"}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.314Z","msg":"kafka connection succeeded","source":"kafka_client_hooks","host":"kafka-app-test.mydomain.net","dial_duration":0.0478782}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.314Z","msg":"connection opened to broker","source":"kafka_client","addr":"kafka-app-test.mydomain.net:9193","broker":"seed 0"}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.314Z","msg":"issuing api versions request","source":"kafka_client","broker":"seed 0","version":3}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.314Z","msg":"wrote ApiVersions v3","source":"kafka_client","broker":"seed 0","bytes_written":61,"write_wait":0.0000375,"time_to_write":0.0000462,"err":null}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.347Z","msg":"read ApiVersions v3","source":"kafka_client","broker":"seed 0","bytes_read":0,"read_wait":0.0000645,"time_to_read":0.032389,"err":"local error: tls: bad record MAC"}
kowl_1  | {"level":"error","ts":"2022-03-29T12:55:23.347Z","msg":"unable to request api versions","source":"kafka_client","broker":"seed 0","err":"local error: tls: bad record MAC"}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.347Z","msg":"connection initialization failed","source":"kafka_client","addr":"kafka-app-test.mydomain.net:9193","broker":"seed 0","err":"local error: tls: bad record MAC"}
kowl_1  | {"level":"debug","ts":"2022-03-29T12:55:23.347Z","msg":"kafka broker disconnected","source":"kafka_client_hooks","host":"kafka-app-test.mydomain.net"}
kowl_1  | {"level":"fatal","ts":"2022-03-29T12:55:23.347Z","msg":"failed to create kafka service","error":"failed to test kafka connection: failed to request metadata: local error: tls: bad record MAC"}

Using this docker-compose.yaml

version: '2'

services:

  kowl:
    image: 'quay.io/cloudhut/kowl:master'
    ports:
      - '8088:8080'
    environment:
      - KAFKA_BROKERS=kafka-app-test.mydomain.net:9193
      - KAFKA_TLS_ENABLED=true
      - KAFKA_TLS_CAFILEPATH=/vdsp_test_ca.cer
      - KAFKA_TLS_KEYFILEPATH=/cert.p12
      - KAFKA_TLS_PASSPHRASE=4IfS**************6dxGj
      - KAFKA_TLS_INSECURESKIPTLSVERIFY=true
      - LOGGER_LEVEL=debug
    volumes:
      - '/c/Users/myuserid/projects/kowl/certs/vdsp_test_ca.cer:/vdsp_test_ca.cer'
      - '/c/Users/myuserid/projects/kowl/certs/cert.p12:/cert.p12'

Any idea why this happens?
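
A handshake that fails with tls: bad record MAC on the very first request sometimes points at something between the client and the broker altering the stream (an interception proxy, or a listener that is not actually speaking TLS on that port) rather than at Kowl itself. As a first check, a small, hypothetical Go sketch such as the one below, run from the same network as the container, shows whether a plain TLS handshake against kafka-app-test.mydomain.net:9193 with your CA succeeds independently of Kowl. It does not present the client certificate from cert.p12, so a broker that requires mutual TLS may still reject the connection, but a certificate error would already be a different symptom than a bad record MAC. The host and CA file path are taken from the compose file above; everything else is an assumption.

// brokerprobe.go: hypothetical check that the Kafka listener really
// speaks TLS and that a handshake with the given CA succeeds,
// independent of Kowl.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"log"
	"os"
)

func main() {
	caPEM, err := os.ReadFile("vdsp_test_ca.cer") // CA file from the compose volume above
	if err != nil {
		log.Fatalf("reading CA file: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("CA file is not PEM-encoded or contains no certificates")
	}

	conn, err := tls.Dial("tcp", "kafka-app-test.mydomain.net:9193", &tls.Config{
		RootCAs: pool,
	})
	if err != nil {
		log.Fatalf("TLS handshake failed: %v", err)
	}
	defer conn.Close()

	state := conn.ConnectionState()
	fmt.Printf("handshake OK, TLS version %#x, peer CN %q\n",
		state.Version, state.PeerCertificates[0].Subject.CommonName)
}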

local error: tls: bad record MAC #2838 (Closed)

jceloria opened this issue on Jul 2, 2020 · 11 comments

Comments
@jceloria

Summary

This is a new install using docker-compose, following the getting started guide. After running docker-compose up, I proceed to the console and see Token exchange refused. I came across #2353, #1818, #2511, and #2521, all of which led me to try different options to resolve this issue; unfortunately, nothing has worked for me so far.

Steps to Reproduce

  1. Configure docker-compose.yml and ttn-lw-stack.yml
  2. Initialize the database, create the admin user, and create the oauth-client using the same client-secret value as outlined in the getting-started documentation for console.oauth.client-secret
  3. Run: docker-compose up

What do you see now?

I’m able to resolve the host name from the container:

/ # ping -q -c1 lora.<redacted>
PING lora.<redacted> (192.168.1.10): 56 data bytes

--- lora.<redacted> ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 0.052/0.052/0.052 ms

I have verified the certificates:

/ # ttn-lw-stack config | grep .pem | sed 's/^ *//g'
--tls.certificate="/run/secrets/cert.pem"
--tls.key="/run/secrets/key.pem"
--tls.root-ca="/run/secrets/ca.pem"

/ # apk add openssl
fetch http://dl-cdn.alpinelinux.org/alpine/v3.10/main/x86_64/APKINDEX.tar.gz
fetch http://dl-cdn.alpinelinux.org/alpine/v3.10/community/x86_64/APKINDEX.tar.gz
(1/1) Installing openssl (1.1.1g-r0)
Executing busybox-1.30.1-r3.trigger
OK: 8 MiB in 19 packages

/ # openssl x509 -in /run/secrets/ca.pem -text -noout | awk '/Subject Key Identifier/ && $0 != "" {getline; print $0}' | tr -d ' '
A8:4A:6A:63:04:7D:DD:BA:E6:D1:39:B7:A6:45:65:EF:F3:A8:EC:A1

/ # openssl x509 -in /run/secrets/cert.pem -text -noout | awk '/Authority Key Identifier/ && $0 != "" {getline; print $0}' | tr -d ' '
keyid:A8:4A:6A:63:04:7D:DD:BA:E6:D1:39:B7:A6:45:65:EF:F3:A8:EC:A1

When I run curl against localhost and the host name I’m using in the config I get the following:

/ # for host in localhost lora.<redacted>; do echo ${host}:; curl https://${host}:8885; echo -e '-----\n'; done
localhost:
curl: (60) SSL certificate problem: unable to get local issuer certificate
More details here: https://curl.haxx.se/docs/sslcerts.html

curl failed to verify the legitimacy of the server and therefore could not
establish a secure connection to it. To learn more about this situation and
how to fix it, please visit the web page mentioned above.
-----

lora.<redacted>:
curl: (60) SSL certificate problem: unable to get local issuer certificate
More details here: https://curl.haxx.se/docs/sslcerts.html

curl failed to verify the legitimacy of the server and therefore could not
establish a secure connection to it. To learn more about this situation and
how to fix it, please visit the web page mentioned above.
-----

I get the same outcome when I specify the ca cert:
curl --cacert /run/secrets/ca.pem https://${host}:8885

When viewing the docker logs, I see the following when attempting to connect:

2020/07/02 18:05:42 http: TLS handshake error from 127.0.0.1:47152: local error: tls: bad record MAC
2020/07/02 18:05:42 http: TLS handshake error from 192.168.176.1:36420: local error: tls: bad record MAC

Am I missing something?

What do you want to see instead?

I would like to login to the console.

Environment

#─► rpm -q centos-release docker-ce
centos-release-8.1-1.1911.0.9.el8.x86_64
docker-ce-19.03.10-3.el7.x86_64
/ # ttn-lw-stack version
The Things Stack for LoRaWAN: ttn-lw-stack
Version:             3.8.4
Build date:          2020-06-12T17:15:05Z
Git commit:          d63f7de74
Go version:          go1.14.4
OS/Arch:             linux/amd64

How do you propose to implement this?

no idea.

How do you propose to test this?

n/a

Can you do this yourself and submit a Pull Request?

n/a

@KrishnaIyer

  1. How did you generate the SSL Certs? If you’ve deployed this on a public machine, you can either use the in-built stack ACME or query certificates yourself using Lets Encrypt.
  2. Does the CN (Canonical Name) of the certificate match the host where this is deployed?

If you’re using the stack locally (localhost), then I’d recommend interacting with the stack without TLS (using the http/mqtt/ws/grpc ports).

for host in localhost lora.; do echo ${host}:; curl https://${host}:8885; echo -e ‘——n’; done

This is quite wrong. No Certificate Authority will provide you a certificate for localhost and if you’re using self-signed certs, please don’t use this on a public deployment.

@jceloria

Thank you for the quick response.

1) How did you generate the SSL Certs? If you’ve deployed this on a public
machine, you can either use the in-built stack ACME or query certificates
yourself using Lets Encrypt.
— I’m using an existing Let’s Encrypt certificate generated by acme.sh and managed
outside of The Things Stack for my domain.
2) Does the CN (Canonical Name) of the certificate match the host where
this is deployed?
— Yes, the certificate I use is a wildcard certificate that I use for other
services that are exposed publicly, The Things Stack is not exposed
publicly and I have not yet decided if I will ever do so. If I do decide to
publicly expose it, then it will be behind an SSL terminated reverse Apache
proxy protected with oauth that I maintain. The goal was to test this out
as a POC before doing anything else.

If you’re using the stack locally (localhost), then I’d recommend

interacting with the stack without TLS (using the http/mqtt/ws/grpc ports).
— That was my original intent, but I had received the Token exchange
refused message upon initial login after bringing up the stack and
connecting to http://lora.<redacted>:1885. After searching GitHub and the
forum, I read something about TLS being used on the back end (I can't
quite recall exactly what), which led me down the path of verifying that my
certificates were in fact valid. I also noticed that the certificates were
not picked up when they were configured in the config file; instead I
needed to supply the values using environment variables.

for host in localhost lora.; do echo ${host}:; curl https://${host}:8885;

echo -e ‘——n’; done
Something must have gotten stripped from that message, because it's a for
loop testing curl against localhost and a redacted hostname that I was not
comfortable displaying publicly, `lora."redacted"`; the "redacted" part of
that for loop is my domain, which I demonstrated as being a valid hostname
with the ping statement I mentioned in the issue. The only reason I
included localhost was that some previous comments on other issues had
mentioned attempting the connection to localhost, which succeeded. The
attempt was purely a last-ditch "what else can I possibly try" effort.

@benolayinka

Let me see if I understand this right

  • You have a machine exposing lora.redacted with Apache using acme certificates for TLS
  • You’re running a docker container on this machine but you’re not exposing The Things Stack with Apache
  • You’re running all of the above commands on this machine, so a ping to localhost will never leave the machine but a ping to lora.redacted should hit a DNS resolver somewhere and then come back to the machine through Apache

If I have that right, is this machine physically local to you? Are you opening a browser on this machine and hitting localhost:1885?

@jceloria

I’m accessing all resources from my local network at the moment; this is
simply a POC.

I have a UniFi Security Gateway with ports 80 and 443 mapped to a docker
host on a private subnet running Apache. The same docker host that’s
running Apache is also running The Things Stack. I’m using split DNS, and I
only have a few services that I’m exposing publicly. I am not yet exposing
lora.redacted either through the Apache reverse proxy nor DNS. All of the
services I’m hosting (both public and private) are using the same Let’s
Encrypt wildcard certificate. I’m using dnsmasq on my USG as the internal
resolver. I have multiple CNAME/aliases pointing to the docker host running
Apache/The Things Stack/etc. Apache is only ever involved when attempting
to connect to public DNS from outside of my local network. The DNS entry,
lora.redacted (192.168.1.10) points directly to the docker host.

To simplify things:

- local network -> http://lora.redacted:1885 (192.168.1.10:1885) -> "Token exchange refused"
- local network -> ssh tunnel (1885:192.168.1.10:1885) -> http://localhost:1885 -> "Token exchange refused"
- local network -> https://lora.redacted:8885 (192.168.1.10:1885) -> "Token exchange refused"
- local network -> ssh tunnel (8885:192.168.1.10:8885) -> https://localhost:8885 -> "SSL_ERROR_BAD_CERT_DOMAIN" -> "Token exchange refused"
- local network -> https://www.redacted (192.168.1.10:443) -> OK
- public network -> https://www.redacted -> OK

@KrishnaIyer

@jceloria: This has to do with your console/oauth configuration. Can you add that here?

@jceloria

That’s most likely the case, I’m sure.

I posted both the docker-compose.yml and ttn-lw-stack.yml, under Steps to reproduce (1.) as gists, or were you referring to something else?

@KrishnaIyer

Ah yeah indeed. I just saw that. I think I see the issue here.
For your configuration, only https://lora.redacted will work. Also your config is missing :8885 unless you’re proxying https://lora.redacted to https://lora.redacted:8885.

What values did you use as callback when creating the Oauth client?

@jceloria

I used Ansible to deploy with the following arguments to docker-compose which I grabbed from the getting-started/installation page:

      - >-
        pull
      - >-
        run --rm stack is-db init
      - >-
        run --rm stack is-db create-admin-user --id admin --email '{{ admin_email }}'
        --password '{{ admin_password }}'
      - >-
        run --rm stack is-db create-oauth-client --id cli --name 'Command Line Interface'
        --owner admin --no-secret --redirect-uri 'local-callback' --redirect-uri 'code'
      - >-
        run --rm stack is-db create-oauth-client --id console --owner admin
        --secret '{{ client_secret }}' --redirect-uri 'https://{{ fqdn }}/console/oauth/callback'
        --redirect-uri '/console/oauth/callback' --logout-redirect-uri 'https://{{ fqdn }}/console'
        --logout-redirect-uri '/console'

{{ fqdn }}: lora.redacted
{{ client_secret }}: is the same value that’s in ttn-lw-stack.yml

In all likelihood, I’m missing something that’s not obvious to me with the configuration. I appreciate the assistance.

@jceloria

I see what you’re saying; since this is all local, I should be able to update the config to the following:

# Web UI configuration
console:
  ui:
    canonical-url: 'https://{{ fqdn }}:1885/console'
    is:
      base-url: 'https://{{ fqdn }}:1885/api/v3'
    gs:
      base-url: 'https://{{ fqdn }}:1885/api/v3'
    ns:
      base-url: 'https://{{ fqdn }}:1885/api/v3'
    as:
      base-url: 'https://{{ fqdn }}:1885/api/v3'
    js:
      base-url: 'https://{{ fqdn }}:1885/api/v3'
    qrg:
      base-url: 'https://{{ fqdn }}:1885/api/v3'
    edtc:
      base-url: 'https://{{ fqdn }}:1885/api/v3'

  oauth:
    authorize-url: 'https://{{ fqdn }}:1885/oauth/authorize'
    token-url: 'https://{{ fqdn }}:1885/oauth/token'
    client-id: 'console'
    client-secret: '{{ client_secret }}'

# Web UI configuration
console:
  ui:
    canonical-url: 'http://{{ fqdn }}:1885/console'
    is:
      base-url: 'http://{{ fqdn }}:1885/api/v3'
    gs:
      base-url: 'http://{{ fqdn }}:1885/api/v3'
    ns:
      base-url: 'http://{{ fqdn }}:1885/api/v3'
    as:
      base-url: 'http://{{ fqdn }}:1885/api/v3'
    js:
      base-url: 'http://{{ fqdn }}:1885/api/v3'
    qrg:
      base-url: 'http://{{ fqdn }}:1885/api/v3'
    edtc:
      base-url: 'http://{{ fqdn }}:1885/api/v3'

  oauth:
    authorize-url: 'http://{{ fqdn }}:1885/oauth/authorize'
    token-url: 'http://{{ fqdn }}:1885/oauth/token'
    client-id: 'console'
    client-secret: '{{ client_secret }}'

@KrishnaIyer

Exactly. The above looks correct.

@jceloria

Thank you, that was in fact what I needed. After you had mentioned it, I came across #1752. If anyone is curious, this is the scrubbed & stripped down Ansible role that I’m testing with: https://github.com/jceloria/ansible-ttn-stack

Thanks again for everyone’s help.

I am using an Apache CXF client, running in a Windows Java 1.6.0_29-b11 VM, to connect to an IBM mainframe (I believe it is a zSeries) and invoke a SOAP web service running there. The connection is made over SSL/TLS and works fine most of the time.

However, from time to time I get SSL exceptions with a bad record MAC message. Here is the output of the program, run with the javax.net.debug property enabled.

2011-11-16 12:32:37,731 INFO  LoggingOutInterceptor: Outbound Message
---------------------------
ID: 29
Address: https://1.2.3.4/access/servlet/blabla.atk123
Encoding: UTF-8
Content-Type: text/xml
Headers: {Accept=[*/*], SOAPAction=["Blablaaction/ATK123.Execute"]}
Payload: <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"><soap:Body><ATK123.Execute xmlns="Blabla"><Usrid>WA</Usrid><Usrpwd>54321</Usrpwd><Ultautid>9999</Ultautid></ATK123.Execute></soap:Body></soap:Envelope>
--------------------------------------
pool-1-thread-1, setSoTimeout(30000) called
pool-1-thread-1, WRITE: TLSv1 Application Data, length = 321
pool-1-thread-1, WRITE: TLSv1 Application Data, length = 262
pool-1-thread-1, READ: TLSv1 Application Data, length = 483
pool-1-thread-1, READ: TLSv1 Application Data, length = 16148
pool-1-thread-1, READ: TLSv1 Application Data, length = 282
%% Invalidated:  [Session-1, SSL_RSA_WITH_RC4_128_SHA]
pool-1-thread-1, SEND TLSv1 ALERT:  fatal, description = bad_record_mac
pool-1-thread-1, WRITE: TLSv1 Alert, length = 22
pool-1-thread-1, called closeSocket()
pool-1-thread-1, handling exception: javax.net.ssl.SSLException: bad record MAC
2011-11-16 12:32:38,511 WARN  PhaseInterceptorChain: Interceptor for {Blabla}ATK123#{Blabla}Execute has thrown exception, unwinding now
org.apache.cxf.interceptor.Fault: bad record MAC
    at org.apache.cxf.interceptor.LoggingInInterceptor.logging(LoggingInInterceptor.java:144)
    at org.apache.cxf.interceptor.LoggingInInterceptor.handleMessage(LoggingInInterceptor.java:73)
    at org.apache.cxf.phase.PhaseInterceptorChain.doIntercept(PhaseInterceptorChain.java:263)
    at org.apache.cxf.endpoint.ClientImpl.onMessage(ClientImpl.java:797)
.... (more stuff)

Unfortunately, I have no way to modify or debug the endpoint on the server.

What could be causing this?

How can I isolate and fix this behavior?
