Revert "Add useful error message for uploading files. (#339)" (#366)

This reverts commit e155d015bcc4c9eb9978572422e3404f26220700.

gRPC errors are... hard. And the gRPC team isn't willing to support
wrapping, which leads to complicated handling. See
https://github.com/grpc/grpc-go/issues/3616.

I'll send a different PR to add stack traces to these gRPC errors.
diff --git a/go/pkg/client/cas.go b/go/pkg/client/cas.go
index 34bc81b..5d7ca23 100644
--- a/go/pkg/client/cas.go
+++ b/go/pkg/client/cas.go
@@ -397,7 +397,7 @@
 
 	missing, err := c.MissingBlobs(ctx, dgs)
 	if err != nil {
-		return nil, 0, errors.Wrap(err, "error while determining missing blobs")
+		return nil, 0, err
 	}
 	LogContextInfof(ctx, log.Level(2), "%d items to store", len(missing))
 	var batches [][]digest.Digest
@@ -418,7 +418,7 @@
 		i, batch := i, batch // https://golang.org/doc/faq#closures_and_goroutines
 		eg.Go(func() error {
 			if err := c.casUploaders.Acquire(eCtx, 1); err != nil {
-				return errors.Wrap(err, "error while acquiring semaphore to upload a batch")
+				return err
 			}
 			defer c.casUploaders.Release(1)
 			if i%logInterval == 0 {
@@ -431,12 +431,12 @@
 					ue := ueList[dg]
 					ch, err := chunker.New(ue, false, int(c.ChunkMaxSize))
 					if err != nil {
-						return errors.Wrapf(err, "error while creating chunks for digest %v", dg)
+						return err
 					}
 
 					data, err := ch.FullData()
 					if err != nil {
-						return errors.Wrapf(err, "error while fetching FullData for digest %v", dg)
+						return err
 					}
 
 					if dg.Size != int64(len(data)) {
@@ -447,7 +447,7 @@
 					atomic.AddInt64(&totalBytesTransferred, int64(len(data)))
 				}
 				if err := c.BatchWriteBlobs(eCtx, bchMap); err != nil {
-					return errors.Wrap(err, "error while doing a batch write")
+					return err
 				}
 			} else {
 				LogContextInfof(ctx, log.Level(3), "Uploading single blob with digest %s", batch[0])
@@ -455,11 +455,11 @@
 				dg := ue.Digest
 				ch, err := chunker.New(ue, c.shouldCompress(dg.Size), int(c.ChunkMaxSize))
 				if err != nil {
-					return errors.Wrapf(err, "error while creating chunks for digest %v", dg)
+					return err
 				}
 				written, err := c.writeChunked(eCtx, c.writeRscName(dg), ch)
 				if err != nil {
-					return errors.Wrapf(err, "error while doing a chunked write to %v for %v", ue.Path, dg)
+					return fmt.Errorf("failed to upload %s: %w", ue.Path, err)
 				}
 				atomic.AddInt64(&totalBytesTransferred, written)
 			}
@@ -476,7 +476,8 @@
 	if err != nil {
 		LogContextInfof(ctx, log.Level(2), "Upload error: %v", err)
 	}
-	return missing, totalBytesTransferred, errors.Wrapf(err, "error while doing a non-unified upload")
+
+	return missing, totalBytesTransferred, err
 }
 
 func (c *Client) cancelPendingRequests(reqs []*uploadRequest) {
@@ -539,7 +540,7 @@
 			return nil, 0, ctx.Err()
 		case resp := <-wait:
 			if resp.err != nil {
-				return nil, 0, errors.Wrapf(resp.err, "error while uploading blob with digest %v", resp.digest)
+				return nil, 0, resp.err
 			}
 			if resp.missing {
 				missing = append(missing, resp.digest)
diff --git a/go/pkg/client/cas_test.go b/go/pkg/client/cas_test.go
index 6381a4c..23e490a 100644
--- a/go/pkg/client/cas_test.go
+++ b/go/pkg/client/cas_test.go
@@ -686,7 +686,7 @@
 			for i := 0; i < 50; i++ {
 				eg.Go(func() error {
 					// Verify that we got a context cancellation error. Sometimes, the request can succeed, if the original thread takes a while to run.
-					if _, _, err := c.UploadIfMissing(cCtx, input...); err != nil && !errors.Is(err, context.Canceled) {
+					if _, _, err := c.UploadIfMissing(cCtx, input...); err != nil && err != context.Canceled {
 						return fmt.Errorf("c.UploadIfMissing(ctx, input) gave error %v, expected context canceled", err)
 					}
 					return nil