
Commit 7e203c6

Fix style errors
1 parent 14d8d96 commit 7e203c6

17 files changed, +256 -217 lines changed
Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements. See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License. You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+
+  <!-- same code as in FileSystem is triggering the same warning. -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3native.S3xLoginHelper" />
+    <Method name="checkPath" />
+    <Bug pattern="ES_COMPARING_STRINGS_WITH_EQ" />
+  </Match>
+  <!-- Redundant null check makes code clearer, future-proof here. -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.S3AFileSystem" />
+    <Method name="s3Exists" />
+    <Bug pattern="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" />
+  </Match>
+  <!-- we are using completable futures, so ignore the Future which submit() returns -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.impl.InputStreamCallbacksImpl" />
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
+  </Match>
+
+  <!--
+    findbugs gets confused by lambda expressions in synchronized methods
+    and considers references to fields to be unsynchronized.
+    As you can't disable the methods individually, we have to disable
+    them for the entire class.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.S3AInputStream"/>
+    <Bug pattern="IS2_INCONSISTENT_SYNC"/>
+  </Match>
+  <!--
+    findbugs reporting RV ignored. Not true.
+    "Return value of S3AReadOpContext.getReadInvoker() ignored,
+    but method has no side effect"
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.S3AInputStream"/>
+    <Method name="reopen"/>
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT"/>
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.S3AFileSystem"/>
+    <Method name="openFileWithOptions"/>
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE"/>
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.S3AFileSystem"/>
+    <Field name="futurePool"/>
+    <Bug pattern="IS2_INCONSISTENT_SYNC"/>
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.s3guard.S3GuardTool$BucketInfo"/>
+    <Method name="run"/>
+    <Bug pattern="SF_SWITCH_FALLTHROUGH"/>
+  </Match>
+
+  <!--
+    Some of the S3A Instrumentation classes increment volatile references from
+    within synchronized contexts; they use volatile to keep the cost
+    of these updates and reading them down.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.S3AInstrumentation$InputStreamStatisticsImpl"/>
+    <Bug pattern="VO_VOLATILE_INCREMENT"/>
+  </Match>
+
+  <!-- Ignore return value from this method call -->
+  <Match>
+    <Class name="org.apache.hadoop.fs.s3a.impl.StoreContext"/>
+    <Method name="submit"/>
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE"/>
+  </Match>
+</FindBugsFilter>
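
The comments in this filter carry the reasoning for each suppression. The VO_VOLATILE_INCREMENT case is the subtlest: an increment of a volatile field is a non-atomic read-modify-write, so FindBugs/SpotBugs flags it, but the statistics classes only ever increment inside synchronized code and keep the field volatile so that lock-free readers see fresh values. A minimal sketch of that pattern, with hypothetical names rather than the actual Hadoop classes:

// Sketch of the situation the VO_VOLATILE_INCREMENT suppression covers.
// Writers increment under the instance lock; volatile only provides
// visibility for lock-free readers. Names are illustrative, not Hadoop's.
public class StreamStatisticsSketch {
  // Read without a lock by metrics reporters; written only while synchronized.
  private volatile long bytesRead;

  // The increment is a read-modify-write; it is safe only because every
  // writer holds the same lock. The tool still reports VO_VOLATILE_INCREMENT.
  public synchronized void incrementBytesRead(long delta) {
    bytesRead += delta;
  }

  // Lock-free read; volatile guarantees the latest written value is visible.
  public long getBytesRead() {
    return bytesRead;
  }
}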

hadoop-tools/hadoop-gcp/src/main/java/org/apache/hadoop/fs/gs/Constants.java

Lines changed: 3 additions & 1 deletion
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.fs.gs;
 
-class Constants {
+final class Constants {
+  private Constants() {}
+
   // URI scheme for GCS.
   static final String SCHEME = "gs";
   static final String PATH_DELIMITER = "/";
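
This is the standard non-instantiable constants-holder idiom: final so the class cannot be subclassed, with a private constructor so it cannot be instantiated. Pieced together from the hunk above, the class now reads roughly as follows (only the members visible in this diff are shown):

package org.apache.hadoop.fs.gs;

// Net result of the hunk above: a final, non-instantiable constants holder.
final class Constants {
  private Constants() {}

  // URI scheme for GCS.
  static final String SCHEME = "gs";
  static final String PATH_DELIMITER = "/";
}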

hadoop-tools/hadoop-gcp/src/main/java/org/apache/hadoop/fs/gs/CreateOptions.java

Lines changed: 12 additions & 60 deletions
@@ -29,17 +29,15 @@
 /**
  * Options that can be specified when creating a file in the {@link GoogleCloudStorageFileSystem}.
  */
-class CreateOptions {
+final class CreateOptions {
   private final ImmutableMap<String, byte[]> attributes;
   private final String contentType;
   private final boolean ensureNoDirectoryConflict;
   private final Duration interval;
   private final long overwriteGenerationId;
   private final WriteMode mode;
 
-  public static final CreateOptions DEFAULT = builder().build();
-
-  public String getContentEncoding() {
+  String getContentEncoding() {
     return contentEncoding;
   }
 
@@ -51,19 +49,15 @@ private CreateOptions(CreateOperationOptionsBuilder builder) {
     this.ensureNoDirectoryConflict = builder.ensureNoDirectoryConflict;
     this.interval = builder.interval;
     this.overwriteGenerationId = builder.overwriteGenerationId;
-    this.mode = builder.mode;
+    this.mode = builder.writeMode;
     this.contentEncoding = builder.contentEncoding;
   }
 
-  public boolean isOverwriteExisting() {
+  boolean isOverwriteExisting() {
     return this.mode == WriteMode.OVERWRITE;
   }
 
   enum WriteMode {
-    /**
-     * Write new bytes to the end of the existing file rather than the beginning.
-     */
-    APPEND,
     /**
      * Creates a new file for write and fails if file already exists.
      */
@@ -74,45 +68,29 @@ enum WriteMode {
     OVERWRITE
   }
 
-  public static CreateOperationOptionsBuilder builder() {
+  static CreateOperationOptionsBuilder builder() {
     return new CreateOperationOptionsBuilder();
   }
 
   /**
    * Extended attributes to set when creating a file.
    */
-  public ImmutableMap<String, byte[]> getAttributes() {
+  ImmutableMap<String, byte[]> getAttributes() {
     return attributes;
   }
 
   /**
    * Content-type to set when creating a file.
    */
   @Nullable
-  public String getContentType() {
+  String getContentType() {
     return contentType;
   }
 
-  /**
-   * Configures the minimum time interval (milliseconds) between consecutive sync/flush calls
-   */
-  public Duration getMinSyncInterval() {
-    return interval;
-  }
-
-  /**
-   * If true, makes sure there isn't already a directory object of the same name. If false, you run
-   * the risk of creating hard-to-cleanup/access files whose names collide with directory names. If
-   * already sure no such directory exists, then this is safe to set for improved performance.
-   */
-  public boolean isEnsureNoDirectoryConflict() {
-    return ensureNoDirectoryConflict;
-  }
-
   /**
    * Whether to overwrite an existing file with the same name.
    */
-  public WriteMode getWriteMode() {
+  WriteMode getWriteMode() {
     return mode;
   }
 
@@ -123,7 +101,7 @@ public WriteMode getWriteMode() {
    * only be overwritten by the newly created file if its generation matches this provided
    * generationId.
    */
-  public long getOverwriteGenerationId() {
+  long getOverwriteGenerationId() {
     return overwriteGenerationId;
   }
 
@@ -133,38 +111,12 @@ static class CreateOperationOptionsBuilder {
     private boolean ensureNoDirectoryConflict = true;
     private Duration interval = Duration.ZERO;
     private long overwriteGenerationId = StorageResourceId.UNKNOWN_GENERATION_ID;
-    private WriteMode mode = WriteMode.CREATE_NEW;
+    private WriteMode writeMode = WriteMode.CREATE_NEW;
 
     private String contentEncoding = null;
 
-    public CreateOperationOptionsBuilder setAttributes(Map<String, byte[]> attributes) {
-      this.attributes = attributes;
-      return this;
-    }
-
-    public CreateOperationOptionsBuilder setContentType(String contentType) {
-      this.contentType = contentType;
-      return this;
-    }
-
-    public CreateOperationOptionsBuilder setEnsureNoDirectoryConflict(
-        boolean ensureNoDirectoryConflict) {
-      this.ensureNoDirectoryConflict = ensureNoDirectoryConflict;
-      return this;
-    }
-
-    public CreateOperationOptionsBuilder setMinSyncInterval(Duration interval) {
-      this.interval = interval;
-      return this;
-    }
-
-    public CreateOperationOptionsBuilder setOverwriteGenerationId(long overwriteGenerationId) {
-      this.overwriteGenerationId = overwriteGenerationId;
-      return this;
-    }
-
-    public CreateOperationOptionsBuilder setWriteMode(WriteMode mode) {
-      this.mode = mode;
+    CreateOperationOptionsBuilder setWriteMode(WriteMode mode) {
+      this.writeMode = mode;
       return this;
     }
 
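
With this change only setWriteMode remains on the builder (the unused setters are removed) and the accessors become package-private. A usage sketch from inside the org.apache.hadoop.fs.gs package, assuming the builder still exposes the build() method implied by the removed DEFAULT = builder().build() line; the caller class here is hypothetical:

package org.apache.hadoop.fs.gs;

// Illustrative caller only; the class name is hypothetical. Assumes
// CreateOperationOptionsBuilder keeps a build() method, as the removed
// "DEFAULT = builder().build()" line suggests.
final class CreateOptionsUsageSketch {
  private CreateOptionsUsageSketch() {}

  static CreateOptions overwriteOptions() {
    // Only the write mode is set explicitly; everything else keeps the
    // builder defaults (Duration.ZERO interval, no attributes, and so on).
    return CreateOptions.builder()
        .setWriteMode(CreateOptions.WriteMode.OVERWRITE)
        .build();
  }
}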

hadoop-tools/hadoop-gcp/src/main/java/org/apache/hadoop/fs/gs/ErrorTypeExtractor.java

Lines changed: 5 additions & 41 deletions
@@ -17,23 +17,22 @@
 package org.apache.hadoop.fs.gs;
 
 import io.grpc.Status;
-import io.grpc.StatusRuntimeException;
-
-import javax.annotation.Nullable;
 
 /**
  * Implementation for {@link ErrorTypeExtractor} for exception specifically thrown from gRPC path.
  */
-class ErrorTypeExtractor {
+final class ErrorTypeExtractor {
 
   enum ErrorType {
-    NOT_FOUND, OUT_OF_RANGE, ALREADY_EXISTS, FAILED_PRECONDITION, INTERNAL, RESOURCE_EXHAUSTED, UNAVAILABLE, UNKNOWN
+    NOT_FOUND, OUT_OF_RANGE, ALREADY_EXISTS, FAILED_PRECONDITION, INTERNAL, RESOURCE_EXHAUSTED,
+    UNAVAILABLE, UNKNOWN
   }
 
   // public static final ErrorTypeExtractor INSTANCE = new ErrorTypeExtractor();
 
   private static final String BUCKET_ALREADY_EXISTS_MESSAGE =
-      "FAILED_PRECONDITION: Your previous request to create the named bucket succeeded and you already own it.";
+      "FAILED_PRECONDITION: Your previous request to create the named bucket succeeded and you "
+          + "already own it.";
 
   private ErrorTypeExtractor() {
   }
@@ -58,39 +57,4 @@ static ErrorType getErrorType(Exception error) {
         return ErrorType.UNKNOWN;
     }
   }
-
-  static boolean bucketAlreadyExists(Exception e) {
-    ErrorType errorType = getErrorType(e);
-    if (errorType == ErrorType.ALREADY_EXISTS) {
-      return true;
-    }
-    // The gRPC API currently throws a FAILED_PRECONDITION status code instead of ALREADY_EXISTS,
-    // so we handle both these conditions in the interim.
-    // TODO: remove once the status codes are fixed.
-    else if (errorType == ErrorType.FAILED_PRECONDITION) {
-      StatusRuntimeException statusRuntimeException = getStatusRuntimeException(e);
-      return statusRuntimeException != null && BUCKET_ALREADY_EXISTS_MESSAGE.equals(
-          statusRuntimeException.getMessage());
-    }
-    return false;
-  }
-
-  /**
-   * Extracts StatusRuntimeException from the Exception, if it exists.
-   */
-  @Nullable
-  static private StatusRuntimeException getStatusRuntimeException(Exception e) {
-    Throwable cause = e;
-    // Keeping a counter to break early from the loop to avoid infinite loop condition due to
-    // cyclic exception chains.
-    int currentExceptionDepth = 0, maxChainDepth = 1000;
-    while (cause != null && currentExceptionDepth < maxChainDepth) {
-      if (cause instanceof StatusRuntimeException) {
-        return (StatusRuntimeException) cause;
-      }
-      cause = cause.getCause();
-      currentExceptionDepth++;
-    }
-    return null;
-  }
 }
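
The deleted bucketAlreadyExists/getStatusRuntimeException pair walked the exception cause chain looking for a StatusRuntimeException, bounding the walk at 1000 links so a cyclic cause chain could not loop forever. A self-contained sketch of that walk, generalized to any target exception type; the class and method names below are illustrative, not part of the Hadoop code:

import javax.annotation.Nullable;

// Generalized form of the cause-chain walk the deleted helper performed:
// follow getCause() links, return the first match, and cap the depth so a
// cyclic exception chain cannot cause an infinite loop.
final class CauseChainSketch {
  private static final int MAX_CHAIN_DEPTH = 1000;

  private CauseChainSketch() {}

  @Nullable
  static <T extends Throwable> T findCause(Throwable error, Class<T> type) {
    Throwable cause = error;
    int depth = 0;
    while (cause != null && depth < MAX_CHAIN_DEPTH) {
      if (type.isInstance(cause)) {
        return type.cast(cause);
      }
      cause = cause.getCause();
      depth++;
    }
    return null;
  }
}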
