Skip to content

Commit 6bc6203

Browse files
authored
Fix a few typos in comments (#21525)
1 parent 8bf6a58 commit 6bc6203

File tree

13 files changed

+19
-19
lines changed

13 files changed

+19
-19
lines changed

keras/src/backend/jax/numpy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ def kaiser(x, beta):
6464

6565

6666
def bincount(x, weights=None, minlength=0, sparse=False):
67-
# Note: bincount is never tracable / jittable because the output shape
67+
# Note: bincount is never traceable / jittable because the output shape
6868
# depends on the values in x.
6969
if sparse or isinstance(x, jax_sparse.BCOO):
7070
if isinstance(x, jax_sparse.BCOO):

keras/src/layers/attention/grouped_query_attention_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -293,7 +293,7 @@ def test_masking(self, use_causal_mask):
293293
)
294294
def test_correctness(self, flash_attention):
295295
if flash_attention:
296-
# Let the backend decide whether to use flase attention
296+
# Let the backend decide whether to use flash attention
297297
enable_flash_attention()
298298
dtype = "float16" # Flash attention only accepts float16/bfloat16
299299
head_dim = 8 # key_dim % 8 == 0 to enable flash attention

keras/src/layers/attention/multi_head_attention_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ def test_no_warning_with_keras_mask(self):
395395
)
396396
def test_correctness(self, flash_attention):
397397
if flash_attention:
398-
# Let the backend decide whether to use flase attention
398+
# Let the backend decide whether to use flash attention
399399
enable_flash_attention()
400400
dtype = "float16" # Flash attention only accepts float16/bfloat16
401401

keras/src/layers/preprocessing/hashing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,7 @@ def call(self, inputs):
214214

215215
inputs = tf_utils.ensure_tensor(inputs)
216216
if self.output_mode == "one_hot" and inputs.shape[-1] == 1:
217-
# One hot only unpranks if the final dimension is not 1.
217+
# One hot only upranks if the final dimension is not 1.
218218
inputs = tf_backend.numpy.squeeze(inputs, axis=-1)
219219
if isinstance(inputs, tf.SparseTensor):
220220
indices = tf.SparseTensor(

keras/src/metrics/confusion_metrics.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1494,16 +1494,16 @@ def result(self):
14941494
# 1) Both measures diverge when there are no negative values;
14951495
# 2) Both measures diverge when there are no true positives;
14961496
# 3) Recall gain becomes negative when the recall is lower than the
1497-
# label average (i.e. when more negative exampless are
1497+
# label average (i.e. when more negative examples are
14981498
# classified positive than real positives).
14991499
#
15001500
# We ignore case 1 as it is easily understood that metrics would be
15011501
# badly defined then. For case 2 we set recall_gain to 0 and
15021502
# precision_gain to 1. For case 3 we set recall_gain to 0. These
1503-
# fixes will result in an overstimation of the AUCfor estimators
1503+
# fixes will result in an overestimation of the AUC for estimators
15041504
# that are anti-correlated with the label (at some threshold).
15051505

1506-
# The scaling factor $\frac{P}{N}$ that is used to for mboth gain
1506+
# The scaling factor $\frac{P}{N}$ that is used for both gain
15071507
# values.
15081508
scaling_factor = ops.divide_no_nan(
15091509
ops.add(self.true_positives, self.false_negatives),

keras/src/metrics/confusion_metrics_test.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1407,7 +1407,7 @@ def test_weighted_prgain_majoring(self):
14071407
)
14081408

14091409
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
1410-
# scaling_facor (P/N) = 7/3
1410+
# scaling_factor (P/N) = 7/3
14111411
# recall_gain = 1 - 7/3 [0/7, 3/4, 7/0] = [1, -3/4, -inf] -> [1, 0, 0]
14121412
# precision_gain = 1 - 7/3 [3/7, 0/4, 0/0] = [0, 1, NaN] -> [0, 1, 1]
14131413
# heights = [max(0, 1), max(1, 1)] = [1, 1]
@@ -1426,7 +1426,7 @@ def test_weighted_prgain_minoring(self):
14261426
)
14271427

14281428
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
1429-
# scaling_facor (P/N) = 7/3
1429+
# scaling_factor (P/N) = 7/3
14301430
# recall_gain = 1 - 7/3 [0/7, 3/4, 7/0] = [1, -3/4, -inf] -> [1, 0, 0]
14311431
# precision_gain = 1 - 7/3 [3/7, 0/4, 0/0] = [0, 1, NaN] -> [0, 1, 1]
14321432
# heights = [min(0, 1), min(1, 1)] = [0, 1]
@@ -1443,7 +1443,7 @@ def test_weighted_prgain_interpolation(self):
14431443
)
14441444

14451445
# tp = [7, 4, 0], fp = [3, 0, 0], fn = [0, 3, 7], tn = [0, 3, 3]
1446-
# scaling_facor (P/N) = 7/3
1446+
# scaling_factor (P/N) = 7/3
14471447
# recall_gain = 1 - 7/3 [0/7, 3/4, 7/0] = [1, -3/4, -inf] -> [1, 0, 0]
14481448
# precision_gain = 1 - 7/3 [3/7, 0/4, 0/0] = [0, 1, NaN] -> [0, 1, 1]
14491449
# heights = [(0+1)/2, (1+1)/2] = [0.5, 1]
@@ -1461,7 +1461,7 @@ def test_prgain_interpolation(self):
14611461
result = auc_obj(y_true, y_pred)
14621462

14631463
# tp = [5, 3, 0], fp = [5, 1, 0], fn = [0, 2, 5], tn = [0, 4, 4]
1464-
# scaling_facor (P/N) = 5/5 = 1
1464+
# scaling_factor (P/N) = 5/5 = 1
14651465
# recall_gain = 1 - [0/5, 2/3, 5/0] = [1, 1/3, -inf] -> [1, 1/3, 0]
14661466
# precision_gain = 1 - [5/5, 1/3, 0/0] = [1, 1/3, NaN] -> [0, 2/3, 1]
14671467
# heights = [(0+2/3)/2, (2/3+1)/2] = [0.333333, 0.833333]

keras/src/metrics/metrics_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -318,7 +318,7 @@ def is_evenly_distributed_thresholds(thresholds):
318318
"""Check if the thresholds list is evenly distributed.
319319
320320
We could leverage evenly distributed thresholds to use less memory when
321-
calculate metrcis like AUC where each individual threshold need to be
321+
calculating metrics like AUC where each individual threshold needs to be
322322
evaluated.
323323
324324
Args:

keras/src/models/functional.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -298,7 +298,7 @@ def _standardize_inputs(self, inputs):
298298
self._inputs_struct, dict
299299
):
300300
# This is to avoid warning
301-
# when we have reconciable dict/list structs
301+
# when we have reconcilable dict/list structs
302302
if hasattr(self._inputs_struct, "__len__") and all(
303303
isinstance(i, backend.KerasTensor) for i in self._inputs_struct
304304
):

keras/src/ops/math_test.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -857,7 +857,7 @@ def test_istft(
857857
)
858858
if backend.backend() in ("numpy", "jax", "torch"):
859859
# these backends have different implementation for the boundary of
860-
# the output, so we need to truncate 5% befroe assertAllClose
860+
# the output, so we need to truncate 5% before assertAllClose
861861
truncated_len = int(output.shape[-1] * 0.05)
862862
output = output[..., truncated_len:-truncated_len]
863863
ref = ref[..., truncated_len:-truncated_len]
@@ -889,7 +889,7 @@ def test_istft(
889889
)
890890
if backend.backend() in ("numpy", "jax", "torch"):
891891
# these backends have different implementation for the boundary of
892-
# the output, so we need to truncate 5% befroe assertAllClose
892+
# the output, so we need to truncate 5% before assertAllClose
893893
truncated_len = int(output.shape[-1] * 0.05)
894894
output = output[..., truncated_len:-truncated_len]
895895
ref = ref[..., truncated_len:-truncated_len]

keras/src/testing/test_case.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -525,7 +525,7 @@ def __call__(self, y_true, y_pred, sample_weight=None):
525525
)
526526

527527
# Ensure that the subclass layer doesn't mark itself as built
528-
# when `build` is overriden.
528+
# when `build` is overridden.
529529

530530
class ModifiedBuildLayer(layer_cls):
531531
def build(self, *args, **kwargs):

0 commit comments

Comments
 (0)