Add flake8-builtins to pre-commit and fix errors (#7105)

Ignore `A003`

Co-authored-by: Christian Clauss <cclauss@me.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Dhruv Manilawala <dhruvmanila@gmail.com>
This commit is contained in:
Caeden
2022-10-13 15:23:59 +01:00
committed by GitHub
parent e661b98829
commit d5a9f649b8
31 changed files with 113 additions and 106 deletions

View File

@@ -182,7 +182,7 @@ class TwoHiddenLayerNeuralNetwork:
loss = numpy.mean(numpy.square(output - self.feedforward()))
print(f"Iteration {iteration} Loss: {loss}")
-    def predict(self, input: numpy.ndarray) -> int:
+    def predict(self, input_arr: numpy.ndarray) -> int:
"""
Predict's the output for the given input values using
the trained neural network.
@@ -201,7 +201,7 @@ class TwoHiddenLayerNeuralNetwork:
"""
# Input values for which the predictions are to be made.
-        self.array = input
+        self.array = input_arr
self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
@@ -264,7 +264,7 @@ def example() -> int:
True
"""
# Input values.
-    input = numpy.array(
+    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
@@ -282,7 +282,9 @@ def example() -> int:
output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
# Calling neural network class.
-    neural_network = TwoHiddenLayerNeuralNetwork(input_array=input, output_array=output)
+    neural_network = TwoHiddenLayerNeuralNetwork(
+        input_array=test_input, output_array=output
+    )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.

View File

@@ -140,24 +140,24 @@ class CNN:
focus_list = np.asarray(focus1_list)
return focus_list, data_featuremap
def pooling(self, featuremaps, size_pooling, type="average_pool"):
def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
# pooling process
size_map = len(featuremaps[0])
size_pooled = int(size_map / size_pooling)
featuremap_pooled = []
for i_map in range(len(featuremaps)):
-            map = featuremaps[i_map]
+            feature_map = featuremaps[i_map]
map_pooled = []
for i_focus in range(0, size_map, size_pooling):
for j_focus in range(0, size_map, size_pooling):
-                    focus = map[
+                    focus = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if type == "average_pool":
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(focus))
elif type == "max_pooling":
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(focus))
map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)

View File

@@ -182,7 +182,7 @@ samples = [
[0.2012, 0.2611, 5.4631],
]
-exit = [
+target = [
-1,
-1,
-1,
@@ -222,7 +222,7 @@ if __name__ == "__main__":
doctest.testmod()
network = Perceptron(
-        sample=samples, target=exit, learning_rate=0.01, epoch_number=1000, bias=-1
+        sample=samples, target=target, learning_rate=0.01, epoch_number=1000, bias=-1
)
network.training()
print("Finished training perceptron")