Commit 2fc8edc0 authored 1 year ago by Simon Adick
feat: PLA and Pocket algorithm implemented
parent e38de94b
Showing 2 changed files with 109 additions and 0 deletions:
algorithm/pla/perceptron_learning_algorithm.py (+72, −0)
algorithm/pla/test_pla.py (+37, −0)
algorithm/pla/perceptron_learning_algorithm.py (new file, mode 100644, +72 −0)
import random

import numpy as np

from algorithm.pla.perceptron import Perceptron


def train(perceptron: Perceptron, training_data: list, max_iterations: int, learning_rate: float):
    """
    Trains a perceptron with the perceptron learning algorithm (PLA).
    """
    # Repeat until no mistakes are made or the maximum number of iterations is reached
    for i in range(max_iterations):
        # Go through all training pairs. A pair consists of a feature vector and the correct class
        wrong_classifications = []
        for features, correct_class in training_data:
            result = perceptron.classify(features)
            # If the result is wrong, remember it
            if result != correct_class:
                wrong_classifications.append((features, correct_class, result))
        # No mistakes? We can stop training
        if len(wrong_classifications) == 0:
            break
        # Mistakes after all? Pick a random misclassification and update the weights
        features, correct_class, result = random.choice(wrong_classifications)
        update_weight(perceptron, features, correct_class, result, learning_rate)


def train_pocket(perceptron: Perceptron, training_data: list, max_iterations: int, learning_rate: float):
    """
    Trains a perceptron with the pocket algorithm.
    """
    # Initialise the buffer ("pocket") for the best result found so far
    min_false_classifications = len(training_data)
    best_weights = perceptron.weights
    # Repeat until no mistakes are made or the maximum number of iterations is reached
    for i in range(max_iterations):
        # Go through all training pairs. A pair consists of a feature vector and the correct class
        wrong_classifications = []
        for features, correct_class in training_data:
            result = perceptron.classify(features)
            # If the result is wrong, remember it
            if result != correct_class:
                wrong_classifications.append((features, correct_class, result))
        # Fewer mistakes in this pass than in any pass before?
        # --> Put the current weights into the pocket
        if len(wrong_classifications) < min_false_classifications:
            min_false_classifications = len(wrong_classifications)
            best_weights = perceptron.weights
        # No mistakes? We can stop training
        if len(wrong_classifications) == 0:
            break
        # Mistakes after all? Pick a random misclassification and update the weights
        features, correct_class, result = random.choice(wrong_classifications)
        update_weight(perceptron, features, correct_class, result, learning_rate)
    # Restore the best weights seen during training
    perceptron.weights = best_weights


def update_weight(perceptron, features, correct_class, classification, learning_rate):
    # Perceptron learning rule: w <- w + learning_rate * (target - output) * [1, x]
    correction_factor = (correct_class - classification)
    multiplied_features = learning_rate * correction_factor * np.array([1] + features)
    new_weights = np.add(multiplied_features, np.array(perceptron.weights))
    perceptron.weights = new_weights.tolist()
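For reference, a minimal sketch of how train_pocket might be called. The Perceptron(weights, threshold, transfer_function) constructor and the normalized_tanh transfer function are taken from test_pla.py below; the toy data, the ±1 class labels, the initial weights and the hyperparameter values are illustrative assumptions, not part of this commit.

from algorithm.pla.perceptron import Perceptron
from algorithm.pla.transfer_functions import normalized_tanh
from algorithm.pla.perceptron_learning_algorithm import train_pocket

# Each training pair is (feature vector, correct class), matching the
# (features, correct_class) unpacking in train/train_pocket.
# The data and ±1 labels below are made up for illustration.
training_data = [
    ([0, 0], -1),
    ([0, 1], -1),
    ([1, 0], -1),
    ([1, 1], 1),
]

# Assumed constructor signature, as used in test_pla.py
perceptron = Perceptron([0.0, 0.0], 0.5, normalized_tanh)
train_pocket(perceptron, training_data, max_iterations=100, learning_rate=0.1)
print(perceptron.weights)  # best weights kept in the pocket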
algorithm/pla/test_pla.py (new file, mode 100644, +37 −0)
from algorithm.pla.perceptron import Perceptron
from algorithm.pla.transfer_functions import normalized_tanh
from algorithm.pla.perceptron_learning_algorithm import train_pocket, update_weight
from server.testserver import get_testdata, send_result


def test_pla():
    response = get_testdata("perceptron_training")
    learning_rate = response['learning-rate']
    initial_weights = response['initial-weights']
    # The threshold is the first value in the list.
    # The remaining values are the other weights
    threshold = initial_weights[0] * -1
    weights = initial_weights[1:]
    perceptron = Perceptron(weights, threshold, normalized_tanh)
    results = []
    for data in response['training-data']:
        # Extract the individual training values
        test_id = data['id']
        features = data['input']
        correct_class = data['class']
        classification = perceptron.classify(features)
        update_weight(perceptron, features, correct_class, classification, learning_rate)
        # Append the result to the list
        results.append({'id': test_id, 'weights': perceptron.weights})
    send_result('perceptron_training', {'session': response['session'], 'results': results})


if __name__ == '__main__':
    test_pla()