Setting Up and Managing a Kubernetes Cluster with kubeadm on Rocky Linux

This guide details how to set up and manage a Kubernetes cluster on Rocky Linux using kubeadm. It combines my own hands-on experience with material from several reference documents. I have also included the detailed command list captured with the history command. I hope you find it useful.

[salvador@vbox ~]$ su - root
Password:

[root@vbox ~]# nano /etc/hostname
[root@vbox ~]#
[root@vbox ~]# cat /etc/hostname
k8s-master1

K8s - Day 1 - Installation Steps

systemctl restart systemd-hostnamed.service #activate hostname change


hostname -f #check if hostname changed properly
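As an alternative to editing /etc/hostname by hand, the same change can be made with hostnamectl (standard on systemd distributions; shown here only as an optional shortcut):

hostnamectl set-hostname k8s-master1
hostname -f #verify the new name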


ip a #check if the MAC address of each machine is different

[root@vbox ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 08:00:27:fa:f0:e8 brd ff:ff:ff:ff:ff:ff
inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic noprefixroute enp0s3
valid_lft 86293sec preferred_lft 86293sec
inet6 fd00::a00:27ff:fefa:f0e8/64 scope global dynamic noprefixroute
valid_lft 86295sec preferred_lft 14295sec
inet6 fe80::a00:27ff:fefa:f0e8/64 scope link noprefixroute
valid_lft forever preferred_lft forever
3: enp0s8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 08:00:27:c5:f8:cf brd ff:ff:ff:ff:ff:ff
inet 192.168.56.109/24 brd 192.168.56.255 scope global dynamic noprefixroute enp0s8
valid_lft 493sec preferred_lft 493sec
inet6 fe80::a00:27ff:fec5:f8cf/64 scope link noprefixroute
valid_lft forever preferred_lft forever


cat /sys/class/dmi/id/product_uuid #check if the product ID of each machine is different
[root@vbox ~]# cat /sys/class/dmi/id/product_uuid
e717a7ad-5313-184c-abfe-69d6d7b65f17

systemd-machine-id-setup #if needed, creates new ID
cat /sys/class/dmi/id/product_uuid
cat /etc/machine-id
[root@k8s-master1 ~]# cat /etc/machine-id
04abb284efe646b4b2e47a7e009c6cf7
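If the machines were cloned from the same VM image they can end up with identical machine IDs; a minimal sketch for regenerating the ID on a clone (only needed if the IDs really do collide):

rm -f /etc/machine-id
systemd-machine-id-setup #writes a fresh /etc/machine-id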

firewall-cmd --permanent --add-port=6443/tcp
firewall-cmd --permanent --add-port=2379-2380/tcp
firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --permanent --add-port=10259/tcp
firewall-cmd --permanent --add-port=10257/tcp
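Rules added with --permanent are only written to the persistent configuration; reload firewalld so they also take effect in the running firewall, and list the ports to verify:

firewall-cmd --reload
firewall-cmd --list-ports #verify the ports are open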

ufw allow 6443/tcp
ufw allow 2379:2380/tcp
ufw allow 10250/tcp
ufw allow 10259/tcp
ufw allow 10257/tcp

On Red Hat based distributions (run on node1 and node2):

firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --permanent --add-port=30000-32767/tcp

On Debian based distributions (run on node1 and node2):

ufw allow 10250/tcp
ufw allow 30000:32767/tcp

After the installation is complete, you can test the relevant ports with the nc command:

nc 127.0.0.1 6443

systemctl disable --now firewalld.service #disable the firewall entirely (optional; normally not needed if the ports above are open)
[root@vbox ~]# systemctl disable --now firewalld.service
Removed "/etc/systemd/system/multi-user.target.wants/firewalld.service".
Removed "/etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service".
[root@vbox ~]#
[root@vbox ~]#
[root@vbox ~]#
[root@vbox ~]# systemctl status firewalld.service
○ firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)

Feb 22 21:14:08 localhost systemd[1]: Starting firewalld - dynamic firewall daemon…
Feb 22 21:14:09 localhost systemd[1]: Started firewalld - dynamic firewall daemon.
Feb 22 21:22:39 k8s-master1 systemd[1]: Stopping firewalld - dynamic firewall daemon…
Feb 22 21:22:39 k8s-master1 systemd[1]: firewalld.service: Deactivated successfully.
Feb 22 21:22:39 k8s-master1 systemd[1]: Stopped firewalld - dynamic firewall daemon.


getenforce #show the current SELinux mode
[root@vbox ~]# getenforce
Enforcing

setenforce 0 #temporarily disable SELinux (change only in current session runtime)
[root@vbox ~]# setenforce 0
[root@vbox ~]# getenforce
Permissive


nano /etc/selinux/config #set the line below so the change persists across reboots

SELINUX=disabled

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
# See also:
# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/using_selinux/changing-selinux-states-and-modes_using-selinux#changing-selinux-modes-at-boot-time_changi>
#
# NOTE: Up to RHEL 8 release included, SELINUX=disabled would also
# fully disable SELinux during boot. If you need a system with SELinux
# fully disabled instead of SELinux running with no policy loaded, you
# need to pass selinux=0 to the kernel command line. You can use grubby
# to persistently set the bootloader to boot with selinux=0:
#
#    grubby --update-kernel ALL --args selinux=0
#
# To revert back to SELinux enabled:
#
#    grubby --update-kernel ALL --remove-args selinux
#
# SELINUX=enforcing
SELINUX=disabled
# SELINUXTYPE= can take one of these three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted


swapoff -a #disable swap (required: the kubelet does not support swap by default)

free -h #check that swap is disabled
[root@vbox ~]# swapoff -a
[root@vbox ~]# free -h
total used free shared buff/cache available
Mem: 3.8Gi 412Mi 3.4Gi 8.0Mi 189Mi 3.4Gi
Swap: 0B 0B 0B

nano /etc/fstab #comment out the swap line so swap stays disabled across reboots

#
# /etc/fstab
# Created by anaconda on Sat Feb 22 18:07:37 2025
#
# Accessible filesystems, by reference, are maintained under '/dev/disk/'.
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info.
#
# After editing this file, run 'systemctl daemon-reload' to update systemd
# units generated from this file.
#
/dev/mapper/rl_vbox-root / xfs defaults 0 0
UUID=fa441483-708c-4683-9ee1-b04dd0089b17 /boot xfs defaults 0 0
#/dev/mapper/rl_vbox-swap none swap defaults 0 0


yum update # install the latest updates
Complete!
[root@vbox ~]# yum upgrade
Last metadata expiration check: 0:03:39 ago on Sat 22 Feb 2025 09:28:02 PM +03.
Dependencies resolved.
Nothing to do.
Complete!

[root@vbox ~]# systemctl enable --now cockpit.socket
Created symlink /etc/systemd/system/sockets.target.wants/cockpit.socket → /usr/lib/systemd/system/cockpit.socket.


Assign a static IP address:

[root@vbox ~]# nano /etc/NetworkManager/system-connections/enp0s8.nmconnection
[root@vbox ~]#
[root@vbox ~]#
[root@vbox ~]# cat /etc/NetworkManager/system-connections/enp0s8.nmconnection
[connection]
id=enp0s8
uuid=9a8873c2-ef49-3367-bd82-6e26219ea021
type=ethernet
autoconnect-priority=-999
interface-name=enp0s8
timestamp=1740247307

[ethernet]

[ipv4]
method=manual
; The line below is an example of an IP address with a /24 prefix and 192.168.56.1 as the gateway.
addresses1=192.168.56.109/24,192.168.56.1
; Example of specifying DNS servers:
dns=8.8.8.8;8.8.4.4;
[ipv6]
addr-gen-mode=eui64
method=auto

[proxy]

[root@k8s-master1 ~]# nano /etc/cockpit/disallowed-users
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# cat /etc/cockpit/disallowed-users

# List of users which are not allowed to login to Cockpit

root

To be able to log in to Cockpit as the root user, remove (or comment out) the root entry in this file.


[root@k8s-master1 ~]# nano /etc/modules-load.d/containerd.conf
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# cat /etc/modules-load.d/containerd.conf
overlay
br_netfilter

nano /etc/modules-load.d/containerd.conf #create a config file listing the kernel modules to load at boot, one module name per line:

overlay
br_netfilter

modprobe overlay #load the module now, without rebooting

modprobe br_netfilter #load the module now, without rebooting

lsmod | grep -e overlay -e br_netfilter #check that the modules are loaded

network configuration for K8s:

nano /etc/sysctl.d/k8s.conf #create a config file and add the following lines

net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1

sysctl -p /etc/sysctl.d/k8s.conf #reload and apply the config file


Container runtime (containerd) installation:

Install the dnf-plugins-core package and set up the repository

sudo dnf -y install dnf-plugins-core
sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

Install the containerd engine:

dnf install containerd.io

[root@k8s-master1 ~]# nano /etc/sysctl.d/k8s.conf
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
net.ipv4.ip_forward = 1
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
[root@k8s-master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
net.ipv4.ip_forward = 1
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
[root@k8s-master1 ~]# sysctl -p
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
net.ipv4.ip_forward = 1
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory

Then run sudo sysctl -p /etc/sysctl.d/k8s.conf to apply the settings above. The "cannot stat /proc/sys/net/bridge/..." errors shown above occur because the br_netfilter module was not loaded yet; once it is loaded with modprobe, the same command succeeds, as shown below.

Finally, to avoid the [WARNING FileExisting-tc]: tc not found in system path warning, install the iproute-tc package with the command below.

yum install iproute-tc

[root@k8s-master1 ~]# sudo modprobe br_netfilter # load the missing module
[root@k8s-master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
[root@k8s-master1 ~]# lsmod | grep br_netfilter
br_netfilter 36864 0
bridge 417792 1 br_netfilter

[root@k8s-master1 ~]# sudo dnf -y install dnf-plugins-core
Last metadata expiration check: 0:10:43 ago on Sat 22 Feb 2025 09:36:19 PM +03.
Package dnf-plugins-core-4.3.0-16.el9.noarch is already installed.
Dependencies resolved.
Nothing to do.
Complete!
[root@k8s-master1 ~]# sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
Adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
[root@k8s-master1 ~]# dnf install containerd.io
Docker CE Stable - x86_64 270 kB/s | 64 kB 00:00

Dependencies resolved.

Package Architecture Version Repository Size

Installing:
containerd.io x86_64 1.7.25-3.1.el9 docker-ce-stable 43 M
Installing dependencies:
container-selinux noarch 3:2.232.1-1.el9 appstream 55 k

Transaction Summary

Install 2 Packages

Total download size: 43 M
Installed size: 150 M
Is this ok [y/N]: y
Downloading Packages:
(1/2): container-selinux-2.232.1-1.el9.noarch.rpm 25 kB/s | 55 kB 00:02

(2/2): containerd.io-1.7.25-3.1.el9.x86_64.rpm 9.0 MB/s | 43 MB 00:04

Total 8.2 MB/s | 43 MB 00:05
Docker CE Stable - x86_64 14 kB/s | 1.6 kB 00:00
Importing GPG key 0x621E9F35:
Userid : "Docker Release (CE rpm) docker@docker.com"
Fingerprint: 060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35
From : https://download.docker.com/linux/centos/gpg
Is this ok [y/N]:
Key imported successfully
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
Preparing : 1/1
Running scriptlet: container-selinux-3:2.232.1-1.el9.noarch 1/2
Installing : container-selinux-3:2.232.1-1.el9.noarch 1/2
Running scriptlet: container-selinux-3:2.232.1-1.el9.noarch 1/2
Installing : containerd.io-1.7.25-3.1.el9.x86_64 2/2
Running scriptlet: containerd.io-1.7.25-3.1.el9.x86_64 2/2
Running scriptlet: container-selinux-3:2.232.1-1.el9.noarch 2/2
Running scriptlet: containerd.io-1.7.25-3.1.el9.x86_64 2/2
Verifying : containerd.io-1.7.25-3.1.el9.x86_64 1/2
Verifying : container-selinux-3:2.232.1-1.el9.noarch 2/2

Installed:
container-selinux-3:2.232.1-1.el9.noarch containerd.io-1.7.25-3.1.el9.x86_64

Complete!


containerd configuration:

containerd config default > /etc/containerd/config.toml #write out the default config settings

nano /etc/containerd/config.toml #make the change below

SystemdCgroup = true

disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
path = ""

[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0

[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0

[metrics]
address = ""
grpc_histogram = false

[plugins]

[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"

[plugins."io.containerd.grpc.v1.cri"]
cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
drain_exec_sync_io_timeout = "0s"
enable_cdi = false
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_deprecation_warnings = []
ignore_image_defined_volumes = false
image_pull_progress_timeout = "5m0s"
image_pull_with_sync_fs = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.k8s.io/pause:3.8"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""

[plugins."io.containerd.grpc.v1.cri".cni]

  bin_dir = "/opt/cni/bin"

  conf_dir = "/etc/cni/net.d"

  conf_template = ""

  ip_pref = ""

  max_conf_num = 1

  setup_serially = false

[plugins."io.containerd.grpc.v1.cri".containerd]

default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_blockio_not_enabled_errors = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"

[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]

base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""

[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]

base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
sandbox_mode = "podsandbox"
snapshotter = ""

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]

BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = false # change this value to true

[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]

base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""

[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

[plugins."io.containerd.grpc.v1.cri".image_decryption] key_model = "node"

[plugins."io.containerd.grpc.v1.cri".registry]

config_path = ""

[plugins."io.containerd.grpc.v1.cri".registry.auths]

[plugins."io.containerd.grpc.v1.cri".registry.configs]

[plugins."io.containerd.grpc.v1.cri".registry.headers]

[plugins."io.containerd.grpc.v1.cri".registry.mirrors]

[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]

tls_cert_file = ""
tls_key_file = ""

[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"

[plugins."io.containerd.internal.v1.restart"]
interval = "10s"

[plugins."io.containerd.internal.v1.tracing"]

[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"

[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false

[plugins."io.containerd.nri.v1.nri"]
disable = true
disable_connections = false
plugin_config_path = "/etc/nri/conf.d"
plugin_path = "/opt/nri/plugins"
plugin_registration_timeout = "5s"
plugin_request_timeout = "2s"
socket_path = "/var/run/nri/nri.sock"

[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false

[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false

[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]

[plugins."io.containerd.service.v1.tasks-service"]
blockio_config_file = ""
rdt_config_file = ""

[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""

[plugins."io.containerd.snapshotter.v1.blockfile"]
fs_type = ""
mount_options = []
root_path = ""
scratch_file = ""

[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""

[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""

[plugins."io.containerd.snapshotter.v1.overlayfs"]
mount_options = []
root_path = ""
sync_remove = false
upperdir_label = false

[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""

[plugins."io.containerd.tracing.processor.v1.otlp"]

[plugins."io.containerd.transfer.v1.local"]
config_path = ""
max_concurrent_downloads = 3
max_concurrent_uploaded_layers = 3

[[plugins."io.containerd.transfer.v1.local".unpack_config]]

  differ = ""

  platform = "linux/amd64"

  snapshotter = "overlayfs"

[proxy_plugins]

[stream_processors]

[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"

[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.metrics.shimstats" = "2s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"

[ttrpc]
address = ""
gid = 0
uid = 0
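If you prefer not to edit the file by hand, the SystemdCgroup change can also be scripted; a minimal sketch (regenerates the default config, then flips the flag):

containerd config default > /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd #pick up the change if containerd is already running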

Start containerd:

systemctl enable --now containerd #start containerd now and enable it at boot

systemctl status containerd #check the status

[root@k8s-master1 ~]# systemctl enable --now containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.
[root@k8s-master1 ~]# systemctl status containerd
● containerd.service - containerd container runtime
Loaded: loaded (/usr/lib/systemd/system/containerd.service; enabled; preset: disabled)
Active: active (running) since Sat 2025-02-22 22:08:16 +03; 11s ago
Docs: https://containerd.io
Process: 10406 ExecStartPre=/sbin/modprobe overlay (code=exited, status=0/SUCCESS)
Main PID: 10408 (containerd)
Tasks: 8
Memory: 17.0M
CPU: 74ms
CGroup: /system.slice/containerd.service
└─10408 /usr/bin/containerd

Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239198253+03:00" level=info msg="Start subscribing containerd event"
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239236978+03:00" level=info msg="Start recovering state"
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239322027+03:00" level=info msg="Start event monitor"
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239330849+03:00" level=info msg="Start snapshots syncer"
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239342040+03:00" level=info msg="Start cni network conf syncer for default"
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239348614+03:00" level=info msg="Start streaming server"
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239484411+03:00" level=info msg=serving… address=/run/containerd/containerd.sock.ttrpc
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239633046+03:00" level=info msg=serving… address=/run/containerd/containerd.sock
Feb 22 22:08:16 k8s-master1 containerd[10408]: time="2025-02-22T22:08:16.239781660+03:00" level=info msg="containerd successfully booted in 0.064848s"
Feb 22 22:08:16 k8s-master1 systemd[1]: Started containerd container runtime.


install kubeadm:

[root@k8s-master1 ~]# bash
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF


[root@k8s-master1 ~]# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
[root@k8s-master1 ~]# sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
Kubernetes 7.2 kB/s | 9.8 kB 00:01
Rocky Linux 9 - BaseOS 1.2 MB/s | 2.3 MB 00:01
Rocky Linux 9 - AppStream 4.0 MB/s | 8.6 MB 00:02
Rocky Linux 9 - Extras 18 kB/s | 16 kB 00:00

Dependencies resolved.

Package Architecture Version Repository Size

Installing:
kubeadm x86_64 1.32.2-150500.1.1 kubernetes 12 M
kubectl x86_64 1.32.2-150500.1.1 kubernetes 11 M
kubelet x86_64 1.32.2-150500.1.1 kubernetes 15 M
Installing dependencies:
conntrack-tools x86_64 1.4.7-4.el9_5 appstream 222 k
cri-tools x86_64 1.32.0-150500.1.1 kubernetes 7.1 M
kubernetes-cni x86_64 1.6.0-150500.1.1 kubernetes 8.0 M
libnetfilter_cthelper x86_64 1.0.0-22.el9 appstream 23 k
libnetfilter_cttimeout x86_64 1.0.0-19.el9 appstream 23 k
libnetfilter_queue x86_64 1.0.5-1.el9 appstream 28 k

Transaction Summary

Install 9 Packages

Total download size: 53 M
Installed size: 289 M
Downloading Packages:
(1/9): cri-tools-1.32.0-150500.1.1.x86_64.rpm 3.2 MB/s | 7.1 MB 00:02
(2/9): kubectl-1.32.2-150500.1.1.x86_64.rpm 2.9 MB/s | 11 MB 00:03
(3/9): kubeadm-1.32.2-150500.1.1.x86_64.rpm 2.6 MB/s | 12 MB 00:04
(4/9): conntrack-tools-1.4.7-4.el9_5.x86_64.rpm 272 kB/s | 222 kB 00:00
(5/9): libnetfilter_cttimeout-1.0.0-19.el9.x86_64.rpm 158 kB/s | 23 kB 00:00
(6/9): kubelet-1.32.2-150500.1.1.x86_64.rpm 4.2 MB/s | 15 MB 00:03
(7/9): libnetfilter_cthelper-1.0.0-22.el9.x86_64.rpm 153 kB/s | 23 kB 00:00
(8/9): kubernetes-cni-1.6.0-150500.1.1.x86_64.rpm 3.9 MB/s | 8.0 MB 00:02

(9/9): libnetfilter_queue-1.0.5-1.el9.x86_64.rpm 85 kB/s | 28 kB 00:00

Total 8.3 MB/s | 53 MB 00:06
Kubernetes 2.7 kB/s | 1.7 kB 00:00
Importing GPG key 0x9A296436:
Userid : "isv:kubernetes OBS Project "
Fingerprint: DE15 B144 86CD 377B 9E87 6E1A 2346 54DA 9A29 6436
From : https://pkgs.k8s.io/core:/stable:/v1.32/rpm/repodata/repomd.xml.key
Key imported successfully
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
Preparing : 1/1
Installing : libnetfilter_queue-1.0.5-1.el9.x86_64 1/9
Installing : libnetfilter_cthelper-1.0.0-22.el9.x86_64 2/9
Installing : libnetfilter_cttimeout-1.0.0-19.el9.x86_64 3/9
Installing : conntrack-tools-1.4.7-4.el9_5.x86_64 4/9
Running scriptlet: conntrack-tools-1.4.7-4.el9_5.x86_64 4/9
Installing : kubernetes-cni-1.6.0-150500.1.1.x86_64 5/9
Installing : cri-tools-1.32.0-150500.1.1.x86_64 6/9
Installing : kubeadm-1.32.2-150500.1.1.x86_64 7/9
Installing : kubelet-1.32.2-150500.1.1.x86_64 8/9
Running scriptlet: kubelet-1.32.2-150500.1.1.x86_64 8/9
Installing : kubectl-1.32.2-150500.1.1.x86_64 9/9
Running scriptlet: kubectl-1.32.2-150500.1.1.x86_64 9/9
Verifying : cri-tools-1.32.0-150500.1.1.x86_64 1/9
Verifying : kubeadm-1.32.2-150500.1.1.x86_64 2/9
Verifying : kubectl-1.32.2-150500.1.1.x86_64 3/9
Verifying : kubelet-1.32.2-150500.1.1.x86_64 4/9
Verifying : kubernetes-cni-1.6.0-150500.1.1.x86_64 5/9
Verifying : conntrack-tools-1.4.7-4.el9_5.x86_64 6/9
Verifying : libnetfilter_cttimeout-1.0.0-19.el9.x86_64 7/9
Verifying : libnetfilter_cthelper-1.0.0-22.el9.x86_64 8/9
Verifying : libnetfilter_queue-1.0.5-1.el9.x86_64 9/9

Installed:
conntrack-tools-1.4.7-4.el9_5.x86_64 cri-tools-1.32.0-150500.1.1.x86_64 kubeadm-1.32.2-150500.1.1.x86_64 kubectl-1.32.2-150500.1.1.x86_64
kubelet-1.32.2-150500.1.1.x86_64 kubernetes-cni-1.6.0-150500.1.1.x86_64 libnetfilter_cthelper-1.0.0-22.el9.x86_64 libnetfilter_cttimeout-1.0.0-19.el9.x86_64
libnetfilter_queue-1.0.5-1.el9.x86_64

Complete!


4. Installing kubeadm

Now that the prerequisites are complete, we can move on to installing kubeadm.

For the kubeadm installation, the following packages need to be installed on all nodes:

kubeadm: the tool used to bootstrap the Kubernetes cluster.

kubelet: the component that runs on every node in the cluster and does the work of starting pods and containers.

kubectl: the CLI tool used to communicate with the cluster.

kubeadm does not install or manage kubelet or kubectl for you, so you must make sure the versions of these three tools match. Otherwise you may end up with unstable and incorrect behavior. You can check the version skew documentation for supported versions and example scenarios.
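Once the packages are installed, a quick way to confirm that the three tools are at matching versions (standard version commands, shown here only for verification):

kubeadm version -o short
kubelet --version
kubectl version --client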
4.1 Installation on Red Hat Based Distributions

This guide uses Rocky Linux, so you can install kubeadm with the commands below. Note that the packages.cloud.google.com repository shown here is the legacy Google-hosted repository; as the session output further down shows, it no longer serves these packages, and the pkgs.k8s.io repository added earlier is the one that actually works.

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

Set SELinux in permissive mode (effectively disabling it)

setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable --now kubelet

Running the setenforce 0 and sed ... commands puts SELinux into permissive mode, effectively disabling it. This is required to allow containers to access the host filesystem, and it remains necessary until SELinux support in the kubelet improves.

If your Red Hat based distribution cannot interpret basearch and the baseurl fails, replace \$basearch with your machine's architecture. Type uname -m to see this value. For example, the baseurl for x86_64 would be: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64.

4.2 Installation on Debian Based Distributions

If you want to use a Debian based distribution such as Ubuntu, you can install kubeadm with the commands below.

apt-get update
apt-get install -y apt-transport-https ca-certificates curl
curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl

We have now successfully installed kubeadm, kubelet, and kubectl. Next, we can move on to initializing the cluster with kubeadm.

If you run systemctl status kubelet at this point, you will see that the kubelet service fails to start. This is normal: the kubelet will keep restarting and erroring every few seconds until kubeadm tells it what to do.
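You can watch this crash loop, and later confirm that it stops after kubeadm init, with the usual systemd tools:

systemctl status kubelet
journalctl -u kubelet -f #follow the kubelet logs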

install kubelet, kubeadm and kubectl:

[root@k8s-master1 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
Last metadata expiration check: 0:24:53 ago on Sat 22 Feb 2025 09:47:31 PM +03.
No match for argument: kubelet
No match for argument: kubeadm
No match for argument: kubectl
Error: Unable to find a match: kubelet kubeadm kubectl
[root@k8s-master1 ~]# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*


install kubelet, kubeadm and kubectl:

sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

start kubelet:

sudo systemctl enable --now kubelet


[root@k8s-master1 ~]# sudo systemctl enable --now kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.

kubeadm init --pod-network-cidr=10.245.0.0/16 --apiserver-advertise-address=192.168.56.109

4.3 Initializing the Cluster with kubeadm

The control-plane (manager) node is the machine where the control-plane components run, including etcd (the cluster database) and the API server (which the kubectl CLI talks to).

To initialize the cluster we must run the kubeadm init command with the required parameters. The parameters we can use are described below:

--control-plane-endpoint: used to reach the control-plane nodes of the Kubernetes cluster. If you plan to later upgrade a single control-plane kubeadm cluster to high availability with multiple control-plane nodes (recommended for production), you must set this parameter. You can use an IP address or a domain name.

--apiserver-advertise-address: specifies the IP address on which the API server is reachable from outside. It determines how the cluster's API server will be accessed. If this parameter is not set, the address is chosen automatically based on the network between the nodes; however, that automatic choice may not always be correct, or the API server may need to be reachable on a specific address, in which case you should set this parameter explicitly.

--pod-network-cidr: specifies the IP address range used for pod-to-pod networking in the cluster. When it is not set, the commonly used default is the 10.244.0.0/16 range.

--cri-socket: specifies the path of the Unix socket used to communicate with the container runtime (for example CRI-O or containerd). When it is not set, kubeadm tries to auto-detect the container runtime using a list of well-known endpoints.

For the other parameters you can use, see the kubeadm init reference page.
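Purely as an illustration (the endpoint below is a placeholder, not a value from this setup), a kubeadm init call combining these parameters might look like this:

kubeadm init \
  --control-plane-endpoint "k8s-api.example.local:6443" \
  --apiserver-advertise-address 192.168.56.109 \
  --pod-network-cidr 10.245.0.0/16 \
  --cri-socket unix:///var/run/containerd/containerd.sock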

We can now initialize the cluster by running kubeadm init with the required parameters:

kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address= --control-plane-endpoint=


Because this guide uses the Calico network add-on, the --pod-network-cidr parameter is set to 192.168.0.0/16 in the generic command above, since that is the range preconfigured in the Calico manifest. If you use a different range (as this setup does with 10.245.0.0/16), you must also adjust the corresponding range in the manifest file, as sketched below.
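For example, since this cluster was initialized with --pod-network-cidr=10.245.0.0/16, the Calico pool should be set to match. The CALICO_IPV4POOL_CIDR variable typically ships commented out in calico.yaml, so uncomment it and adjust the value before applying (a sketch, assuming the stock manifest layout):

grep -n -A1 CALICO_IPV4POOL_CIDR calico.yaml #locate the setting before editing
# then in calico.yaml, under the calico-node container env, set:
#   - name: CALICO_IPV4POOL_CIDR
#     value: "10.245.0.0/16"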

When the initialization completes successfully, you should see output similar to the following:

[root@k8s-master1 ~]# kubeadm init --pod-network-cidr=10.245.0.0/16 --apiserver-advertise-address=192.168.56.109
[init] Using Kubernetes version: v1.32.2
[preflight] Running pre-flight checks
[WARNING Hostname]: hostname "k8s-master1" could not be reached
[WARNING Hostname]: hostname "k8s-master1": lookup k8s-master1 on 10.0.2.3:53: server misbehaving
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
W0222 22:26:54.177001 16540 checks.go:846] detected that the sandbox image "registry.k8s.io/pause:3.8" of the container runtime is inconsistent with that used by kubeadm.It is recommended to use "registry.k8s.io/pause:3.10" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.56.109]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master1 localhost] and IPs [192.168.56.109 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master1 localhost] and IPs [192.168.56.109 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 501.049847ms
[api-check] Waiting for a healthy API server. This can take up to 4m0s
[api-check] The API server is healthy after 30.001270778s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master1 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 2lfwao.rpasevsb1066vqm7
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.56.109:6443 --token 2lfwao.rpasevsb1066vqm7 \
--discovery-token-ca-cert-hash sha256:ce3fc9d1e4731c044cfa0db74da2ae85c4e155b01148f03b9a30daec480ec12b

The master (control-plane) setup is now complete.
With the steps up to this point, the Kubernetes control plane has been installed.
The token above will be needed to join node1 and node2 to the cluster.

Next, Calico will be installed for networking. Don't forget this step!
curl -O https://raw.githubusercontent.com/projectcalico/calico/release-v3.29/manifests/calico.yaml
kubectl apply -f calico.yaml # create the Calico resources from calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created


[root@k8s-master1 ~]# mkdir -p $HOME/.kube
[root@k8s-master1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master1 NotReady control-plane 6m39s v1.32.2
[root@k8s-master1 ~]# kubectl get pods -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-668d6bf9bc-7k66b 0/1 Pending 0 6m45s
kube-system coredns-668d6bf9bc-rc5sr 0/1 Pending 0 6m45s
kube-system etcd-k8s-master1 1/1 Running 0 6m53s 10.0.2.15 k8s-master1
kube-system kube-apiserver-k8s-master1 1/1 Running 0 6m53s 10.0.2.15 k8s-master1
kube-system kube-controller-manager-k8s-master1 1/1 Running 0 6m50s 10.0.2.15 k8s-master1
kube-system kube-proxy-kzq8x 1/1 Running 0 6m46s 10.0.2.15 k8s-master1
kube-system kube-scheduler-k8s-master1 1/1 Running 0 6m53s 10.0.2.15 k8s-master1

If you are going to use kubectl as a non-root user, run the following commands:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

If you are going to use it as the root user, running the following command is sufficient:

export KUBECONFIG=/etc/kubernetes/admin.conf
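The export only lasts for the current shell session; to make it permanent for the root user, one common approach (an assumption about your shell setup, not something this guide requires) is:

echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /root/.bashrc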

curl -O https://raw.githubusercontent.com/projectcalico/calico/release-v3.29/manifests/calico.yaml
[root@k8s-master1 ~]# curl -O https://raw.githubusercontent.com/projectcalico/calico/v3.29/manifests/calico.yaml
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 14 100 14 0 0 69 0 --:--:-- --:--:-- --:--:-- 69
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# kubectl apply -f calico.yaml
error: error validating "calico.yaml": error validating data: [apiVersion not set, kind not set]; if you choose to ignore these errors, turn validation off with --validate=false
[root@k8s-master1 ~]# curl -O https://raw.githubusercontent.com/projectcalico/calico/release-v3.29/manifests/calico.yaml
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 316k 100 316k 0 0 319k 0 --:--:-- --:--:-- --:--:-- 319k
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# nano calico.yaml
[root@k8s-master1 ~]# kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/tiers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/adminnetworkpolicies.policy.networking.k8s.io created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrole.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-cni-plugin created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created

[root@k8s-master1 ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-77969b7d87-2pnsv 1/1 Running 0 117s
kube-system calico-node-gcswx 1/1 Running 0 117s
kube-system coredns-668d6bf9bc-7k66b 1/1 Running 0 19m
kube-system coredns-668d6bf9bc-rc5sr 1/1 Running 0 19m
kube-system etcd-k8s-master1 1/1 Running 0 19m
kube-system kube-apiserver-k8s-master1 1/1 Running 0 19m
kube-system kube-controller-manager-k8s-master1 1/1 Running 1 (70s ago) 19m
kube-system kube-proxy-kzq8x 1/1 Running 0 19m
kube-system kube-scheduler-k8s-master1 1/1 Running 1 (65s ago) 19m
[root@k8s-master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready control-plane 19m v1.32.2

5. Verifying the kubeadm Installation

After completing all of the stages above successfully, the kubectl get nodes command should show the nodes with a Ready status.

If you see NotReady, you can check the events and system messages with kubectl describe nodes.
[root@k8s-master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master1 Ready control-plane 28m v1.32.2

[root@k8s-master1 ~]# kubectl describe nodes
Name: k8s-master1
Roles: control-plane
Labels: beta.kubernetes.io/arch=amd64
beta.kubernetes.io/os=linux
kubernetes.io/arch=amd64
kubernetes.io/hostname=k8s-master1
kubernetes.io/os=linux
node-role.kubernetes.io/control-plane=
node.kubernetes.io/exclude-from-external-load-balancers=
Annotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/containerd/containerd.sock
node.alpha.kubernetes.io/ttl: 0
projectcalico.org/IPv4Address: 192.168.56.109/24
projectcalico.org/IPv4IPIPTunnelAddr: 10.245.159.128
volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp: Sat, 22 Feb 2025 22:28:10 +0300
Taints: node-role.kubernetes.io/control-plane:NoSchedule
Unschedulable: false
Lease:
HolderIdentity: k8s-master1
AcquireTime:
RenewTime: Sat, 22 Feb 2025 22:56:59 +0300
Conditions:
Type Status LastHeartbeatTime LastTransitionTime Reason Message
---- ------ ----------------- ------------------ ------ -------
NetworkUnavailable False Sat, 22 Feb 2025 22:47:26 +0300 Sat, 22 Feb 2025 22:47:26 +0300 CalicoIsUp Calico is running on this node
MemoryPressure False Sat, 22 Feb 2025 22:53:06 +0300 Sat, 22 Feb 2025 22:28:00 +0300 KubeletHasSufficientMemory kubelet has sufficient memory available
DiskPressure False Sat, 22 Feb 2025 22:53:06 +0300 Sat, 22 Feb 2025 22:28:00 +0300 KubeletHasNoDiskPressure kubelet has no disk pressure
PIDPressure False Sat, 22 Feb 2025 22:53:06 +0300 Sat, 22 Feb 2025 22:28:00 +0300 KubeletHasSufficientPID kubelet has sufficient PID available
Ready True Sat, 22 Feb 2025 22:53:06 +0300 Sat, 22 Feb 2025 22:47:13 +0300 KubeletReady kubelet is posting ready status
Addresses:
InternalIP: 10.0.2.15
Hostname: k8s-master1
Capacity:
cpu: 2
ephemeral-storage: 17340Mi
hugepages-2Mi: 0
memory: 3746484Ki
pods: 110
Allocatable:
cpu: 2
ephemeral-storage: 16364077029
hugepages-2Mi: 0
memory: 3644084Ki
pods: 110
System Info:
Machine ID: 04abb284efe646b4b2e47a7e009c6cf7
System UUID: e717a7ad-5313-184c-abfe-69d6d7b65f17
Boot ID: 907bb706-77f2-4c09-864c-137aa77c3218
Kernel Version: 5.14.0-503.26.1.el9_5.x86_64
OS Image: Rocky Linux 9.5 (Blue Onyx)
Operating System: linux
Architecture: amd64
Container Runtime Version: containerd://1.7.25
Kubelet Version: v1.32.2
Kube-Proxy Version: v1.32.2
PodCIDR: 10.245.0.0/24
PodCIDRs: 10.245.0.0/24
Non-terminated Pods: (9 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system calico-kube-controllers-77969b7d87-2pnsv 0 (0%) 0 (0%) 0 (0%) 0 (0%) 11m
kube-system calico-node-gcswx 250m (12%) 0 (0%) 0 (0%) 0 (0%) 11m
kube-system coredns-668d6bf9bc-7k66b 100m (5%) 0 (0%) 70Mi (1%) 170Mi (4%) 28m
kube-system coredns-668d6bf9bc-rc5sr 100m (5%) 0 (0%) 70Mi (1%) 170Mi (4%) 28m
kube-system etcd-k8s-master1 100m (5%) 0 (0%) 100Mi (2%) 0 (0%) 28m
kube-system kube-apiserver-k8s-master1 250m (12%) 0 (0%) 0 (0%) 0 (0%) 28m
kube-system kube-controller-manager-k8s-master1 200m (10%) 0 (0%) 0 (0%) 0 (0%) 28m
kube-system kube-proxy-kzq8x 0 (0%) 0 (0%) 0 (0%) 0 (0%) 28m
kube-system kube-scheduler-k8s-master1 100m (5%) 0 (0%) 0 (0%) 0 (0%) 28m
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 1100m (55%) 0 (0%)
memory 240Mi (6%) 340Mi (9%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 28m kube-proxy
Normal NodeAllocatableEnforced 29m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 29m (x8 over 29m) kubelet Node k8s-master1 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 29m (x8 over 29m) kubelet Node k8s-master1 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 29m (x7 over 29m) kubelet Node k8s-master1 status is now: NodeHasSufficientPID
Normal Starting 28m kubelet Starting kubelet.
Normal NodeAllocatableEnforced 28m kubelet Updated Node Allocatable limit across pods
Normal NodeHasSufficientMemory 28m kubelet Node k8s-master1 status is now: NodeHasSufficientMemory
Normal NodeHasNoDiskPressure 28m kubelet Node k8s-master1 status is now: NodeHasNoDiskPressure
Normal NodeHasSufficientPID 28m kubelet Node k8s-master1 status is now: NodeHasSufficientPID
Normal RegisteredNode 28m node-controller Node k8s-master1 event: Registered Node k8s-master1 in Controller
Normal NodeReady 9m49s kubelet Node k8s-master1 status is now: NodeReady
Normal RegisteredNode 9m42s node-controller Node k8s-master1 event: Registered Node k8s-master1 in Controller

Service status as shown in the Cockpit web console:

containerd container runtime
Status: Running - Active since Feb 22, 2025, 10:25 PM
Automatically starts
Path: /usr/lib/systemd/system/containerd.service
Memory: 1.72 GB

kubelet: The Kubernetes Node Agent
Status: Running - Active since Feb 22, 2025, 10:28 PM
Automatically starts
Path: /usr/lib/systemd/system/kubelet.service
Memory: 50.5 MB

6. Installing the Kubernetes Metrics Server

kubeadm does not install the metrics server component during setup, so we need to install it separately.

To confirm this, run the top command and you will see the Metrics API not available error.

root@manager1:~# kubectl top nodes
error: Metrics API not available

To install the metrics server, run the following command:

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

This manifest file comes from the official metrics-server GitHub page, but the kubelet-insecure-tls parameter needs to be added to it; otherwise the metrics server fails with a "because it doesn't contain any IP SANs" node="" error.
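One way to add that flag after applying the stock manifest (assuming the default deployment name metrics-server in the kube-system namespace) is a JSON patch:

kubectl -n kube-system patch deployment metrics-server --type=json \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'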

After the metrics server objects are deployed, it takes about a minute before node and pod metrics become visible with the top command.

kubectl top nodes

You should be able to view the node metrics as shown below.

root@manager1:~# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
manager1 111m 5% 1695Mi 44%

You can also view CPU and memory metrics for specific pods using the command below.

kubectl top pod -n kube-system


nano web01.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080

kubectl apply -f web01.yaml

http://192.168.56.99:30081/

[root@k8s-master1 my-yamls]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 159m
nginx-service NodePort 10.99.205.120 <none> 80:30081/TCP 3m48s
opengist-svc NodePort 10.101.17.111 <none> 80:30080/TCP 22m
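As a quick check from any machine on the host-only network, the NodePort should answer on each node's IP (the node addresses configured later in this guide):

curl http://192.168.56.98:30081/ #k8s-node1
curl http://192.168.56.99:30081/ #k8s-node2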

Steps to perform on Node 1 and Node 2

  1. Add a static IP address:
    [root@vbox ~]# cat /etc/NetworkManager/system-connections/enp0s8.nmconnection
    [connection]
    id=enp0s8
    uuid=9a8873c2-ef49-3367-bd82-6e26219ea021
    type=ethernet
    autoconnect-priority=-999
    interface-name=enp0s8
    timestamp=1740247307

[ethernet]

[ipv4]
method=manual
; The line below is an example of an IP address with a /24 prefix and 192.168.56.1 as the gateway (use this node's own address here, e.g. 192.168.56.98 for k8s-node1).
addresses1=192.168.56.109/24,192.168.56.1
; Example of specifying DNS servers:
dns=8.8.8.8;8.8.4.4;
[ipv6]
addr-gen-mode=eui64
method=auto

[proxy]


  2. Edit the hostname:
    [root@vbox ~]# nano /etc/hostname
    k8s-node1 # set it to this

  3. Edit the hosts file: nano /etc/hosts
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.56.109 k8s-master1
    192.168.56.98 k8s-node1
    192.168.56.99 k8s-node2

4. Check the machine ID:
[root@k8s-node1 ~]# cat /etc/machine-id
b75fecc951d74e49b7f7e2564936d734

  5. Check the machine product ID:
    cat /sys/class/dmi/id/product_uuid
    [root@k8s-node1 ~]# cat /sys/class/dmi/id/product_uuid
    ba04a3e3-ad0c-ec4e-9543-a8acac98dc51

6. Stop the firewall:
[root@k8s-node1 ~]# systemctl disable --now firewalld.service
Removed "/etc/systemd/system/multi-user.target.wants/firewalld.service".
Removed "/etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service".

  7. Put SELinux into permissive mode with setenforce:
    [root@k8s-node1 ~]# getenforce
    Enforcing
    [root@k8s-node1 ~]# setenforce 0
    [root@k8s-node1 ~]# getenforce
    Permissive
  8. Set SELINUX=disabled in the SELinux config file:
    nano /etc/selinux/config
  9. Turn off swap:
    [root@k8s-node1 ~]# swapoff -a
    [root@k8s-node1 ~]# free -h
    total used free shared buff/cache available
    Mem: 3.8Gi 378Mi 3.5Gi 8.0Mi 137Mi 3.5Gi
    Swap: 0B 0B 0B

nano /etc/fstab # comment out the swap line in this file


  10. Update the system to the latest packages:
    yum update
    yum upgrade

  11. Enable the Cockpit web management console and allow root login:
    [root@k8s-node1 ~]# systemctl enable --now cockpit.socket
    Created symlink /etc/systemd/system/sockets.target.wants/cockpit.socket → /usr/lib/systemd/system/cockpit.socket.

nano /etc/cockpit/disallowed-users

remove (or comment out) the root entry so that root can log in.


  12. Add the kernel modules for containerd:
    nano /etc/modules-load.d/containerd.conf # list the modules, one per line:
    overlay
    br_netfilter
  13. Network configuration for K8s:

nano /etc/sysctl.d/k8s.conf # add the lines below

net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1

[root@k8s-node1 ~]# sudo modprobe br_netfilter # load the module
[root@k8s-node1 ~]# sysctl -p /etc/sysctl.d/k8s.conf # apply the settings
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1

  14. Container runtime (containerd) installation:

Install the dnf-plugins-core package and set up the repository

sudo dnf -y install dnf-plugins-core

sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

install the engine


  15. Install containerd:
    dnf install containerd.io
  16. containerd configuration:

containerd config default > /etc/containerd/config.toml # write out default config settings

nano /etc/containerd/config.toml

SystemdCgroup = true # change the value from false to true.
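Instead of editing the file by hand, the value can also be flipped with a one-liner; a sketch (assumes the default config generated above, where it is false):

sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
grep SystemdCgroup /etc/containerd/config.toml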


  17. Start containerd and enable it at boot:
    systemctl enable --now containerd # start the service now and enable it to start on reboot

systemctl status containerd # check the status


  18. Add the Kubernetes repository:

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

  19. Install the Kubernetes packages:
    sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
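The installed versions can be verified before continuing, for example:

kubeadm version
kubelet --version
kubectl version --client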

20. Enable kubelet:
[root@k8s-node1 ~]# systemctl enable --now kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.


21. Join the node to the cluster: to activate the Kubernetes networking settings and add the node to the cluster, run the join command printed by kubeadm init on the master:
kubeadm join 192.168.56.109:6443 --token 2lfwao.rpasevsb1066vqm7 \
--discovery-token-ca-cert-hash sha256:ce3fc9d1e4731c044cfa0db74da2ae85c4e155b01148f03b9a30daec480ec12b

The join command is run on node1 to add it to master1.
Everything up to this point is executed on the k8s-node1 side.
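The join token is only valid for a limited time. If it has expired, a fresh join command can be printed on the master (the same command also appears in the history notes below):

kubeadm token create --print-join-command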

[root@k8s-master1 ~]# kubectl get nodes
NAME          STATUS   ROLES           AGE     VERSION
k8s-master1   Ready    control-plane   108m    v1.32.2 # 192.168.56.109
k8s-node1     Ready    <none>          4m12s   v1.32.2 # 192.168.56.98

Node1 is now Ready, i.e. it has been added to the cluster.

[root@k8s-master1 ~]# kubectl get nodes
NAME          STATUS   ROLES           AGE    VERSION
k8s-master1   Ready    control-plane   123m   v1.32.2 # 192.168.56.109
k8s-node1     Ready    <none>          18m    v1.32.2 # 192.168.56.98
k8s-node2     Ready    <none>          81s    v1.32.2 # 192.168.56.99

You can now deploy applications onto the nodes from master1 using YAML files, as in the example below.

nano deployment01.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dpl1
  labels:
    x: "y"
spec:
  replicas: 3
  selector:
    matchLabels:
      z: t
  template:
    metadata:
      labels:
        z: t
    spec:
      containers:
      - name: w
        image: nginx:1.26
[root@k8s-master1 my-yamls]# kubectl apply -f deployment01.yaml
deployment.apps/dpl1 created

[root@k8s-master1 my-yamls]# kubectl get pods
NAME READY STATUS RESTARTS AGE
dpl1-856489758-5vwsw 0/1 ContainerCreating 0 11s
dpl1-856489758-g6rrk 0/1 ContainerCreating 0 11s
dpl1-856489758-t5h8r 0/1 ContainerCreating 0 11s
[root@k8s-master1 my-yamls]# kubectl get pods
NAME READY STATUS RESTARTS AGE
dpl1-856489758-5vwsw 0/1 ContainerCreating 0 13s
dpl1-856489758-g6rrk 0/1 ContainerCreating 0 13s
dpl1-856489758-t5h8r 0/1 ContainerCreating 0 13s
[root@k8s-master1 my-yamls]#
[root@k8s-master1 my-yamls]#
[root@k8s-master1 my-yamls]# kubectl get pods
NAME READY STATUS RESTARTS AGE
dpl1-856489758-5vwsw 1/1 Running 0 24s
dpl1-856489758-g6rrk 1/1 Running 0 24s
dpl1-856489758-t5h8r 1/1 Running 0 24s
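Once the Deployment is running, the image can be updated and the rollout watched from the master; the same commands also appear in the history notes below:

kubectl set image deployment dpl1 w=nginx:1.27
kubectl rollout status deployment dpl1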

Kubernetes training notes:

[root@oyk-master1 ~]# history > kubernetes.txt
[root@oyk-master1 ~]# cat kubernetes.txt
1 ip a
2 yum update
3 cat /etc/redhat-release
4 ping 192.168.56.112
5 ping 192.168.0.114
6 ping 10.0.3.15
7 ip a
8 exit
9 'p a
10 ip a
11 nano /etc/hostname
12 vi /etc/hostname
13 cat /etc/hostname
14 vi /etc/hostname
15 reboot
16 ping 192.168.56.111
17 ping 192.168.56.112
18 ping 192.168.56.113
19 ip a
20 poweroff
21 ip a
22 yum update
23 poweroff
24 vi /etc/hosts
25 ip a
26 vi /etc/resolv.conf
27 poweroff
28 ip a
29 ping 8.8.8.8
30 ping 192.168.56.1
31 ping 192.168.56.112
32 ip a
33 exit
34 ip a
35 systemctl enable --now cockpit.socket
36 sudo dnf -y
37 sudo dnf -y install cockpit
38 sudo systemctl enable --now cockpit.socket
39 ip a
40 vi /etc/hostname
41 nmtui
42 ip a
43 vi /etc/NetworkManager/system-connections/enp0s9.nmconnection
44 cat /etc/NetworkManager/system-connections/enp0s9.nmconnection
45 reboot
46 cat /etc/NetworkManager/system-connections/enp0s9.nmconnection
47 ping 192.168.56.112
48 ping 192.168.56.111
49 ping 8.8.8.8
50 vi /etc/cockpit/disallowed-users
51 systemctl restart cockpit
52 ip a
53 cat /sys/class/dmi/id/product_uuid
54 cat /etc/machine-id
55 cat /etc/sysconfig/selinux
56 getenforce
57 sudo nano /etc/selinux/config
58 vi /etc/sysconfig/selinux
59 dnf -y install nano
60 nano /etc/sysconfig/selinux
61 nano /etc/firewalld/firewalld.conf
62 systemctl disable firewalld
63 systemctl stop firewalld
64 systemctl status firewalld
65 history
66 setenforce 0
67 getenforce
68 cat /etc/sysconfig/selinux
69 history
70 reboot
71 getenforce
72 free -h
73 swapoff -a
74 free -h
75 nano /etc/fstab
76 nano /etc/modules-load.d/containerd.conf
77 cat /etc/modules-load.d/containerd.conf
78 modprobe overlay
79 modprobe br_filter
80 cat /etc/modules-load.d/containerd.conf
81 modprobe br_netfilter
82 nano /etc/sysctl.d/k8s.conf
83 sysctl -p
84 sysctl -p /etc/sysctl.d/k8s.conf
85 dnf -y install dnf-plugins-core
86 dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
87 nano /etc/sysctl.d/k8s.conf
88 dnf install containerd.io
89 containerd config default > /etc/containerd/config.toml
90 naon /etc/containerd/config.toml
91 nano /etc/containerd/config.toml
92 systemctl enable --now containerd.service
93 systemctl status containerd.service
94 history
95 nano /etc/containerd/config.toml
96 systemctl status containerd.service
97 cat /etc/yum.repos.d/docker-ce.repo
98 cat <
112 Feb 10 17:02:06 oyk-master1 systemd[1]: kubelet.service: Failed with result 'exit-code'.
113 journalctl -u kubelet --no-pager -n 50 --output cat
114 sudo systemctl restart kubelet
115 sudo systestatus kubelet
116 exit
117 poweroff
118 journalctl -u kubelet --no-pager -n 50 --output cat
119 cat /var/lib/kubelet/config.yaml
120 nano /etc/containerd/config.toml
121 systemctl status containerd
122 sudo systemctl daemon-reload
123 sudo systemctl restart kubelet
124 sudo systemctl status kubelet
125 sudo system status kubelet
126 sudo systemctl status kubelet
127 systemctl restart containerd
128 systemctl status containerd
129 ip a
130 systemctl status containerd.service
131 systemctl restart kubelet
132 systemctl status kubelet
133 ip a
134 kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.111
135 poweroff
136 ip a
137 kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.111
138 ping 8.8.8.8
139 poweroff
140 ping 8.8.8.8
141 nano /etc/resolv.conf
142 ip a
143 ping 8.8.8.8
144 poweroff
145 ip a
146 ping 8.8.8.8
147 kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.111
148 sudo systemctl status containerd
149 ,
150 sudo systemctl restart containerd
151 sudo systemctl status containerd
152 nano /etc/containerd/config.toml
153 sudo systemctl restart containerd
154 kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.111
155 sudo kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.111
156 nano /etc/containerd/config.toml
157 ls -l /run/containerd/containerd.sock
158 sudo systemctl restart containerd
159 sudo hostnamectl set-hostname oyk-master1
160 sudo nano /etc/hosts
161 sudo systemctl restart systemd-resolved
162 systemctl restart systemd-resolved
163 nano /etc/containerd/config.toml
164 y
165 sudo systemctl restart containerd
166 sudo kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.56.111
167 nano /etc/hosts
168 ping oyk-node1
169 ping oyk-node2
170 ping oyk-master1
171 systemctl status kubelet
172 mkdir -p $HOME/.kube
173 cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
174 chown $(id -u):$(id -g) $HOME/.kube/config
175 kubectl get nodes
176 kubectl get pods -A
177 kubectl get pods -A -o wide
178 curl -o https://github.com/hegdepavankumar/Cisco-Images-for-GNS3-and-EVE-NG?tab=readme-ov-file
179 dnf -y curl
180 dnf -y install curl
181 curl -o https://github.com/hegdepavankumar/Cisco-Images-for-GNS3-and-EVE-NG?tab=readme-ov-file
182 curl -o https://raw.githubusercontent.com/projectcalico/calico/refs/heads/release-v3.29/manifests/calico.yaml
183 curl -O https://raw.githubusercontent.com/projectcalico/calico/v3.29/manifests/calico.yaml
184 kubectl get pods -A -o wide
185 kubectl apply -f calico.yaml
186 ls
187 curl -O https://raw.githubusercontent.com/projectcalico/calico/v3.29/manifests/calico.yaml
188 ls
189 kubectl apply -f calico.yaml
190 ls -lh calico.yaml
191 cat calico.yaml | head -n 20
192 kubectl apply -f calico.yaml
193 ls
194 ping 8.8.8.8
195 kubectl apply -f calico.yaml
196 curl -O https://raw.githubusercontent.com/projectcalico/calico/v3.29/manifests/calico.yaml
197 ls
198 nano calico.yaml
199 curl -O https://raw.githubusercontent.com/projectcalico/calico/v3.29/manifests/calico.yaml
200 ls
201 nano calico.yaml
202 kubectl apply -f calico.yaml
203 kubectl get pods -A -o wide
204 kubeadm token list
205 kubeadm token create --print-join-command
206 kubeadm token list
207 history
208 kubeadm token list
209 kubectl get nodes
210 kubectl get pods -A -o wide
211 kubectl get nodes
212 kubectl get pods -A -o wide
213 kubectl get nodes
214 kubeadm token create --print-join-command
215 kubectl get nodes
216 systemctl status containerd.service
217 kubectl get nodes
218 kubeadm token create --print-join-command
219 systemctl status containerd.service
220 kubectl get nodes
221 kubectl get pods -n kube-system
222 kubectl get pods -A -o wide
223 history
224 kubectl get pods -A -o wide
225 kubectl get nodes
226 kubectl api-resources
227 kubectl config view
228 mkdir my-yamls
229 cd my-yamls/
230 nano svc-token.yaml
231 cat svc-token.yaml
232 nano svc-token.yaml
233 kubectl create -f svc-token.yaml
234 kubectl get secret
235 kubectl describe default-token
236 kubectl describe secret default-token
237 kubectl get pods -A -o wide
238 kubectl get nodes
239 kubectl get pods -A -o wide
240 kubectl get nodes oyk-master1 -o wide
241 kubectl get nodes oyk-master1 oyk-node1
242 kubectl get nodes/oyk-master1 nodes/oyk-node1
243 kubectl get pods
244 kubectl get pods -A
245 kubectl get pods
246 kubectl get pods -A -o wide
247 kubectl get all -A
248 kubectl get namespace
249 kubectl get serviceaccount
250 kubectl get secret
251 kubectl get namespaces -all
252 kubectl get namespaces all -A
253 kubectl get namespaces
254 kubectl get pods --namespace kub-system
255 kubectl get pods
256 kubectl get
257 kubectl desscribe pods
258 kubectl get pod
259 kubectl get pods
260 kubectl get pods --namespace
261 kubectl get pods -o wide
262 kubectl get pods -n kube-system
263 kubectl get statefulsets
264 kubectl get pods -o wide
265 kubectl get pods -n kube-system
266 kubectl logs etcd
267 kubectl logs etcd-oyk-master1
268 kubectl logs
269 kubectl logs svc-token.yaml
270 kubectl get pods -n kube-system
271 kubectl logs kube-scheduler-oyk-master1 -o wide
272 kubectl logs kube-scheduler-oyk-master1
273 kubectl get namespace
274 kubectl logs kube-system
275 kubectl logs kube-system -o wide
276 kubectl get namespace
277 kubectl get pods -o wide
278 kubectl get namespace
279 kubectl get secrets
280 kubectl get events
281 kubectl get namespaces
282 kubectl get nodes
283 kubectl get
284 kubectl get nodes
285 kubectl get pods --all-namespaces
286 kubectl describe node kube-system
287 kubectl describe node etcd-oyk-master1
288 kubectl describe node oyk-node1
289 kubectl describe node oyk-master1
290 ls
291 cat svc-token.yaml
292 kubectl get pods -A
293 nano pod01.yaml
294 kubectl api-resources
295 nano pod01.yaml
296 kubectl apply -f pod01.yaml
297 kubectl get pods
298 kubectl get pods -s
299 kubectl get pods -a
300 kubectl get pods
301 kubectl describe pod01
302 kubectl describe pods pod01
303 kubectl get pods
304 kubectl get pods -o wide
305 kubectl get pods -o wide -A
306 nano pod02.yaml
307 kubectl apply -f pod02.yaml
308 nano pod02.yaml
309 kubectl apply -f pod02.yaml
310 nano pod02.yaml
311 cat pod01
312 cat pod01.yaml
313 cat pod02.yaml
314 nano pod02.yaml
315 cat pod02.yaml
316 kubectl apply -f pod02.yaml
317 cat pod01.yaml
318 cat pod02.yaml
319 nano pod02.yaml
320 kubectl apply -f pod02.yaml
321 kubectl get pods
322 kubectl get pods describe
323 kubectl get describe
324 kubectl get pods
325 kubectl get pods -o wide -A
326 kubectl get pods
327 kubectl logs pod02
328 kubectl logs pod01
329 kubectl logs pod02 nginx
330 kubectl logs nginx
331 kubectl logs pod01
332 kubectl exec pod02 -i -t -c nginx-container -- bash
333 poweroff
334 ls
335 pwd
336 top
337 kubectl get pods
338 kubectl get nodes
339 ls
340 ls -all
341 kubectl get pods
342 history
343 kubectl exec pod02 -i -t -- bash
344 kubectl exec pod02 -i -t -c nginx-container -- bash
345 kubectl get pods
346 cd my-yamls/
347 nano pod03.yaml
348 kubectl apply -f pod03.yaml
349 kubectl get pods
350 cat pod03.yaml
351 kubectl get pods -o wide -A
352 kubectl get pods
353 kubectl exec pod03 -i -t -- bash
354 kubectl exec pod02 -i -t -c nginx-container -- bash
355 kubectl get pods
356 kubectl get pods -o wide -A
357 kubectl get pods
358 kubectl get pods -o wide -A
359 kubectl describe
360 history
361 kubectl describe node oyk-master1
362 kubectl describe node oyk-node2
363 kubectl get pods
364 kubectl get nodes
365 kubectl describe node oyk-node2
366 kubectl get nodes
367 kubectl get pods
368 kubectl get pods -o wide -n kube-system
369 nano configmap.yaml
370 kubectl apply -f configmap.yaml
371 nano configmap.yaml
372 kubectl apply -f configmap.yaml
373 nano configmap.yaml
374 kubectl apply -f configmap.yaml
375 nano configmap.yaml
376 kubectl apply -f configmap.yaml
377 cat configmap.yaml
378 nano configmap.yaml
379 kubectl apply -f configmap.yaml
380 nano configmap.yaml
381 kubectl apply -f configmap.yaml
382 kubectl get pods
383 kubectl get configmap first-config -o yaml
384 nano secret.yaml
385 kubectl apply -f secret.yaml
386 kubectl get secret
387 kubectl describe secret
388 cat secret.yaml
389 nano pod04.yaml
390 nano service.yaml
391 kubectl apply -f service.yaml
392 kubectl get pods
393 kubectl get nodes
394 kubectl get pods
395 kubectl get svc
396 nano service.yaml
397 ip a
398 kubectl run nginx --image=nginx:latest --labels="app=nginx"
399 kubectl get pods
400 kubectl get svc
401 curl 10.100.44.189:80
402 kubectl describe pods nginx
403 curl 10.100.44.189:80
404 kubectl get endpoints
405 kubectl get pods
406 nano service.yaml
407 kubectl apply -f service.yaml
408 kubectl get pods
409 kubectl get svc
410 kubectl delete service nginx-svc
411 kubectl apply -f service.yaml
412 kubectl get svc
413 kubectl get pods -o wide
414 kubectl get pods -l app=nginx
415 ip a
416 kubectl expose pod nginx --port=80 --type=NodePort --name=nginx-nodeport
417 kubectl get svc
418 ss -nltpa
419 kubectl get svc
420 ls
421 ls -all
422 ls -la .kube/
423 nano replicaset01.yaml
424 kubectl apply -f replicaset01.yaml
425 kubectl get rs
426 kubectl describe rs
427 kubectl get pods
428 kubectl get pods -l app=web
429 kubectl delete webserver-2fkrp
430 kubectl delete webserver-dhmvd
431 kubectl get pods -l app=web
432 kubectl describe rs
433 kubectl delete webserver-rj569
434 kubectl describe rs
435 kubectl get pods
436 kubectl describe rs
437 kubectl get rs
438 kubectl describe rs
439 kubectl delete rs webserver-rj569
440 kubectl delete rs webserver
441 kubectl get
442 kubectl apply -f replicaset01.yaml
443 kubectl describe rs
444 kubectl delete rs webserver-dpqrf
445 kubectl delete pod webserver-dpqrf
446 kubectl describe rs
447 kubectl get pods
448 kubectl delete rs webserver-dpqrf
449 kubectl describe rs
450 kubectl delete pod webserver-xhm5d
451 kubectl describe rs
452 kubectl get pods
453 kubectl get replicasets
454 kubectl get pods
455 kubectl get pods -l app=nginx
456 cp replicaset01.yaml replicaset02.yaml
457 nano replicaset02.yaml
458 kubectl apply -f replicaset02.yaml
459 kubectl get rs
460 kubectl get describe rs web=nginx
461 kubectl get describe rs web
462 kubectl get describe rs app=nginx
463 kubectl get describe app=nginx
464 kubectl get pods describe app=nginx
465 kubectl describe rs
466 kubectl describe rs pods
467 kubectl describe rs pods app=nginx
468 kubectl describe rs pods nginx
469 kubectl get pods -l app=nginx
470 kubectl get pods -l app=web
471 kubectl get describe
472 kubectl describe
473 kubectl describe rs
474 cat replicaset02.yaml
475 cat replicaset01.yaml
476 kubectl get pods
477 history
478 kubectl run nginxx --image=nginx:latest --labels="app=nginx"
479 kubectl get pods
480 kubectl describe rs
481 kubectl get pods
482 nano replicaset02.yaml
483 kubectl apply -f replicaset02.yaml
484 kubectl get pods
485 kubectl get pods rs
486 kubectl get pods -l app=nginx
487 nano deployment01.yaml
488 kubectl apply -f deployment01.yaml
489 nano deployment01.yaml
490 nano replicaset01.yaml
491 nano deployment01.yaml
492 kubectl apply -f deployment01.yaml
493 nano deployment01.yaml
494 kubectl apply -f deployment01.yaml
495 nano deployment01.yaml
496 kubectl apply -f deployment01.yaml
497 kubectl get deployment
498 kubectl get pods --show-labels
499 kubectl describe rs
500 kubectl get pods --show-labels
501 kubectl describe rs dpl1-856489758-77jpz
502 kubectl describe rs dpl1
503 kubectl describe deployment
504 kubectl get rs
505 kubectl rollout status deployment dpl1
506 kubectl set image deployments dpl1 w=nginx:1.27
507 kubectl rollout status deployment dpl1
508 kubectl describe deployment dpl1
509 poweroff
510 kubectl exec pod02 -i -t -c nginx-container -- bash
511 kubectl api-resources
512 kubectl exec pod03 -i -t -- bash
513 kubectl api-resources
514 echo "berkay" | base64
515 echo "deneme" | base64
516 iptables -l
517 iptables
518 iptables -vn
519 curl 192.168.56.112:32408
520 curl 192.168.56.113:32408
521 nano externalname.yaml
522 kubectl apply -f externalname.yaml
523 nano externalname.yaml
524 kubectl apply -f externalname.yaml
525 nano externalname.yaml
526 kubectl apply -f externalname.yaml
527 nano externalname.yaml
528 kubectl apply -f externalname.yaml
529 kubectl apply -f externalname.yaml
530 kubectl apply -f externalname.yaml
531 nano externalname.yaml
532 kubectl apply -f externalname.yaml
533 kubectl get svc
534 kubectl get pods
535 kubectl get svc
536 kubectl get describe
537 kubectl get describe google-servic
538 kubectl get svc
539 kubectl describe node oyk-node1
540 kubectl get svc
541 kubectl expose pod nginx --port=80 --name nginx-headless --cluster-ip=none
542 nano nginx-headless.yaml
543 kubectl apply -f nginx-headless.yaml
544 kubectl get svc
545 kubectl get services
546 kubectl get endpoints nginx-headless
547 ls -la .kube/
548 cat config
549 ls -all
550 cat .kube/config
551 dnf -y install bash-completion
552 kubectl completion bash
553 kubectl completion bash | tee /etc/bash_completion.d/kubectl
554 source /etc/bash_completion.d/kubectl
555 kubectl get nodes
556 history
557 kubectl api-re
558 kubectl api-resources
559 kubectl get secret
560 kubectl get secrets
561 history
562 history > kubernetes.txt

kubectl -n kubernetes-dashboard create token admin-user

eyJhbGciOiJSUzI1NiIsImtpZCI6IkVWZVhNWDZ6cndJTERXckg0dnRGMnVRekxoZURtSmRSM2JscWxpUWJVVkkifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzM5Mzg0Njk4LCJpYXQiOjE3MzkzODEwOTgsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiZDg5NTc4NjgtZjMwNS00NDBjLTg1N2UtMDQ0YjA2ZTJhYjBmIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMGJiNjUwOGMtZDZlYi00ZGM3LTk2OTgtNDBmNDU0MTgwNGI4In19LCJuYmYiOjE3MzkzODEwOTgsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.4baOcana_WZfVSJ4AK6WI61bsvpmcLAny0uFG3T2enIRUxjBDOG4if7ueZiAstjtDaOFfp104LBv_DmU0cau51LjVIb2DtLsGTld307Kdf-fBZFOZFI7W-PSh7VJwR76HFTcxf2TAttwXROidGZIEfDzKyicD4KMuw98INSOSR9jn0z6zuvHXB2OKjUNAVuwOSObF4MPHBPGIOmh-dfRBUrPgndeoUQDe3nzC1FrPZWUTc520eW610aAkgGbpMgWTPT0L4cpLJ6zn60SzxoB4LfCXTLvK3xYawfZPpaAGcJ0ORHjZx5L8w8jgSreeMHbPkbZj4LCkGVB83dFyqTvAg

572 kubectl -n kubernetes-dashboard create token admin-user
573 kubectl proxy
574 kubectl -n kubernetes-dashboard edit service kubernetes-dashboard
575 [200~kubectl -n kubernetes-dashboard edit service kubernetes-dashboard
576 kubectl -n kubernetes-dashboard edit service kubernetes-dashboard
577 nano kubectl -n kubernetes-dashboard edit service kubernetes-dashboard
578 kubectl -n kubernetes-dashboard edit service kubernetes-dashboard
579 kubectl get svc -n kubernetes-dashboard
580 kubectl get pods -n kubernetes-dashboard
581 kubectl describe pod -n kubernetes-dashboard kubernetes-dashboard-79cbcf9fb6-jpg7d
582 kubectl describe pod -n kubernetes-dashboard dashboard-metrics-scraper-5bd45c9dd6-bqjdh
583 kubectl get nodes
584 kubectl get nodes -o wide
585 kubectl describe node | grep -A10 "Allocatable"
586 kubectl describe node oyk-master1 | grep -A10 "Allocatable"
587 kubectl get pods -n kube-system
588 kubectl get pods -n kubernetes-dashboard
589 kubectl describe pod -n kubernetes-dashboard kubernetes-dashboard-79cbcf9fb6-jpg7d
590 kubectl describe pod -n kubernetes-dashboard dashboard-metrics-scraper-5bd45c9dd6-bqjdh
591 kubectl get nodes -o wide
592 sudo systemctl status kubelet
593 sudo systemctl status containerd
594 kubectl get nodes -o wide
595 kubectl get pods -n kubernetes-dashboard
596 kubectl get nodes -o wide
597 kubectl get pods -n kubernetes-dashboard
598 kubectl get nodes -o wide
599 kubectl get pods -n kubernetes-dashboard
600 kubectl get nodes -o wide
601 kubectl get pods -n kubernetes-dashboard
602 kubernetes-dashboard-79cbcf9fb6-jpg7d 0/1 ImagePullBackOff 0 36m
603 kubectl describe pod -n kubernetes-dashboard kubernetes-dashboard-79cbcf9fb6-jpg7d
604 ctr images pull docker.io/kubernetesui/dashboard:v2.7.0
605 kubectl delete pod -n kubernetes-dashboard kubernetes-dashboard-79cbcf9fb6-jpg7d
606 kubectl get pods -n kubernetes-dashboard
607 curl -I https://registry-1.docker.io
608 kubectl get pods -n kubernetes-dashboard
609 kubectl get nodes -o wide
610 kubectl get pods -n kubernetes-dashboard
611 kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.0/aio/deploy/recommended.yaml
612 kubectl get pods -n kubernetes-dashboard
613 history
[root@oyk-master1 ~]#

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

kubectl get pods -n kubernetes-dashboard

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
EOF

cat <<EOF | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

kubectl -n kubernetes-dashboard create token admin-user

kubectl -n kubernetes-dashboard edit service kubernetes-dashboard

Expose the Dashboard externally with NodePort
If you want to reach the Dashboard from outside the cluster (via an IP or a domain), you can change the service type to NodePort.

kubectl -n kubernetes-dashboard edit service kubernetes-dashboard

This command opens the service definition. Find the line:

type: ClusterIP

and change it to:

type: NodePort

Save and exit (in nano: CTRL+X, then Y and ENTER).

To find out which port was assigned to the service:

kubectl get svc -n kubernetes-dashboard

The output will contain a line similar to:

kubernetes-dashboard NodePort 10.108.57.174 443:32000/TCP

Here a NodePort such as 32000 will have been assigned.
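As an alternative to editing the service interactively, the type can be switched with a single patch; a minimal sketch using the same service and namespace:

kubectl -n kubernetes-dashboard patch service kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'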

kubectl get svc -n kubernetes-dashboard

[root@oyk-master1 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.97.33.39   <none>        8000/TCP        70m
kubernetes-dashboard        NodePort    10.107.5.83   <none>        443:31994/TCP   70m

You can now open the Dashboard in a browser at:

https://<node-ip>:32000

(in the output above the assigned NodePort is 31994).

kubectl -n kubernetes-dashboard create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6IkVWZVhNWDZ6cndJTERXckg0dnRGMnVRekxoZURtSmRSM2JscWxpUWJVVkkifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzM5Mzg4NzMyLCJpYXQiOjE3MzkzODUxMzIsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiODViMDRhNmQtMmJkYy00ZDc1LTgyMzktODQwODYxZDNlYmVhIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMGJiNjUwOGMtZDZlYi00ZGM3LTk2OTgtNDBmNDU0MTgwNGI4In19LCJuYmYiOjE3MzkzODUxMzIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.nKDIh_6JvyTgvAxVSNlMVSf4m-d9jhceTtN_rjYeOavgfjX035p1S58wANtYNlTKKfUZd3DrZNiIThLjMsoGVrWDyVSYybEdeh6ntdbQPiQnX3GSrNVdJ-sbknUWQcjLLVYVYWIiG0whAFNandJytUe-GcsqwcjiaI552Edem-Njw5X52eZBw_UGtSKFRl5TLHweXSmFqymUvIpG5CSnjuPXfVqtSMwzFqJsVzRVqyaqMBvoTNgk1qxUjgspZIY80l8uH4gGUEJkFLLjZG8-KEbTP9deu7sIsDStv0EALv_bBh7z7IW7RTTULnKPqsOngcQtD1AQgUh1VO06_3enNg

eyJhbGciOiJSUzI1NiIsImtpZCI6IkVWZVhNWDZ6cndJTERXckg0dnRGMnVRekxoZURtSmRSM2JscWxpUWJVVkkifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzM5NDQ5NTQxLCJpYXQiOjE3Mzk0NDU5NDEsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiMTUyNmNhZTktNjg0OC00NjdhLWI4OGUtMDdjYTdlMTcxZWUzIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiMGJiNjUwOGMtZDZlYi00ZGM3LTk2OTgtNDBmNDU0MTgwNGI4In19LCJuYmYiOjE3Mzk0NDU5NDEsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.lHBZ4XDnCFrXXHcIcFR6GF5wkDoGwNf3o7h3uiN80tq9VJkktoEmexjRV4nujQAxCFZMEdItJLPTEVu0kbV3f7cN3KJ3mRlxw6NOh8U3hoLCHwhtqAEyZsal_t9E1DUiLNuh0h2wS6YuAjwggJN9OrtKGguH-ui19-y20vr64mNAWmXiE1btkfnHSIUMslwjbKHYvOL_Uv6_zme1cw5ESq5i81Dnn_9SVwLtoE9PG5oGZAMUAt9Szavi7Td87nxPiK0n-AXpD1SKNE55gZftui87PO_9t6vnS2tHFrfHvtVcCmY2ajHY7sg5n9fe-0nrnkihHmtZgwvjaQV_Cg51ig

If you put --- between them, you can write several YAML documents inside a single file:

---
apiVersion: v1

---
apiVersion: v1

---
apiVersion: v1
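For example, a Pod and its Service could be combined in one file and applied with a single command. A minimal sketch with hypothetical names (demo-pod, demo-svc), written the same way as the manifests above:

cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Pod
metadata:
  name: demo-pod        # hypothetical name, for illustration only
  labels:
    app: demo
spec:
  containers:
  - name: web
    image: nginx:1.26
---
apiVersion: v1
kind: Service
metadata:
  name: demo-svc        # hypothetical name, for illustration only
spec:
  selector:
    app: demo
  ports:
  - port: 80
    targetPort: 80
EOF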

[root@oyk-master1 ~]# kubectl apply -f opengist.yaml
service/opengist-svc created
[root@oyk-master1 ~]#
[root@oyk-master1 ~]#
[root@oyk-master1 ~]#
[root@oyk-master1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
dpl1-55b84678c9-k4t5v 1/1 Running 1 (80m ago) 19h
dpl1-55b84678c9-kkx54 1/1 Running 1 (80m ago) 19h
dpl1-55b84678c9-mj4l8 1/1 Running 1 (80m ago) 19h
pod06 1/1 Running 1 (80m ago) 18h
webnginx-2sp5m 1/1 Running 1 (80m ago) 19h
webnginx-56kmq 1/1 Running 1 (80m ago) 19h
webnginx-87q2x 1/1 Running 1 (80m ago) 19h
webnginx-mbfz5 1/1 Running 1 (80m ago) 19h
webserver-kgqq9 1/1 Running 1 (80m ago) 19h
webserver-w9b5r 1/1 Running 1 (80m ago) 19h
[root@oyk-master1 ~]#
[root@oyk-master1 ~]#
[root@oyk-master1 ~]# kubectl get svc
NAME             TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)          AGE
google-service   ExternalName   <none>          google.com.tr   <none>           25h
kubernetes       ClusterIP      10.96.0.1       <none>          443/TCP          2d5h
nginx-headless   ClusterIP      None            <none>          80/TCP           24h
nginx-nodeport   NodePort       10.106.17.106   <none>          80:32408/TCP     27h
nginx-svc        ClusterIP      10.109.90.116   <none>          80/TCP,443/TCP   27h
opengist-svc     NodePort       10.110.21.53    <none>          80:30080/TCP     24s
[root@oyk-master1 ~]#

kubeadm: for vanilla Kubernetes

k3s: developed by SUSE, aimed at single-node / lightweight setups

kubernetes: kubeadm

Red Hat OpenShift; its open-source counterpart is OKD

rke2: Rancher Kubernetes Engine 2

http://192.168.56.112:30080/all # the history listing above is also published on the Opengist instance deployed earlier
