refactor(common/cool/coolconfig): 修改RPC配置字段类型

将RPC字段从uint16类型更改为string类型的Address字段,
以支持更灵活的地址配置。同时更新了配置初始化逻辑,
从server.rpc改为server.address作为配置键。
This commit is contained in:
昔念
2026-01-25 03:40:29 +08:00
parent 392061df04
commit 32f57732fe
20 changed files with 166 additions and 196 deletions

View File

@@ -14,7 +14,7 @@ type sConfig struct {
GameOnlineID uint16 `json:"port_bl"` //这个是命令行输入的参数
ServerInfo ServerList
RPC uint16 //rpc端口
Address string //rpc端口
}
type ServerList struct {
@@ -69,9 +69,10 @@ func newConfig() *sConfig {
config := &sConfig{
AutoMigrate: GetCfgWithDefault(ctx, "blazing.autoMigrate", g.NewVar(false)).Bool(),
Name: GetCfgWithDefault(ctx, "server.name", g.NewVar("")).String(),
Eps: GetCfgWithDefault(ctx, "blazing.eps", g.NewVar(false)).Bool(),
LoginPort: string(GetCfgWithDefault(ctx, "server.port", g.NewVar("8080")).String()),
RPC: GetCfgWithDefault(ctx, "server.rpc", g.NewVar("8080")).Uint16(),
Eps: GetCfgWithDefault(ctx, "blazing.eps", g.NewVar(false)).Bool(),
LoginPort: string(GetCfgWithDefault(ctx, "server.port", g.NewVar("8080")).String()),
Address: GetCfgWithDefault(ctx, "server.address", g.NewVar("8080")).String(),
//GamePort: GetCfgWithDefault(ctx, "server.game", g.NewVar("8080")).Uint64s(),
File: &file{

File diff suppressed because one or more lines are too long

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"log"
"net/http"
config "blazing/modules/config/service"
@@ -15,13 +14,11 @@ import (
"github.com/gogf/gf/v2/util/gconv"
)
var rpcport = gconv.String(cool.Config.RPC)
// Define the server handler
type ServerHandler struct{}
// 实现踢人
func (h *ServerHandler) Kick(ctx context.Context, userid uint32) error {
func (*ServerHandler) Kick(_ context.Context, userid uint32) error {
cool.Logger.Info(context.TODO(), "服务器收到踢人")
useid1, err := share.ShareManager.GetUserOnline(userid)
@@ -41,7 +38,7 @@ func (h *ServerHandler) Kick(ctx context.Context, userid uint32) error {
}
// 注册logic服务器
func (h *ServerHandler) RegisterLogic(ctx context.Context, id, port uint16) error {
func (*ServerHandler) RegisterLogic(ctx context.Context, id, port uint16) error {
cool.Logger.Debug(context.Background(), "注册logic服务器", id, port)
//TODO 待修复滚动更新可能导致的玩家可以同时在旧服务器和新服务器同时在线的bug
@@ -62,18 +59,28 @@ func (h *ServerHandler) RegisterLogic(ctx context.Context, id, port uint16) erro
}
func StartServer() {
// func StartServer() {
// // create a new server instance
// rpcServer := jsonrpc.NewServer(jsonrpc.WithReverseClient[cool.ClientHandler](""))
// rpcServer.Register("", &ServerHandler{})
// cool.Logger.Debug(context.Background(), "jsonrpc server start", rpcport)
// // go time.AfterFunc(3000, func() {
// // testjsonrpc()
// // })
// err := http.ListenAndServe("0.0.0.0:"+rpcport, rpcServer)
// cool.Logger.Debug(context.Background(), "jsonrpc server fail", err)
// }
func CServer() *jsonrpc.RPCServer {
// create a new server instance
rpcServer := jsonrpc.NewServer(jsonrpc.WithReverseClient[cool.ClientHandler](""))
rpcServer.Register("", &ServerHandler{})
cool.Logger.Debug(context.Background(), "jsonrpc server start", rpcport)
// go time.AfterFunc(3000, func() {
// testjsonrpc()
// })
err := http.ListenAndServe("0.0.0.0:"+rpcport, rpcServer)
cool.Logger.Debug(context.Background(), "jsonrpc server fail", err)
return rpcServer
// err := http.ListenAndServe("0.0.0.0:"+rpcport, rpcServer)
// // cool.Logger.Debug(context.Background(), "jsonrpc server fail", err)
}
var closer jsonrpc.ClientCloser
@@ -84,10 +91,10 @@ func StartClient(id, port uint16, callback any) *struct {
RegisterLogic func(uint16, uint16) error
} {
var rpcaddr = cool.Config.File.Domain
var rpcaddr = "ws://" + cool.Config.File.Domain + gconv.String(cool.Config.Address) + "/rpc"
//rpcaddr = "127.0.0.1"
closer1, err := jsonrpc.NewMergeClient(context.Background(),
"ws://"+rpcaddr+":"+rpcport, "", []interface{}{
rpcaddr, "", []interface{}{
&RPCClient,
}, nil, jsonrpc.WithClientHandler("", callback),
jsonrpc.WithReconnFun(func() { RPCClient.RegisterLogic(id, port) }),
@@ -97,19 +104,13 @@ func StartClient(id, port uint16, callback any) *struct {
}
//if port != 0 { //注册logic
RPCClient.RegisterLogic(id, port)
defer RPCClient.RegisterLogic(id, port)
//}
closer = closer1
return &RPCClient
}
// 关闭客户端
func CloseClient() {
if closer != nil {
closer()
}
return &RPCClient
}
// Setup RPCClient with reverse call handler

View File

@@ -5,6 +5,7 @@ import (
"blazing/logic/service/player"
"context"
"fmt"
"time"
)
type Broadcast struct {
@@ -41,9 +42,17 @@ func (s *Server) QuitSelf(a int) error {
s.quit = true
if a != 0 {
player.Mainplayer.Range(func(key uint32, value *player.Player) bool {
value.Kick()
value.Kick(1)
return true
})
} else {
go func() {
<-time.After(10 * time.Minute)
player.Mainplayer.Range(func(key uint32, value *player.Player) bool {
value.Kick(1)
return true
})
}()
}
return nil

View File

@@ -52,7 +52,7 @@ func processMonID(bm string) string {
// data: 包含挑战Boss信息的输入数据
// player: 当前玩家对象
// 返回: 战斗结果和错误码
func (h Controller) PlayerFightBoss(data *fight.ChallengeBossInboundInfo, p *player.Player) (result *fight.NullOutboundInfo, err errorcode.ErrorCode) {
func (Controller) PlayerFightBoss(data *fight.ChallengeBossInboundInfo, p *player.Player) (result *fight.NullOutboundInfo, err errorcode.ErrorCode) {
if !p.CanFight() {
return nil, errorcode.ErrorCodes.ErrPokemonNoStamina
}
@@ -153,7 +153,7 @@ func (h Controller) PlayerFightBoss(data *fight.ChallengeBossInboundInfo, p *pla
// data: 包含战斗野怪信息的输入数据
// player: 当前玩家对象
// 返回: 战斗结果和错误码
func (h Controller) OnPlayerFightNpcMonster(data1 *fight.FightNpcMonsterInboundInfo, p *player.Player) (result *fight.NullOutboundInfo, err errorcode.ErrorCode) {
func (Controller) OnPlayerFightNpcMonster(data1 *fight.FightNpcMonsterInboundInfo, p *player.Player) (result *fight.NullOutboundInfo, err errorcode.ErrorCode) {
if !p.CanFight() {
return nil, errorcode.ErrorCodes.ErrSystemError
}

View File

@@ -40,7 +40,7 @@ func cleanup() {
log.Println("执行优雅清理资源...")
player.Mainplayer.Range(func(key uint32, value *player.Player) bool {
value.Kick()
value.Kick(1)
return true
})

View File

@@ -15,7 +15,7 @@ import (
)
// Compare 比较两个1v1战斗动作的执行优先级核心逻辑
func (f *FightC) Compare(a, b action.BattleActionI) (action.BattleActionI, action.BattleActionI) {
func (*FightC) Compare(a, b action.BattleActionI) (action.BattleActionI, action.BattleActionI) {
// 动作本身的优先级比较
p1 := b.Priority() - a.Priority()
if p1 > 0 { // 对手优先级更高

View File

@@ -37,7 +37,7 @@ type SelectSkillAction struct {
}
// Priority 返回动作优先级
func (s *SelectSkillAction) Priority() int {
func (*SelectSkillAction) Priority() int {
return int(PlayerOperations.SelectSkill)
}
@@ -70,7 +70,7 @@ type ActiveSwitchAction struct {
}
// Priority 返回动作优先级
func (a *ActiveSwitchAction) Priority() int {
func (*ActiveSwitchAction) Priority() int {
return int(PlayerOperations.ActiveSwitch)
}
@@ -83,6 +83,6 @@ type UseItemAction struct {
}
// Priority 返回动作优先级
func (u *UseItemAction) Priority() int {
func (*UseItemAction) Priority() int {
return int(PlayerOperations.UsePotion)
}

View File

@@ -3,6 +3,8 @@ package effect
import (
"blazing/logic/service/fight/action"
"blazing/logic/service/fight/input"
"github.com/alpacahq/alpacadecimal"
)
// 60.回复造成伤害的6%
@@ -20,7 +22,7 @@ func (e *NewSel700) Skill_Useed() bool {
}
e.Input.Heal(
e.Ctx().Our, &action.SelectSkillAction{}, e.Ctx().Our.SumDamage.Div(e.Args()[0]),
e.Ctx().Our, &action.SelectSkillAction{}, e.Ctx().Our.SumDamage.Div(e.Args()[0].Div(alpacadecimal.NewFromInt(100))),
)
return true
}

View File

@@ -4,6 +4,8 @@ import (
"blazing/logic/service/fight/info"
"blazing/logic/service/fight/input"
"blazing/logic/service/fight/node"
"github.com/alpacahq/alpacadecimal"
)
// 可以抵挡n点伤害
@@ -20,16 +22,17 @@ func (e *Effect49) DamageSubEx(t *info.DamageZone) bool {
if e.Ctx().SkillEntity.Category() == info.Category.STATUS {
return true
}
//fmt.Println("Effect49_o", t.Damage)
if t.Type == info.DamageType.Red {
if e.Args()[0].Cmp(t.Damage) == -1 {
t.Damage = t.Damage.Sub(e.Args()[0])
}
if t.Type != info.DamageType.Red {
return true
}
//fmt.Println("Effect49_n", t.Damage)
if t.Damage.Cmp(e.Args()[0]) > -1 {
t.Damage = t.Damage.Sub(e.Args()[0])
} else {
t.Damage = alpacadecimal.Zero
}
return true
}

View File

@@ -15,7 +15,7 @@ import (
)
// processSkillAttack 处理技能攻击逻辑
func (f *FightC) processSkillAttack(attacker, defender *input.Input, skill *info.SkillEntity) {
func (*FightC) processSkillAttack(attacker, defender *input.Input, skill *info.SkillEntity) {
skill.AttackTimeC(attacker.GetProp(5, true)) //计算命中
defender.Exec(func(effect input.Effect) bool { //计算闪避,然后修改对方命中),同时相当于计算属性无效这种
@@ -357,7 +357,7 @@ func (f *FightC) enterturn(firstAttack, secondAttack *action.SelectSkillAction)
f.Broadcast(func(fighter *input.Input) {
for _, switchAction := range f.Switch {
if fighter.Player.GetInfo().UserID != switchAction.Reason.UserId {
// println("切精灵", switchAction.Reason.UserId, switchAction.Reason.ID)
// println("切精灵", switchAction.Reason.UserId, switchAction.Reason.ID)
fighter.Player.SendPackCmd(2407, &switchAction.Reason)
}
}

View File

@@ -3,7 +3,6 @@ package info
import (
"blazing/common/data"
"blazing/modules/player/model"
"fmt"
"github.com/tnnmigga/enum"
)
@@ -259,11 +258,6 @@ type NoteReadyToFightInfo struct {
OpponentPetList []ReadyFightPetInfo `fieldDesc:"敌方的精灵信息 如果是野怪 那么再给客户端发送这个包体时就提前生成好了这只精灵的PetInfo,然后把从PetInfo中把部分信息写入到这个敌方的精灵信息中再发送这个包结构体" serialize:"lengthFirst,lengthType=uint16,type=structArray"`
}
// 当A和B都 这时候给双方回复开始战斗包
func (t *NoteReadyToFightInfo) onBothFinished() {
fmt.Println("A和B都已完成触发onBothFinished")
}
// ReadyFightPetInfo 准备战斗的精灵信息结构体ReadyFightPetInfo类
type ReadyFightPetInfo struct {
// 精灵ID@UInt long

View File

@@ -74,6 +74,7 @@ func (f *FightC) battleLoop() {
if f.Info.Status == info.BattleMode.FIGHT_WITH_NPC {
if f.Reason == info.BattleOverReason.Cacthok {
f.WinnerId = f.ownerID
f.Opp.Player.GetInfo().PetList[0].EffectInfo = nil //清空特性信息
f.Our.Player.(*player.Player).Service.Pet.PetAdd(&f.Opp.Player.GetInfo().PetList[0])
f.Our.Player.SendPackCmd(2409, &info.CatchMonsterOutboundInfo{

View File

@@ -221,12 +221,16 @@ func (p *Player) ItemAdd(ItemId, ItemCnt uint32) (result bool) {
return false
}
func (player1 *Player) Kick() {
func (player1 *Player) Kick(qtype int) {
if player1.IsLogin {
//取成功,否则创建
//player1.Save() //先保存数据再返回
head := common.NewTomeeHeader(1001, player1.Info.UserID)
head.Result = uint32(errorcode.ErrorCodes.ErrXinPlanSleepMode)
head.Result = uint32(errorcode.ErrorCodes.ErrAccountLoggedInElsewhere)
if qtype == 1 {
head.Result = uint32(errorcode.ErrorCodes.ErrXinPlanSleepMode)
}
//实际上这里有个问题,会造成重复保存问题
player1.SendPack(head.Pack(nil))

View File

@@ -34,11 +34,10 @@ func KickPlayer(userid uint32) error { //踢出玩家
//TODO 返回错误码
//var player *entity.Player
if player1, ok := Mainplayer.Load(userid); ok {
player1.Kick()
player1.Kick(0)
}
//return player
return nil
}

View File

@@ -1,7 +1,6 @@
package cmd
import (
"blazing/common/rpc"
"blazing/common/socket"
"blazing/cool"
"blazing/logic/controller"
@@ -11,7 +10,7 @@ import (
var defaultPort = gconv.Int(cool.Config.LoginPort) //读入默认的端口
func reg() {
go rpc.StartServer()
// go rpc.StartServer()
controller.Init(false)
//go rpcserver() //对login tcp启动
//ants.Submit(rpcserver)

View File

@@ -1,16 +1,15 @@
server:
name: "blazing server"
address: ":59480" #前端服务器地址
port: 53388 #验证服务器端口
rpc: 56409 #rpc服务端口
address: ":59480" #前端服务器+rpc地址
port: 33388 #验证服务器端口
openapiPath: "/api.json"
swaggerPath: "/swagger"
clientMaxBodySize:
1048576 # 100MB in bytes 1*1024*1024
1048576 # 1MB in bytes 1*1024*1024
# 平滑重启特性
graceful: true # 是否开启平滑重启特性开启时将会在本地增加10000的本地TCP端口用于进程间通信默认false
gracefulTimeout: 2 # 父进程在平滑重启后多少秒退出默认2秒若请求耗时大于该值可能会导致请求中断
gracefulShutdownTimeout: 5 # 关闭Server时如果存在正在执行的HTTP请求Server等待多少秒才执行强行关闭
# graceful: true # 是否开启平滑重启特性开启时将会在本地增加10000的本地TCP端口用于进程间通信默认false
# gracefulTimeout: 2 # 父进程在平滑重启后多少秒退出默认2秒若请求耗时大于该值可能会导致请求中断
# gracefulShutdownTimeout: 5 # 关闭Server时如果存在正在执行的HTTP请求Server等待多少秒才执行强行关闭
logger:
level: "all"
stdout: true
@@ -63,7 +62,7 @@ blazing:
file:
mode: "zhuanzhuan" # local | minio | oss
#前端上传地址,因为放弃本地,所以这个弃用了 ,现在被当成后端验证服务器地址
domain: "192.140.190.212"
domain: "127.0.0.1"
# oss配置项兼容 minio oss 需要配置bucket公开读
oss:
endpoint: "192.168.192.110:9000"

View File

@@ -66,11 +66,12 @@ func (c *BaseSysUserController) GetSession(ctx context.Context, req *SessionReq)
}
if cool.Config.ServerInfo.IsDebug != 0 {
res.LoginAddr = "192.168.1.44:53388"
res.LoginAddr = "192.168.1.44" + ":" + cool.Config.LoginPort
} else {
res.LoginAddr = cool.Config.File.Domain + ":" + cool.Config.LoginPort
}
res.Server = config.NewServerService().GetPort()
return
}

View File

@@ -1,6 +1,7 @@
package middleware
import (
"blazing/common/rpc"
"blazing/cool"
"blazing/modules/base/config"
"blazing/modules/config/service"
@@ -36,6 +37,7 @@ func MiddlewareCORS(r *ghttp.Request) {
func init() {
if config.Config.Middleware.Authority.Enable {
g.Server().BindMiddleware("/admin/*/open/*", BaseAuthorityMiddlewareOpen)
g.Server().BindMiddleware("/rpc/*", BaseAuthorityMiddlewareOpen)
g.Server().BindMiddleware("/admin/*/comm/*", BaseAuthorityMiddlewareComm)
g.Server().BindMiddleware("/admin/*", BaseAuthorityMiddleware)
// g.Server().BindMiddleware("/*", AutoI18n)
@@ -45,20 +47,29 @@ func init() {
if config.Config.Middleware.Log.Enable {
g.Server().BindMiddleware("/admin/*", BaseLog)
}
tt := rpc.CServer()
g.Server().BindHandler("/rpc/*", func(r *ghttp.Request) {
tt.ServeHTTP(r.Response.Writer, r.Request)
})
g.Server().BindHandler("/server/*", func(r *ghttp.Request) {
tt := new(ServerHandler)
servert := new(ServerHandler)
id := gconv.Uint16(r.URL.Query().Get("id"))
tt.ServerList = service.NewServerService().StartUPdate(id)
tt.isinstall = gconv.Uint32(r.URL.Query().Get("isinstall"))
upgrader := gws.NewUpgrader(tt, &gws.ServerOption{
servert.isinstall = gconv.Uint32(r.URL.Query().Get("isinstall"))
if servert.isinstall != 0 {
servert.ServerList = service.NewServerService().StartUPdate(id)
}
Authorize: func(rt *http.Request, session gws.SessionStorage) bool {
upgrader := gws.NewUpgrader(servert, &gws.ServerOption{
Authorize: func(_ *http.Request, _ gws.SessionStorage) bool {
tokenString := r.URL.Query().Get("Authorization")
token, err := jwt.ParseWithClaims(tokenString, &cool.Claims{}, func(token *jwt.Token) (interface{}, error) {
token, err := jwt.ParseWithClaims(tokenString, &cool.Claims{}, func(_ *jwt.Token) (interface{}, error) {
return []byte(config.Config.Jwt.Secret), nil
})

View File

@@ -198,145 +198,91 @@ else
fi
#!/bin/bash
# ===== 优雅终止logic会话解决screen -ls卡住问题=====
echo "===== 优雅终止logic会话 ====="
# 替换为你实际的screen名称示例logic
SCREEN_NAME="logic"
LOG_FILE="./screen_logic_exit.log"
# 核心无任何可能阻塞的命令全程实时输出100%不卡住 + 彻底清理所有logic会话
set -euo pipefail
# 调试开关如需详细日志取消set -x注释
set -o pipefail
export PS4='[DEBUG] ${BASH_SOURCE}:${LINENO} - ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
# set -x
# ===== 仅需配置这1个变量 =====
SCREEN_NAME="logic"
# ========== 核心函数(保留你的原版) ==========
wait_for_process_exit() {
local pidKilled=$1
local begin=$(date +%s)
local end
local timeout=60
while kill -0 $pidKilled > /dev/null 2>&1; do
echo -n "."
sleep 1;
end=$(date +%s)
if [ $((end - begin)) -gt $timeout ]; then
echo -e "\n⚠ 等待进程$pidKilled退出超时已等${timeout}秒)"
break;
fi
done
if ! kill -0 $pidKilled > /dev/null 2>&1; then
echo -e "\n✅ 进程$pidKilled已退出"
# ===== 颜色输出函数(确保能看到输出)=====
red() { echo -e "\033[31m[$(date +%H:%M:%S)] $1\033[0m"; }
green() { echo -e "\033[32m[$(date +%H:%M:%S)] $1\033[0m"; }
yellow() { echo -e "\033[33m[$(date +%H:%M:%S)] $1\033[0m"; }
# ===== 新增彻底清理screen会话核心修复=====
clean_screen_sessions() {
local sess_id=$1
# 1. 先优雅关闭screen会话
screen -S "${sess_id}" -X quit >/dev/null 2>&1 || true
sleep 0.3
# 2. 提取会话PID并强制杀死
local sess_pid=$(echo "${sess_id}" | cut -d. -f1)
if [ -n "${sess_pid}" ]; then
kill -9 "${sess_pid}" >/dev/null 2>&1 || true
# 3. 杀死该PID的所有子进程
pkill -9 -P "${sess_pid}" >/dev/null 2>&1 || true
fi
}
pid_is_alive() {
local pid=$1
if [ -n "$pid" ] && kill -0 "$pid" > /dev/null 2>&1; then
return 0
else
return 1
fi
}
# ===== 强制关闭所有阻塞的后台进程(先清场)=====
green "【步骤1/5】清理脚本自身可能的阻塞进程"
pkill -f "screen_clean_*" >/dev/null 2>&1 || true
pkill -9 -f "pgrep -f SCREEN -S ${SCREEN_NAME}" >/dev/null 2>&1 || true
get_inner_procs() {
local screen_pid=$1
local procs=$(timeout 5 pstree -p "$screen_pid" 2>/dev/null | grep -oE '\([0-9]+\)' | tr -d '()' | grep -v "$screen_pid" | sort -u)
if [ -z "$procs" ]; then
procs=$(pgrep -f "SCREEN -S $SCREEN_NAME" 2>/dev/null | grep -v "$screen_pid")
fi
echo "$procs"
}
# ===== 非阻塞查找进程+会话关键延长超时到1秒查更全=====
green "【步骤2/5】查找${SCREEN_NAME}相关进程/会话(非阻塞)"
# 先查screen会话ID1秒超时避免漏查
SCREEN_SESS=$(timeout 1 screen -ls 2>/dev/null | grep -E "[0-9]+\\.${SCREEN_NAME}" | grep -v "Dead" | awk '{print $1}' || true)
# 再查进程PID
SCREEN_PIDS=$(timeout 1 pgrep -f "SCREEN -S ${SCREEN_NAME}" | grep -v $$ || true)
screen_send_cmd() {
local cmd="$1"
local screen_full_id="$2"
# ^M需手动生成Ctrl+v+回车
screen -S "$screen_full_id" -p 0 -X stuff "${cmd}^M"
sleep 1
}
# ========== 核心修复给screen -ls加超时避免卡住 ==========
echo "===== 检测screen会话5秒超时 ====="
# 关键修改给整个提取命令加5秒超时超时则直接设为空
SCREEN_FULL_ID=$(timeout 5 screen -ls 2>/dev/null | grep -E "[0-9]+\.$SCREEN_NAME" | grep -v "Dead\|Invalid" | head -1 | awk '{print $1}')
# 无论是否超时/卡住只要SCREEN_FULL_ID为空就直接走后续
if [ -z "$SCREEN_FULL_ID" ]; then
echo " 未找到$SCREEN_NAME会话或screen -ls执行超时直接执行后续脚本"
# 合并需要清理的目标
if [ -z "${SCREEN_SESS}" ] && [ -z "${SCREEN_PIDS}" ]; then
green "✅ 未找到${SCREEN_NAME}相关进程/会话,无需清理"
else
# 找到会话:执行终止逻辑
SCREEN_PID=$(echo "$SCREEN_FULL_ID" | cut -d. -f1)
echo "✅ 找到$SCREEN_NAME主PID$SCREEN_PID | 完整ID$SCREEN_FULL_ID"
# 导出退出前日志
echo -e "\n===== 【退出前】$SCREEN_NAME 内程序实时log ====="
screen -S "$SCREEN_FULL_ID" -p 0 -X hardcopy -h "$LOG_FILE" 2>/dev/null
cat "$LOG_FILE"
# 给子进程发SIGTERM并等待
echo -e "\n===== 给子进程发优雅退出信号(SIGTERM) ====="
INNER_PROCS=$(get_inner_procs "$SCREEN_PID")
if [ -z "$INNER_PROCS" ]; then
echo " 未检测到$SCREEN_NAME下的子进程"
else
echo "待处理子进程:$INNER_PROCS"
for pid in $INNER_PROCS; do
if pid_is_alive "$pid"; then
echo -n "📌 终止进程$pid并等待退出"
kill -15 "$pid"
if [ $? -eq 0 ]; then
wait_for_process_exit "$pid"
else
echo "❌ 进程$pid发送SIGTERM失败"
fi
else
echo " 进程$pid已不存在跳过"
fi
yellow "⚠️ 找到${SCREEN_NAME}会话:${SCREEN_SESS:-无}"
yellow "⚠️ 找到${SCREEN_NAME}进程:${SCREEN_PIDS:-无}"
# ===== 非阻塞强制清理(先清会话,再清进程)=====
green "【步骤3/5】强制清理${SCREEN_NAME}进程及会话"
# 1. 清理所有screen会话
if [ -n "${SCREEN_SESS}" ]; then
for sess in ${SCREEN_SESS}; do
yellow "🔧 清理会话:${sess}"
clean_screen_sessions "${sess}"
done
fi
# 验证子进程退出状态
echo -e "\n===== 验证子进程退出状态 ====="
REMAIN_PROCS=$(get_inner_procs "$SCREEN_PID")
if [ -z "$REMAIN_PROCS" ]; then
echo "✅ $SCREEN_NAME内部所有程序已退出"
else
echo "⚠️ 仍有残留进程:$REMAIN_PROCS"
# 2. 清理剩余进程(兜底)
if [ -n "${SCREEN_PIDS}" ]; then
for pid in ${SCREEN_PIDS}; do
yellow "🔧 清理进程:${pid}"
kill -9 "${pid}" >/dev/null 2>&1 || true
pkill -9 -P "${pid}" >/dev/null 2>&1 || true
done
fi
# 投递exit命令退出screen
echo -e "\n===== 优雅退出screen会话 ====="
echo "向$SCREEN_NAME投递exit命令..."
screen_send_cmd "exit" "$SCREEN_FULL_ID"
# 等待并验证最终状态
echo -n "等待screen会话自动退出"
begin=$(date +%s)
while timeout 1 screen -ls "$SCREEN_NAME" 2>/dev/null | grep -q -E "[0-9]+\.$SCREEN_NAME"; do
echo -n "."
sleep 1
if [ $((date +%s - begin)) -gt 30 ]; then
echo -e "\n⚠ 等待screen退出超时30秒"
break
fi
done
echo -e "\n===== 最终验证 ====="
if timeout 1 screen -ls "$SCREEN_NAME" 2>/dev/null | grep -q -E "[0-9]+\.$SCREEN_NAME"; then
echo "❌ $SCREEN_NAME会话最终仍未退出"
# 短暂等待0.5秒,非阻塞)
sleep 0.5
# ===== 验证清理结果(查会话+进程,双重验证)=====
green "【步骤4/5】验证清理结果会话+进程双重检查)"
FINAL_SESS=$(timeout 1 screen -ls 2>/dev/null | grep -E "[0-9]+\\.${SCREEN_NAME}" | grep -v "Dead" || true)
FINAL_PIDS=$(timeout 1 pgrep -f "SCREEN -S ${SCREEN_NAME}" | grep -v $$ || true)
if [ -z "${FINAL_SESS}" ] && [ -z "${FINAL_PIDS}" ]; then
green "✅ ${SCREEN_NAME}进程/会话已全部清理完成"
else
echo "✅ $SCREEN_NAME会话已完全退出"
red "❌ 仍有残留:会话[${FINAL_SESS}] 进程[${FINAL_PIDS}],再次强制清理"
# 终极兜底:强制杀死所有相关进程
pkill -9 -f "${SCREEN_NAME}" >/dev/null 2>&1 || true
fi
fi
# ========== 后续脚本:必执行,永不卡住 ==========
echo -e "\n===== 终止logic会话流程结束继续执行后续脚本 ====="
# 示例后续逻辑
# echo "执行后续任务:备份日志、启动新进程等..."
# ===== 最终收尾 =====
green "【步骤5/5】${SCREEN_NAME}会话清理流程结束"
green "✅ 自动化部署后续流程可正常执行"