-
Notifications
You must be signed in to change notification settings - Fork 305
Add detail log for debug. #1226
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||
|---|---|---|---|---|---|---|---|---|
|
|
@@ -2,6 +2,9 @@ | |||||||
| from typing import List | ||||||||
| from ...batch import Batch, Req | ||||||||
| from lightllm.server.router.req_queue.base_queue import BaseQueue | ||||||||
| from lightllm.utils.log_utils import init_logger | ||||||||
|
|
||||||||
| logger = init_logger(__name__) | ||||||||
|
|
||||||||
|
|
||||||||
| class ChunkedBeamContinuesBatchQueue(BaseQueue): | ||||||||
|
|
@@ -119,6 +122,8 @@ def generate_new_batch(self, current_batch: Batch): | |||||||
| new_batch = Batch(uuid.uuid4().int, can_run_list, dp_size_in_node=self.dp_size_in_node) | ||||||||
|
|
||||||||
| for req in abort_req_list: | ||||||||
| req: Req = req | ||||||||
| logger.debug(f"router abort req id {req.request_id} shm_index: {req.index_in_shm_mem}") | ||||||||
|
Comment on lines
+125
to
+126
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The line
Suggested change
|
||||||||
| self.free_aborted_req_cpu_cache_pages(req) | ||||||||
| self.router.shm_req_manager.put_back_req_obj(req) | ||||||||
| self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count :] | ||||||||
|
|
||||||||
| Original file line number | Diff line number | Diff line change | ||||||
|---|---|---|---|---|---|---|---|---|
|
|
@@ -3,6 +3,9 @@ | |||||||
| from ...batch import Batch, Req | ||||||||
| from lightllm.server.router.req_queue.base_queue import BaseQueue | ||||||||
| from lightllm.common.basemodel.infer_lock import g_router_lock | ||||||||
| from lightllm.utils.log_utils import init_logger | ||||||||
|
|
||||||||
| logger = init_logger(__name__) | ||||||||
|
|
||||||||
|
|
||||||||
| class ChunkedPrefillQueue(BaseQueue): | ||||||||
|
|
@@ -96,6 +99,8 @@ def generate_new_batch(self, current_batch: Batch): | |||||||
| if len(can_run_list) != 0: | ||||||||
| new_batch = Batch(uuid.uuid4().int, can_run_list, dp_size_in_node=self.dp_size_in_node) | ||||||||
| for req in abort_req_list: | ||||||||
| req: Req = req | ||||||||
| logger.debug(f"router abort req id {req.request_id} shm_index: {req.index_in_shm_mem}") | ||||||||
|
Comment on lines
+102
to
+103
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The line
Suggested change
|
||||||||
| self.free_aborted_req_cpu_cache_pages(req) | ||||||||
| self.router.shm_req_manager.put_back_req_obj(req) | ||||||||
| self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count :] | ||||||||
|
|
||||||||
| Original file line number | Diff line number | Diff line change | ||||||
|---|---|---|---|---|---|---|---|---|
|
|
@@ -4,6 +4,9 @@ | |||||||
| from ...batch import Batch, Req | ||||||||
| from lightllm.server.router.req_queue.base_queue import BaseQueue | ||||||||
| from lightllm.common.basemodel.infer_lock import g_router_lock | ||||||||
| from lightllm.utils.log_utils import init_logger | ||||||||
|
|
||||||||
| logger = init_logger(__name__) | ||||||||
|
|
||||||||
|
|
||||||||
| class NIXLPDQueue(BaseQueue): | ||||||||
|
|
@@ -87,6 +90,8 @@ def generate_new_batch(self, current_batch: Batch): | |||||||
| if len(can_run_list) != 0: | ||||||||
| new_batch = Batch(uuid.uuid4().int, can_run_list, dp_size_in_node=self.dp_size_in_node) | ||||||||
| for req in abort_req_list: | ||||||||
| req: Req = req | ||||||||
| logger.debug(f"router abort req id {req.request_id} shm_index: {req.index_in_shm_mem}") | ||||||||
|
Comment on lines
+93
to
+94
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The line
Suggested change
|
||||||||
| self.free_aborted_req_cpu_cache_pages(req) | ||||||||
| self.router.shm_req_manager.put_back_req_obj(req) | ||||||||
| self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count :] | ||||||||
|
|
||||||||
| Original file line number | Diff line number | Diff line change | ||||||
|---|---|---|---|---|---|---|---|---|
|
|
@@ -6,6 +6,9 @@ | |||||||
| from ...batch import Batch, Req | ||||||||
| from lightllm.server.router.req_queue.base_queue import BaseQueue | ||||||||
| from lightllm.common.basemodel.infer_lock import g_router_lock | ||||||||
| from lightllm.utils.log_utils import init_logger | ||||||||
|
|
||||||||
| logger = init_logger(__name__) | ||||||||
|
|
||||||||
|
|
||||||||
| class QueueForPDDecode(BaseQueue): | ||||||||
|
|
@@ -52,6 +55,8 @@ def generate_new_batch(self, current_batch: Batch): | |||||||
| if len(can_run_list) != 0: | ||||||||
| new_batch = Batch(uuid.uuid4().int, can_run_list, dp_size_in_node=self.dp_size_in_node) | ||||||||
| for req in abort_req_list: | ||||||||
| req: Req = req | ||||||||
| logger.debug(f"router abort req id {req.request_id} shm_index: {req.index_in_shm_mem}") | ||||||||
|
Comment on lines
+58
to
+59
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The line
Suggested change
|
||||||||
| self.free_aborted_req_cpu_cache_pages(req) | ||||||||
| self.router.shm_req_manager.put_back_req_obj(req) | ||||||||
| self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count :] | ||||||||
|
|
||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
When the
`--detail_log` flag is enabled, the server logs the full content of the user-provided prompt and its corresponding token IDs at the `DEBUG` level. LLM prompts frequently contain sensitive information, including Personally Identifiable Information (PII), proprietary data, or even secrets. Logging this information to persistent storage poses a significant privacy and security risk, as it could lead to unauthorized exposure of sensitive user data if log files are compromised or improperly handled. Additionally, there's a typo in the log message: `samplingparmas` should be `samplingparams`: f"samplingparams: {sampling_params.to_dict()}\n"