#423 Fixed conflict in GPU and Ascend

Merged
zoulq merged 5 commits from dev0328 into master 1 year ago
frelam reviewed 1 year ago
msadapter/pytorch/nn/functional.py
@@ -2130,2 +2130,4 @@
# ms.ops.prelu do not use tensor function of weight, so without cast_to_ms_tensor(weight), not effect.
# weight = cast_to_ms_tensor(weight)
if is_under_ascend_context() and input.ndim < 2:
input = _expand(input, 2)
frelam commented 1 year ago
expand之后需要在结果再降维回来吗
liuzhuoran2333 commented 1 year ago
入参expand之后,prelu返回结果与pytorch一致,无需降维
frelam reviewed 1 year ago
msadapter/pytorch/nn/modules/loss.py
@@ -289,3 +289,3 @@
if target.dtype == ms.int64:
target = target.astype(ms.int32)
out = self.multilabel_margin_loss(input, target)
out, _ = self.multilabel_margin_loss(input, target)
frelam commented 1 year ago
为什么cpu下没有报错。
liuzhuoran2333 commented 1 year ago
MultiLabelMarginLoss在CPU下不支持,testing/ut/pytorch/nn/test_loss.py用例中检测为CPU平台直接return
frelam reviewed 1 year ago
msadapter/pytorch/tensor.py
@@ -183,1 +183,4 @@

def from_numpy(np_data):
# return cast_to_adapter_tensor(ms.Tensor.from_numpy(np_data))
return cast_to_adapter_tensor(ms.Tensor(np_data))
frelam commented 1 year ago
这里换成return cast_to_adapter_tensor(ms.Tensor.from_numpy(np_data))
liuzhuoran2333 changed title from Fixed conflict in GPU and Ascend to [WIP]Fixed conflict in GPU and Ascend 1 year ago
liuzhuoran2333 reviewed 1 year ago
msadapter/pytorch/tensor.py
@@ -183,2 +183,4 @@

def from_numpy(np_data):
return cast_to_adapter_tensor(ms.Tensor.from_numpy(np_data))

liuzhuoran2333 commented 1 year ago
fixed
liuzhuoran2333 changed title from [WIP]Fixed conflict in GPU and Ascend to Fixed conflict in GPU and Ascend 1 year ago
zoulq reviewed 1 year ago
SupportedList.md
@@ -1001,4 +1001,3 @@
- 不支持七维及以上
- Ascend上对float64类型的输入支持有限,部分接口无法处理float64类型入参,需转换为float32或float16类型之后输入
- 对于备注中存在“输入参数有限制”标注的接口,请查看[限制条件清单](ConstraintList.md),获取详细信息
zoulq commented 1 year ago
这一句可以放到表格最前面
zoulq reviewed 1 year ago
msadapter/pytorch/functional.py
@@ -15,3 +15,3 @@
from mindspore.ops._primitive_cache import _get_cache_prim

from msadapter.pytorch.tensor import tensor, cast_to_ms_tensor, cast_to_adapter_tensor
from msadapter.pytorch.tensor import tensor, cast_to_ms_tensor, cast_to_adapter_tensor, from_numpy
zoulq commented 1 year ago
from_numpy不要放到tensor.py下面
zoulq reviewed 1 year ago
msadapter/pytorch/tensor.py
@@ -904,3 +906,3 @@

output = ms.ops.prod(input, axis, keepdim)
return cast_to_adapter_tensor(output)
return cast_to_adapter_tensor(output)ƒ
zoulq commented 1 year ago
f
zoulq reviewed 1 year ago
msadapter/pytorch/tensor.py
@@ -1555,0 +1557,4 @@
if isinstance(p, ms.Tensor):
p = p.numpy()
np_output = np.random.binomial(1, p, size=self.shape)
return from_numpy(np_output).to(dtype=self.dtype)
zoulq commented 1 year ago
tensor方法不要调用adapter function方法
liuzhuoran2333 reviewed 1 year ago
@@ -1000,4 +1001,4 @@
- 不支持layout, device, requires_grad, memory_format
- 不支持七维及以上
- Ascend上对float64类型的输入支持有限,部分接口无法处理float64类型入参,需转换为float32或float16类型之后输入
liuzhuoran2333 commented 1 year ago
fixed
liuzhuoran2333 reviewed 1 year ago
msadapter/pytorch/functional.py
liuzhuoran2333 commented 1 year ago
fixed
liuzhuoran2333 reviewed 1 year ago
liuzhuoran2333 reviewed 1 year ago
zoulq reviewed 1 year ago
msadapter/pytorch/tensor.py
@@ -1568,0 +1567,4 @@
if isinstance(p, ms.Tensor):
p = p.numpy()
np_output = np.random.binomial(1, p, size=self.shape)
return ms.Tensor.from_numpy(np_output).to(dtype=self.dtype)
zoulq commented 1 year ago
这里返回的是mindpore类型的tensor,应该要adapter tensor 类型
liuzhuoran2333 commented 1 year ago
fixed
zoulq merged commit 195caa9f70 into master 1 year ago
The pull request has been merged as 195caa9f70.
Sign in to join this conversation.
No reviewers
No Label
No Milestone
No Assignees
3 Participants
Notifications
Due Date

No due date set.

Dependencies

This pull request currently doesn't have any dependencies.

Loading…
There is no content yet.